Kiln »
Unity 3D's proposed fixes and extensions to Kiln BFiles
Alias information
This URL is an alias for this repository.
Mercurial and Git clients can push and pull from this alias URL to interact with this repository. You can change to which repository an alias points by going to the Aliases link on the project page.
'''Setup code for bfiles extension: reposetup(), uisetup().'''

import os
import types
import copy

from mercurial import hg, extensions, commands, util, context, cmdutil, \
    match as match_, filemerge, node, archival, httprepo, error
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase

import bfutil, bfcommands


def hgversion():
    '''Return the running Mercurial version as a list of ints, e.g. [1, 6, 2].'''
    from mercurial.__version__ import version
    return [int(n) for n in version.partition('+')[0].split('.')]
hgversion = hgversion()

# -- Wrappers: modify existing commands --------------------------------

def reposetup(ui, repo):
    '''Wrap the repository class so status/commit/push understand bfiles.'''
    # add a kbfiles-specific querystring argument to remote requests, so kiln
    # can reject operations on a kbfiles-enabled remote repo from a
    # non-kbfiles local repo.
    if issubclass(repo.__class__, httprepo.httprepository):
        class kbfilesrepo(repo.__class__):
            # The function we want to override is do_cmd for Mercurial <= 1.6
            # and _callstream for Mercurial > 1.6. Wrap whichever one we can find.
            if hasattr(repo.__class__, 'do_cmd'):
                def do_cmd(self, cmd, **args):
                    args['kbfiles'] = 'true'
                    return super(repo.__class__, self).do_cmd(cmd, **args)
            if hasattr(repo.__class__, '_callstream'):
                def _callstream(self, cmd, **args):
                    args['kbfiles'] = 'true'
                    return super(repo.__class__, self)._callstream(cmd, **args)
        repo.__class__ = kbfilesrepo

    # bfiles doesn't support non-local repositories -- get out quick in
    # such a case
    if not repo.local():
        return

    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        #if not (isinstance(method, types.MethodType) and
        #        method.im_func is repo.__class__.commitctx.im_func):
        if isinstance(method, types.FunctionType) and method.func_name == 'wrap':
            ui.warn(_('kbfiles: repo method %r appears to have already been '
                      'wrapped by another extension: '
                      'kbfiles may behave incorrectly\n') % name)

    class bfiles_repo(repo.__class__):
        # When True, status() folds bfile state into the result; when False,
        # status() behaves exactly like the wrapped class's status().
        bfstatus = False

        def status_nobfiles(self, *args, **kwargs):
            return super(bfiles_repo, self).status(*args, **kwargs)

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files from
        # the listing. This function reverts to the original status if
        # self.bfstatus is False
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, subrepos=None):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.bfstatus:
                # Mercurial <= 1.5 has no subrepos argument; retry without it.
                try:
                    return super(bfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown, subrepos)
                except TypeError:
                    return super(bfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.bfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    # Deliberately broad: any lookup failure means "not here".
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except:
                        return False

                # create a copy of match that matches standins instead of bfiles
                # if matcher not set then it is the always matcher so overwrite that
                if match is None:
                    match = match_.always(self.root, self.getcwd())

                def tostandin(file):
                    if inctx(bfutil.standin(file), ctx2):
                        return bfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored clean and unknown but remove them later if they
                # were not asked for
                try:
                    result = super(bfiles_repo, self).status(node1, node2, m,
                        True, True, True, subrepos)
                except TypeError:
                    result = super(bfiles_repo, self).status(node1, node2, m,
                        True, True, True)
                if working:
                    # Hold the wlock while we read bfiles and update the bfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non bfiles that were explicitly listed must be
                        # taken out or bfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        bfdirstate = bfutil.open_bfdirstate(ui, self)
                        match._files = [f for f in match._files
                                        if f in bfdirstate]
                        s = bfdirstate.status(match, [], listignored,
                                              listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown,
                         ignored, clean) = s
                        if parentworking:
                            for bfile in unsure:
                                if ctx1[bfutil.standin(bfile)].data().strip() \
                                        != bfutil.hashfile(self.wjoin(bfile)):
                                    modified.append(bfile)
                                else:
                                    clean.append(bfile)
                                    bfdirstate.normal(bfutil.unixpath(bfile))
                            bfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []
                            for bfile in tocheck:
                                standin = bfutil.standin(bfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            bfutil.hashfile(self.wjoin(bfile)):
                                        modified.append(bfile)
                                    else:
                                        clean.append(bfile)
                                else:
                                    added.append(bfile)
                    finally:
                        wlock.release()

                    for standin in ctx1.manifest():
                        if not bfutil.is_standin(standin):
                            continue
                        bfile = bfutil.split_standin(standin)
                        if not match(bfile):
                            continue
                        if bfile not in bfdirstate:
                            removed.append(bfile)
                    # Handle unknown and ignored differently
                    bfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown
                                 if repo.dirstate[f] == '?'
                                 and not bfutil.is_standin(f)]
                    # Ignored files must be ignored by both the dirstate and
                    # bfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and bfiles
                    normals = [[fn for fn in filelist
                                if not bfutil.is_standin(fn)]
                               for filelist in result]
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, bfiles)]
                else:
                    def toname(f):
                        if bfutil.is_standin(f):
                            return bfutil.split_standin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.bfstatus = True
                return result

        # This call happens after a commit has occurred. Copy all of the
        # bfiles into the cache
        def commitctx(self, *args, **kwargs):
            node = super(bfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if bfutil.is_standin(filename) and filename in ctx.manifest():
                    realfile = bfutil.split_standin(filename)
                    bfutil.copy_to_cache(self, ctx.node(), realfile)
            return node

        # This call happens before a commit has occurred. The bfile standins
        # have not had their contents updated (to reflect the hash of their
        # bfile). Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            orig = super(bfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                if getattr(repo, "_are_rebasing", False):
                    # We have to take the time to pull down the new bfiles now.
                    # Otherwise if we are rebasing, any bfiles that were
                    # modified in the changesets we are rebasing on top of get
                    # overwritten either by the rebase or in the first commit
                    # after the rebase.
                    bfcommands.update_bfiles(repo.ui, repo)
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit everything.
                if (match is None) or (not match.anypats()
                                       and not match.files()):
                    bfiles = bfutil.list_bfiles(self)
                    bfdirstate = bfutil.open_bfdirstate(ui, self)
                    # this only loops through bfiles that exist (not
                    # removed/renamed)
                    for bfile in bfiles:
                        if os.path.exists(self.wjoin(bfutil.standin(bfile))):
                            # this handles the case where a rebase is being
                            # performed and the working copy is not updated yet.
                            if os.path.exists(self.wjoin(bfile)):
                                bfutil.update_standin(self,
                                    bfutil.standin(bfile))
                                bfdirstate.normal(bfutil.unixpath(bfile))
                    for bfile in bfdirstate:
                        if not os.path.exists(
                                repo.wjoin(bfutil.standin(bfile))):
                            bfdirstate.forget(bfutil.unixpath(bfile))
                    bfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                for file in match.files():
                    if bfutil.is_standin(file):
                        raise util.Abort(
                            "Don't commit bfile standin. Commit bfile.")

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = bfutil.compose_standin_matcher(self, match)
                standins = bfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control
                # to the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                bfdirstate = bfutil.open_bfdirstate(ui, self)
                for standin in standins:
                    bfile = bfutil.split_standin(standin)
                    # BUGFIX: was "is not 'r'" (identity test on a string
                    # literal, correct only by CPython interning accident).
                    if bfdirstate[bfile] != 'r':
                        bfutil.update_standin(self, standin)
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfdirstate.forget(bfutil.unixpath(bfile))
                bfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                bfiles = bfutil.list_bfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of bfiles and the list of standins
                # because if a bfile was removed, it won't be in the list of
                # bfiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = bfutil.standin(f)

                    # Ignore known bfiles and standins
                    if f in bfiles or fstandin in standins:
                        continue

                    # Append directory separator to avoid collisions
                    if not fstandin.endswith('/'):
                        fstandin += '/'

                    # Prevalidate matching standin directories
                    if any(st for st in match._files
                           if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if orig_matchfn(f):
                        return f not in bfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                            force=force, editor=editor, extra=extra)
            finally:
                wlock.release()

        def push(self, remote, force=False, revs=None, newbranch=False):
            # Upload the referenced bfiles before pushing the changesets that
            # reference them.
            o = bfutil.findoutgoing(repo, remote, force)
            if o:
                toupload = set()
                o = repo.changelog.nodesbetween(o, revs)[0]
                for n in o:
                    parents = [p for p in repo.changelog.parents(n)
                               if p != node.nullid]
                    ctx = repo[n]
                    files = set(ctx.files())
                    if len(parents) == 2:
                        # Merge commits may not list every changed file in
                        # ctx.files(); reconstruct from the manifests.
                        mc = ctx.manifest()
                        mp1 = ctx.parents()[0].manifest()
                        mp2 = ctx.parents()[1].manifest()
                        for f in mp1:
                            if f not in mc:
                                files.add(f)
                        for f in mp2:
                            if f not in mc:
                                files.add(f)
                        for f in mc:
                            if mc[f] != mp1.get(f, None) or \
                                    mc[f] != mp2.get(f, None):
                                files.add(f)
                    toupload = toupload.union(
                        set([ctx[f].data().strip() for f in files
                             if bfutil.is_standin(f) and f in ctx]))
                bfcommands.upload_bfiles(ui, self, remote, toupload)
            # Mercurial >= 1.6 takes the newbranch argument, try that first.
            try:
                return super(bfiles_repo, self).push(remote, force, revs,
                                                     newbranch)
            except TypeError:
                return super(bfiles_repo, self).push(remote, force, revs)

    repo.__class__ = bfiles_repo
# Add works by going through the files that the user wanted to add and
# checking if they should be added as bfiles. Then it makes a new matcher
# which matches only the normal files and runs the original version of add.
def override_add(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg add": route large/matching files to bfiles, rest to orig.'''
    bf = opts.pop('bf', None)

    bfsize = opts.pop('bfsize', None)
    if bfsize:
        try:
            bfsize = int(bfsize)
        except ValueError:
            raise util.Abort(_('size must be an integer, was %s\n') % bfsize)
    else:
        if os.path.exists(repo.wjoin(bfutil.short_name)):
            bfsize = ui.config(bfutil.long_name, 'size', default='10')
            if bfsize:
                try:
                    bfsize = int(bfsize)
                except ValueError:
                    raise util.Abort(
                        _('bfiles.size must be integer, was %s\n') % bfsize)

    bfmatcher = None
    if os.path.exists(repo.wjoin(bfutil.short_name)):
        bfpats = ui.config(bfutil.long_name, 'patterns', default=())
        if bfpats:
            bfpats = bfpats.split(' ')
            bfmatcher = match_.match(repo.root, '', list(bfpats))

    bfnames = []
    m = cmdutil.match(repo, pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        bfile = bfutil.standin(f) in wctx
        nfile = f in wctx

        if exact and bfile:
            ui.warn(_('%s already a bfile\n') % f)
            continue
        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and nfile:
            continue
        if exact or (not bfile and not nfile):
            if bf or (bfsize and os.path.getsize(repo.wjoin(f))
                      >= bfsize * 1024 * 1024) \
                    or (bfmatcher and bfmatcher(f)):
                bfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as bfile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock otherwise there could be a race condition inbetween when
    # standins are created and added to the repo
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            bfdirstate = bfutil.open_bfdirstate(ui, repo)
            for f in bfnames:
                standinname = bfutil.standin(f)
                bfutil.write_standin(repo, standinname, hash='',
                    executable=bfutil.get_executable(repo.wjoin(f)))
                standins.append(standinname)
                if bfdirstate[bfutil.unixpath(f)] == 'r':
                    bfdirstate.normallookup(bfutil.unixpath(f))
                else:
                    bfdirstate.add(bfutil.unixpath(f))
            bfdirstate.write()
            bad += [bfutil.split_standin(f)
                    for f in bfutil.repo_add(repo, standins)
                    if f in m.files()]
    finally:
        wlock.release()

    oldmatch = cmdutil.match
    manifest = repo[None].manifest()
    def override_match(repo, pats=[], opts={}, globbed=False,
                       default='relpath'):
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in manifest
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None
        return m
    cmdutil.match = override_match
    # BUGFIX: restore cmdutil.match even if orig() raises, otherwise the
    # monkey-patch leaks into every later command in this process.
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch

    # BUGFIX: was "result is 1" — identity test on an int literal; correct
    # only because CPython caches small ints.
    return (result == 1 or bad) and 1 or 0

def override_remove(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg remove": let orig handle normal files, then remove bfiles.'''
    wctx = repo[None].manifest()
    oldmatch = cmdutil.match
    def override_match(repo, pats=[], opts={}, globbed=False,
                       default='relpath'):
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in wctx
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)
        return m
    cmdutil.match = override_match
    # BUGFIX: restore cmdutil.match even if orig() raises (see override_add).
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    try:
        repo.bfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.bfstatus = False
    # Renamed loop variable: was "list", shadowing the builtin.
    modified, added, deleted, clean = [
        [f for f in filelist if bfutil.standin(f) in wctx]
        for filelist in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for f in remove:
            if not after:
                os.unlink(repo.wjoin(f))
                currentdir = os.path.split(f)[0]
                # prune now-empty parent directories
                while currentdir and \
                        not os.listdir(repo.wjoin(currentdir)):
                    os.rmdir(repo.wjoin(currentdir))
                    currentdir = os.path.split(currentdir)[0]
            bfdirstate.remove(bfutil.unixpath(f))
        bfdirstate.write()

        forget = [bfutil.standin(f) for f in forget]
        remove = [bfutil.standin(f) for f in remove]
        bfutil.repo_forget(repo, forget)
        bfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()

def override_status(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg status": run orig with bfile-aware status enabled.'''
    try:
        repo.bfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.bfstatus = False

def override_verify(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg verify": optionally also verify bfiles (--bf/--bfa/--bfc).'''
    bf = opts.pop('bf', False)
    # Renamed local: was "all", shadowing the builtin.
    verify_all = opts.pop('bfa', False)
    contents = opts.pop('bfc', False)

    result = orig(ui, repo, *pats, **opts)
    if bf:
        result = result or bfcommands.verify_bfiles(ui, repo, verify_all,
                                                    contents)
    return result
# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding
# repo.update) will get the new files. Filemerge is also overriden so that
# the merge will merge standins correctly.
def override_update(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg update": refresh modified standins before updating.'''
    bfdirstate = bfutil.open_bfdirstate(ui, repo)
    s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                          [], False, False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their bfiles
    # getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            for bfile in unsure:
                standin = bfutil.standin(bfile)
                if repo['.'][standin].data().strip() != \
                        bfutil.hashfile(repo.wjoin(bfile)):
                    mod = True
                else:
                    bfdirstate.normal(bfutil.unixpath(bfile))
            bfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for bfile in unsure + modified + added:
                bfutil.update_standin(repo, bfutil.standin(bfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)

# Override filemerge to prompt the user about how they wish to merge bfiles.
# This will handle identical edits, and copy/rename + edit without prompting
# the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    '''Merge two standins; prompt only when both sides really changed.'''
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not bfutil.is_standin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest):  # files identical?
            return None

        if fcancestor == fcother:
            # backwards, use working dir parent as ancestor
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (bfutil.split_standin(orig),
                              bfutil.split_standin(fcother.path()),
                              bfutil.split_standin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % bfutil.split_standin(fcdest.path()))

        # other side is an unmodified copy/rename of the ancestor: keep local
        if fcancestor.path() != fcother.path() and \
                fcother.data() == fcancestor.data():
            return 0
        # local side is an unmodified copy/rename of the ancestor: take other
        if fcancestor.path() != fcdest.path() and \
                fcdest.data() == fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        if repo.ui.promptchoice(_('bfile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?')
                                % bfutil.split_standin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
# Copy first changes the matchers to match standins instead of bfiles.
# Then it overrides util.copyfile in that function it checks if the
# destination bfile already exists. It also keeps a list of copied files so
# that the bfiles can be copied and the dirstate updated.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    '''Wrapped cmdutil.copy: copy/rename normal files, then bfiles.'''
    # doesn't remove bfile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        return os.path.join(os.path.relpath('.', repo.getcwd()),
            bfutil.standin(util.canonpath(repo.root, repo.getcwd(), relpath)))

    fullpats = cmdutil.expandpats(pats)
    dest = fullpats[-1]
    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both bfiles and normal files in one command, but we
    # don't want to do that first replace their matcher to only match normal
    # files and run it then replace it to just match bfiles and run it again
    nonormalfiles = False
    nobfiles = False
    oldmatch = cmdutil.match
    try:
        manifest = repo[None].manifest()
        def override_match(repo, pats=[], opts={}, globbed=False,
                           default='relpath'):
            match = oldmatch(repo, pats, opts, globbed, default)
            m = copy.copy(match)
            notbfile = lambda f: not bfutil.is_standin(f) and \
                bfutil.standin(f) not in manifest
            m._files = [f for f in m._files if notbfile(f)]
            m._fmap = set(m._files)
            orig_matchfn = m.matchfn
            m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None
            return m
        cmdutil.match = override_match
        result = orig(ui, repo, pats, opts, rename)
    except util.Abort as e:
        if str(e) != 'no files to copy':
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        cmdutil.match = oldmatch

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    # BUGFIX: acquire the wlock *before* the try whose finally releases it;
    # previously a failed wlock() raised NameError from the finally block.
    # When we call orig below it creates the standins but we don't add them
    # to the dir state until later so lock during that time.
    wlock = repo.wlock()
    try:
        manifest = repo[None].manifest()
        def override_match(repo, pats=[], opts={}, globbed=False,
                           default='relpath'):
            newpats = []
            # The patterns were previously mangled to add .hgbfiles, we need
            # to remove that now
            for pat in pats:
                # BUGFIX: was "== None"; use identity test for None.
                if match_.patkind(pat) is None and bfutil.short_name in pat:
                    newpats.append(pat.replace(bfutil.short_name, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(repo, newpats, opts, globbed, default)
            m = copy.copy(match)
            bfile = lambda f: bfutil.standin(f) in manifest
            m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
            m._fmap = set(m._files)
            orig_matchfn = m.matchfn
            m.matchfn = lambda f: bfutil.is_standin(f) and \
                bfile(bfutil.split_standin(f)) and \
                orig_matchfn(bfutil.split_standin(f)) or None
            return m
        cmdutil.match = override_match
        listpats = []
        for pat in pats:
            # BUGFIX: was "!= None".
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def override_copyfile(src, dest):
                if bfutil.short_name in src and bfutil.short_name in dest:
                    destbfile = dest.replace(bfutil.short_name, '')
                    if not opts['force'] and os.path.exists(destbfile):
                        raise IOError('',
                            _('destination bfile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = override_copyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if bfutil.short_name in src and bfutil.short_name in dest:
                srcbfile = src.replace(bfutil.short_name, '')
                destbfile = dest.replace(bfutil.short_name, '')
                destbfiledir = os.path.dirname(destbfile) or '.'
                if not os.path.isdir(destbfiledir):
                    os.makedirs(destbfiledir)
                if rename:
                    os.rename(srcbfile, destbfile)
                    bfdirstate.remove(bfutil.unixpath(
                        os.path.relpath(srcbfile, repo.root)))
                else:
                    util.copyfile(srcbfile, destbfile)
                # The destination is new in both the rename and the copy
                # case, so track it either way.
                bfdirstate.add(bfutil.unixpath(
                    os.path.relpath(destbfile, repo.root)))
        bfdirstate.write()
    except util.Abort as e:
        if str(e) != 'no files to copy':
            raise e
        else:
            nobfiles = True
    finally:
        cmdutil.match = oldmatch
        wlock.release()

    if nobfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))
    return result
Then return the# standins to their proper statedefoverride_revert(orig,ui,repo,*pats,**opts):# Because we put the standins in a bad state (by updating them) and then return them# to a correct state we need to lock to prevent others from changing them in their# incorrect state.wlock=repo.wlock()try:bfdirstate=bfutil.open_bfdirstate(ui,repo)(modified,added,removed,missing,unknown,ignored,clean)=bfutil.bfdirstate_status(bfdirstate,repo,repo['.'].rev())forbfileinmodified:bfutil.update_standin(repo,bfutil.standin(bfile))oldmatch=cmdutil.matchtry:ctx=repo[opts.get('rev')]defoverride_match(repo,pats=[],opts={},globbed=False,default='relpath'):match=oldmatch(repo,pats,opts,globbed,default)m=copy.copy(match)deftostandin(f):ifbfutil.standin(f)inrepo[None]orbfutil.standin(f)inctx:returnbfutil.standin(f)returnfm._files=[tostandin(f)forfinm._files]m._fmap=set(m._files)orig_matchfn=m.matchfndefmatchfn(f):ifbfutil.is_standin(f):returnorig_matchfn(bfutil.split_standin(f))and(finrepo[None]orfinctx)returnorig_matchfn(f)m.matchfn=matchfnreturnmcmdutil.match=override_matchorig(ui,repo,*pats,**opts)finally:cmdutil.match=oldmatchbfcommands.revert_bfiles(ui,repo)forbfileinmodified:ifos.path.exists(repo.wjoin(bfutil.standin(bfile)))andbfileinrepo['.']:bfutil.write_standin(repo,bfutil.standin(bfile),repo['.'][bfile].data().strip(),'x'inrepo['.'][bfile].flags())finally:wlock.release()defhg_update(orig,repo,node):result=orig(repo,node)# XXX check if it worked firstbfcommands.update_bfiles(repo.ui,repo)returnresultdefhg_clean(orig,repo,node,show_stats=True):result=orig(repo,node,show_stats)bfcommands.update_bfiles(repo.ui,repo)returnresultdefhg_merge(orig,repo,node,force=None,remind=True):result=orig(repo,node,force,remind)bfcommands.update_bfiles(repo.ui,repo)returnresult# When we rebase a repository with remotely changed bfiles, we need# to take some extra care so that the bfiles are correctly updated# in the working 
copydefoverride_pull(orig,ui,repo,source="default",**opts):ifopts.get('rebase',False):setattr(repo,"_are_rebasing",True)try:ifopts.get('update'):delopts['update']ui.debug('--update and --rebase are not compatible, ignoring ''the update flag\n')delopts['rebase']cmdutil.bail_if_changed(repo)revsprepull=len(repo)origpostincoming=commands.postincomingdef_dummy(*args,**kwargs):passcommands.postincoming=_dummytry:result=commands.pull(ui,repo,source,**opts)finally:commands.postincoming=origpostincomingrevspostpull=len(repo)ifrevspostpull>revsprepull:result=resultorrebase.rebase(ui,repo)branch=repo[None].branch()dest=repo[branch].rev()finally:setattr(repo,"_are_rebasing",False)else:result=orig(ui,repo,source,**opts)returnresultdefoverride_rebase(orig,ui,repo,**opts):setattr(repo,"_are_rebasing",True)try:orig(ui,repo,**opts)finally:setattr(repo,"_are_rebasing",False)defoverride_archive(orig,repo,dest,node,kind,decode=True,matchfn=None,prefix=None,mtime=None,subrepos=None):# No need to lock because we are only reading history and bfile caches# neither of which are modifiedifkindnotinarchival.archivers:raiseutil.Abort(_("unknown archive type '%s'")%kind)ctx=repo[node]# In Mercurial <= 1.5 the prefix is passed to the archiver so try that# if that doesn't work we are probably in Mercurial >= 1.6 where the# prefix is not handled by the archivertry:archiver=archival.archivers[kind](dest,prefix,mtimeorctx.date()[0])defwrite(name,mode,islink,getdata):ifmatchfnandnotmatchfn(name):returndata=getdata()ifdecode:data=repo.wwritedata(name,data)archiver.addfile(name,mode,islink,data)exceptTypeError:ifkind=='files':ifprefix:raiseutil.Abort(_('cannot give prefix when archiving to 
files'))else:prefix=archival.tidyprefix(dest,kind,prefix)defwrite(name,mode,islink,getdata):ifmatchfnandnotmatchfn(name):returndata=getdata()ifdecode:data=repo.wwritedata(name,data)archiver.addfile(prefix+name,mode,islink,data)archiver=archival.archivers[kind](dest,mtimeorctx.date()[0])ifrepo.ui.configbool("ui","archivemeta",True):defmetadata():base='repo: %s\nnode: %s\nbranch: %s\n'%(hex(repo.changelog.node(0)),hex(node),ctx.branch())tags=''.join('tag: %s\n'%tfortinctx.tags()ifrepo.tagtype(t)=='global')ifnottags:repo.ui.pushbuffer()opts={'template':'{latesttag}\n{latesttagdistance}','style':'','patch':None,'git':None}cmdutil.show_changeset(repo.ui,repo,opts).show(ctx)ltags,dist=repo.ui.popbuffer().split('\n')tags=''.join('latesttag: %s\n'%tfortinltags.split(':'))tags+='latesttagdistance: %s\n'%distreturnbase+tagswrite('.hg_archival.txt',0644,False,metadata)forfinctx:ff=ctx.flags(f)getdata=ctx[f].dataifbfutil.is_standin(f):path=bfutil.find_file(repo,getdata().strip())### TODO: What if the file is not cached?f=bfutil.split_standin(f)defgetdatafn():withopen(path,'rb')asfd:returnfd.read()getdata=getdatafnwrite(f,'x'inffand0755or0644,'l'inff,getdata)archiver.done()# If a bfile is modified the change is not reflected in its standin until a commit.# cmdutil.bail_if_changed raises an exception if the repo has uncommitted changes.# Wrap it to also check if bfiles were changed. 
# If a bfile is modified the change is not reflected in its standin until a
# commit. cmdutil.bail_if_changed raises an exception if the repo has
# uncommitted changes. Wrap it to also check if bfiles were changed. This is
# used by bisect and backout.
def override_bail_if_changed(orig, repo):
    '''Wrapped bail_if_changed: also abort on uncommitted bfile changes.'''
    orig(repo)
    repo.bfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.bfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))

# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg fetch": refuse to run with uncommitted bfile changes.'''
    repo.bfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.bfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

def override_forget(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg forget": forget normal files via orig, then the bfiles.'''
    wctx = repo[None].manifest()
    oldmatch = cmdutil.match
    def override_match(repo, pats=[], opts={}, globbed=False,
                       default='relpath'):
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and \
            bfutil.standin(f) not in wctx
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)
        return m
    cmdutil.match = override_match
    orig(ui, repo, *pats, **opts)
    cmdutil.match = oldmatch

    m = cmdutil.match(repo, pats, opts)
    try:
        repo.bfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.bfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if bfutil.standin(f) in wctx]

    for f in forget:
        if bfutil.standin(f) not in repo.dirstate and \
                not os.path.isdir(m.rel(bfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for f in forget:
            bfdirstate.remove(bfutil.unixpath(f))
        bfdirstate.write()
        bfutil.repo_remove(repo, [bfutil.standin(f) for f in forget],
                           unlink=True)
    finally:
        wlock.release()

def get_outgoing_bfiles(ui, repo, dest=None, **opts):
    '''Return the set of standins outgoing to dest, or None if unreachable.'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg
    try:
        remoteui = cmdutil.remoteui
    except AttributeError:
        remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = bfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # merge commits: reconstruct the changed-file set from manifests
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if bfutil.is_standin(f) and f in ctx]))
    return toupload

def override_outgoing(orig, ui, repo, dest=None, **opts):
    '''Wrapped "hg outgoing": with --bf, also list outgoing bfiles.'''
    orig(ui, repo, dest, **opts)

    if opts.pop('bf', None):
        toupload = get_outgoing_bfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('kbfiles: No remote repo\n'))
        else:
            ui.status(_('kbfiles to upload:\n'))
            for file in toupload:
                ui.status(bfutil.split_standin(file) + '\n')
            ui.status('\n')

def override_summary(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg summary": with --bf, also count outgoing bfiles.'''
    orig(ui, repo, *pats, **opts)

    if opts.pop('bf', None):
        toupload = get_outgoing_bfiles(ui, repo, None, **opts)
        if toupload is None:
            ui.status(_('kbfiles: No remote repo\n'))
        else:
            ui.status(_('kbfiles: %d to upload\n') % len(toupload))

def override_addremove(orig, ui, repo, *pats, **opts):
    '''Wrapped "hg addremove": refuse to run in a repo containing bfiles.'''
    # Check if the parent or child has bfiles if they do don't allow it.
    # If there is a symlink in the manifest then getting the manifest throws
    # an exception catch it and let addremove deal with it. This happens in
    # Mercurial's test test-addremove-symlink
    try:
        manifesttip = set(repo['tip'].manifest())
    except util.Abort:
        manifesttip = set()
    try:
        manifestworking = set(repo[None].manifest())
    except util.Abort:
        manifestworking = set()

    # Manifests are only iterable so turn them into sets then union
    for file in manifesttip.union(manifestworking):
        if file.startswith(bfutil.short_name):
            raise util.Abort(
                _('addremove cannot be run on a repo with bfiles'))

    return orig(ui, repo, *pats, **opts)
# Calling purge with --all will cause the kbfiles to be deleted, but doesn't
# re-populate them. We must do it explicitly. It doesn't hurt anything to
# be safe and update them the rest of the time after calling purge, either.
def override_purge(orig, ui, repo, *dirs, **opts):
    '''Wrapped "hg purge": re-populate bfiles after the purge.'''
    orig(ui, repo, *dirs, **opts)
    bfcommands.update_bfiles(repo.ui, repo)

def uisetup(ui):
    '''Install command and function wrappers for the kbfiles extension.'''
    # Disable auto-status for some commands which assume that all
    # files in the result are under Mercurial's control

    entry = extensions.wrapcommand(commands.table, 'add', override_add)
    addopt = [('', 'bf', None, _('add as bfile')),
              ('', 'bfsize', '',
               _('add all files above this size (in megabytes) as bfiles '
                 '(default: 10)'))]
    entry[1].extend(addopt)

    entry = extensions.wrapcommand(commands.table, 'addremove',
                                   override_addremove)
    entry = extensions.wrapcommand(commands.table, 'remove', override_remove)
    entry = extensions.wrapcommand(commands.table, 'forget', override_forget)
    entry = extensions.wrapcommand(commands.table, 'status', override_status)

    entry = extensions.wrapcommand(commands.table, 'verify', override_verify)
    verifyopt = [('', 'bf', None, _('verify bfiles')),
                 ('', 'bfa', None,
                  _('verify all revisions of bfiles not just current')),
                 ('', 'bfc', None,
                  _('verify bfile contents not just existence'))]
    entry[1].extend(verifyopt)

    entry = extensions.wrapcommand(commands.table, 'outgoing',
                                   override_outgoing)
    outgoingopt = [('', 'bf', None, _('display outgoing bfiles'))]
    entry[1].extend(outgoingopt)

    entry = extensions.wrapcommand(commands.table, 'summary',
                                   override_summary)
    summaryopt = [('', 'bf', None, _('display outgoing bfiles'))]
    entry[1].extend(summaryopt)

    entry = extensions.wrapcommand(commands.table, 'update', override_update)
    entry = extensions.wrapcommand(commands.table, 'pull', override_pull)
    entry = extensions.wrapfunction(filemerge, 'filemerge',
                                    override_filemerge)
    entry = extensions.wrapfunction(cmdutil, 'copy', override_copy)

    # Backout calls revert so we need to override both the command and the
    # function
    entry = extensions.wrapcommand(commands.table, 'revert', override_revert)
    entry = extensions.wrapfunction(commands, 'revert', override_revert)

    # clone uses hg._update instead of hg.update even though they are the
    # same function... so wrap both of them)
    extensions.wrapfunction(hg, 'update', hg_update)
    extensions.wrapfunction(hg, '_update', hg_update)
    extensions.wrapfunction(hg, 'clean', hg_clean)
    extensions.wrapfunction(hg, 'merge', hg_merge)

    extensions.wrapfunction(archival, 'archive', override_archive)
    extensions.wrapfunction(cmdutil, 'bail_if_changed',
                            override_bail_if_changed)

    for name, module in extensions.extensions():
        if name == 'fetch':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
                                   override_fetch)
        if name == 'purge':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
                                   override_purge)
        if name == 'rebase':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
                                   override_rebase)
Attach a Trello Card
Add a tag
Your session has expired
You are no longer logged in. Please log in and try your request again.
Filter RSS Feed
This RSS feed URL allows you to see the contents of your current filter using any feed reader.
This link includes a special authentication token. If you share the URL with anyone else, they can see this RSS feed's activity. You can disable these tokens when needed.
Your current filter is unsaved; changing it won't affect this RSS feed.