Changeset 03426ac40250…
Parent 08a3cb8d3d4a…
by Benjamin Pollack <benjamin@fogcreek.com>
Changes to 11 files · Browse files at 03426ac40250 Showing diff from parent 08a3cb8d3d4a Diff from another changeset...
@@ -89,7 +89,7 @@ if os.path.exists(outfilename): # for windows
os.remove(outfilename)
os.rename(tmpfilename, outfilename)
- bfutil.copy_to_cache(self.repo, self.repo['.'].node(), filename, True)
+ bfutil.copytocache(self.repo, self.repo['.'].node(), filename, True)
success.append((filename, hhash))
ui.progress(_('getting bfiles'), None)
@@ -143,7 +143,7 @@
import localstore, httpstore
-_store_provider = {
+_storeprovider = {
'file': (localstore, 'localstore'),
'http': (httpstore, 'httpstore'),
'https': (httpstore, 'httpstore'),
@@ -154,7 +154,7 @@# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
-def _open_store(repo, path=None, put=False):
+def _openstore(repo, path=None, put=False):
ui = repo.ui
if not path:
path = ui.expandpath('default-push', 'default')
@@ -176,7 +176,7 @@ scheme = match.group(1)
try:
- (mod, klass) = _store_provider[scheme]
+ (mod, klass) = _storeprovider[scheme]
except KeyError:
raise util.Abort(_('unsupported URL scheme %r') % scheme)
|
|
|
@@ -28,7 +28,7 @@ tobfile = True
size = opts['size']
if not size:
- size = ui.config(bfutil.long_name, 'size', default=None)
+ size = ui.config(bfutil.longname, 'size', default=None)
try:
size = int(size)
except ValueError:
@@ -73,7 +73,7 @@ bfiles = set()
normalfiles = set()
if not pats:
- pats = ui.config(bfutil.long_name, 'patterns', default=())
+ pats = ui.config(bfutil.longname, 'patterns', default=())
if pats:
pats = pats.split(' ')
if pats:
@@ -88,8 +88,8 @@ bfiles, normalfiles, matcher, size, bfiletohash)
ui.progress(_('converting revisions'), None)
- if os.path.exists(rdst.wjoin(bfutil.short_name)):
- shutil.rmtree(rdst.wjoin(bfutil.short_name))
+ if os.path.exists(rdst.wjoin(bfutil.shortname)):
+ shutil.rmtree(rdst.wjoin(bfutil.shortname))
for f in bfiletohash.keys():
if os.path.isfile(rdst.wjoin(f)):
@@ -146,10 +146,10 @@ raise IOError()
renamed = fctx.renamed()
if renamed:
- renamed = bfutil.split_standin(renamed[0])
+ renamed = bfutil.splitstandin(renamed[0])
hash = fctx.data().strip()
- path = bfutil.find_file(rsrc, hash)
+ path = bfutil.findfile(rsrc, hash)
### TODO: What if the file is not cached?
data = ''
with open(path, 'rb') as fd:
@@ -176,8 +176,8 @@
dstfiles = []
for file in files:
- if bfutil.is_standin(file):
- dstfiles.append(bfutil.split_standin(file))
+ if bfutil.isstandin(file):
+ dstfiles.append(bfutil.splitstandin(file))
else:
dstfiles.append(file)
# Commit
@@ -214,7 +214,7 @@ dstfiles = []
for f in files:
if f not in bfiles and f not in normalfiles:
- isbfile = _is_bfile(f, ctx, matcher, size)
+ isbfile = _isbfile(f, ctx, matcher, size)
# If this file was renamed or copied then copy
# the bfileness of its predecessor
if f in ctx.manifest():
@@ -241,7 +241,7 @@
# bfile was modified, update standins
fullpath = rdst.wjoin(f)
- bfutil.create_dir(os.path.dirname(fullpath))
+ bfutil.createdir(os.path.dirname(fullpath))
m = util.sha1('')
m.update(ctx[f].data())
hash = m.hexdigest()
@@ -249,18 +249,18 @@ with open(fullpath, 'wb') as fd:
fd.write(ctx[f].data())
executable = 'x' in ctx[f].flags()
- os.chmod(fullpath, bfutil.get_mode(executable))
- bfutil.write_standin(rdst, bfutil.standin(f), hash, executable)
+ os.chmod(fullpath, bfutil.getmode(executable))
+ bfutil.writestandin(rdst, bfutil.standin(f), hash, executable)
bfiletohash[f] = hash
else:
# normal file
dstfiles.append(f)
def getfilectx(repo, memctx, f):
- if bfutil.is_standin(f):
+ if bfutil.isstandin(f):
# if the file isn't in the manifest then it was removed
# or renamed, raise IOError to indicate this
- srcfname = bfutil.split_standin(f)
+ srcfname = bfutil.splitstandin(f)
try:
fctx = ctx.filectx(srcfname)
except error.LookupError:
@@ -299,7 +299,7 @@ rdst.dirstate.setparents(ret)
revmap[ctx.node()] = rdst.changelog.tip()
-def _is_bfile(file, ctx, matcher, size):
+def _isbfile(file, ctx, matcher, size):
'''
A file is a bfile if it matches a pattern or is over
the given size.
@@ -314,7 +314,7 @@ except error.LookupError:
return False
-def upload_bfiles(ui, rsrc, rdst, files):
+def uploadbfiles(ui, rsrc, rdst, files):
'''upload big files to the central store'''
if not files:
@@ -325,7 +325,7 @@ if not rdst.path.startswith('http'):
return
- store = basestore._open_store(rsrc, rdst.path, put=True)
+ store = basestore._openstore(rsrc, rdst.path, put=True)
at = 0
for hash in files:
@@ -333,7 +333,7 @@ if store.exists(hash):
at += 1
continue
- source = bfutil.find_file(rsrc, hash)
+ source = bfutil.findfile(rsrc, hash)
if not source:
raise util.Abort(_('Missing bfile %s needs to be uploaded') % hash)
# XXX check for errors here
@@ -341,7 +341,7 @@ at += 1
ui.progress('uploading bfiles', None)
-def verify_bfiles(ui, repo, all=False, contents=False):
+def verifybfiles(ui, repo, all=False, contents=False):
'''Verify that every big file revision in the current changeset
exists in the central store. With --contents, also verify that
the contents of each big file revision are correct (SHA-1 hash
@@ -353,22 +353,22 @@ else:
revs = ['.']
- store = basestore._open_store(repo)
+ store = basestore._openstore(repo)
return store.verify(revs, contents=contents)
-def revert_bfiles(ui, repo, file_list=None):
+def revertbfiles(ui, repo, filelist=None):
wlock = repo.wlock()
try:
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
- bfiles = bfutil.list_bfiles(repo)
+ bfiles = bfutil.listbfiles(repo)
toget = []
at = 0
updated = 0
for bfile in bfiles:
- if file_list == None or bfile in file_list:
+ if filelist == None or bfile in filelist:
if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
bfdirstate.remove(bfile)
continue
@@ -378,7 +378,7 @@ expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
- path = bfutil.find_file(repo, expectedhash)
+ path = bfutil.findfile(repo, expectedhash)
if path is None:
toget.append((bfile, expectedhash))
else:
@@ -403,7 +403,7 @@ bfutil.dirstate_normaldirty(bfdirstate, bfutil.unixpath(bfile))
if toget:
- store = basestore._open_store(repo)
+ store = basestore._openstore(repo)
success, missing = store.get(toget)
else:
success, missing = [], []
@@ -421,7 +421,7 @@
removed = 0
for bfile in bfdirstate:
- if file_list == None or bfile in file_list:
+ if filelist == None or bfile in filelist:
if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
if os.path.exists(repo.wjoin(bfile)):
os.unlink(repo.wjoin(bfile))
@@ -449,14 +449,14 @@ finally:
wlock.release()
-def update_bfiles(ui, repo):
+def updatebfiles(ui, repo):
wlock = repo.wlock()
try:
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
- bfiles = bfutil.list_bfiles(repo)
+ bfiles = bfutil.listbfiles(repo)
toget = []
at = 0
updated = 0
@@ -476,7 +476,7 @@ expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
- path = bfutil.find_file(repo, expectedhash)
+ path = bfutil.findfile(repo, expectedhash)
if not path:
toget.append((bfile, expectedhash))
else:
@@ -491,7 +491,7 @@ bfdirstate.normal(bfutil.unixpath(bfile))
if toget:
- store = basestore._open_store(repo)
+ store = basestore._openstore(repo)
(success, missing) = store.get(toget)
else:
success, missing = [],[]
|
|
|
@@ -6,7 +6,8 @@ import re
from mercurial import hg, extensions, commands, util, context, cmdutil, \
- match as match_, filemerge, node, archival, httprepo, error, manifest
+ match as match_, filemerge, node, archival, httprepo, error, \
+ manifest, merge
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase
@@ -147,7 +148,7 @@ # Any non bfiles that were explicitly listed must be taken out or
# bfdirstate.status will report an error. The status of these files
# was already computed using super's status.
- bfdirstate = bfutil.open_bfdirstate(ui, self)
+ bfdirstate = bfutil.openbfdirstate(ui, self)
match._files = [f for f in match._files if f in bfdirstate]
s = bfdirstate.status(match, [], listignored, listclean, listunknown)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
@@ -176,9 +177,9 @@ wlock.release()
for standin in ctx1.manifest():
- if not bfutil.is_standin(standin):
+ if not bfutil.isstandin(standin):
continue
- bfile = bfutil.split_standin(standin)
+ bfile = bfutil.splitstandin(standin)
if not match(bfile):
continue
if bfile not in bfdirstate:
@@ -187,16 +188,16 @@ bfiles = (modified, added, removed, missing, [], [], clean)
result = list(result)
# Unknown files
- result[4] = [f for f in unknown if repo.dirstate[f] == '?' and not bfutil.is_standin(f)]
+ result[4] = [f for f in unknown if repo.dirstate[f] == '?' and not bfutil.isstandin(f)]
# Ignored files must be ignored by both the dirstate and bfdirstate
result[5] = set(ignored).intersection(set(result[5]))
# combine normal files and bfiles
- normals = [[fn for fn in filelist if not bfutil.is_standin(fn)] for filelist in result]
+ normals = [[fn for fn in filelist if not bfutil.isstandin(fn)] for filelist in result]
result = [sorted(list1 + list2) for (list1, list2) in zip(normals, bfiles)]
else:
def toname(f):
- if bfutil.is_standin(f):
- return bfutil.split_standin(f)
+ if bfutil.isstandin(f):
+ return bfutil.splitstandin(f)
return f
result = [[toname(f) for f in items] for items in result]
@@ -215,9 +216,9 @@ node = super(bfiles_repo, self).commitctx(*args, **kwargs)
ctx = self[node]
for filename in ctx.files():
- if bfutil.is_standin(filename) and filename in ctx.manifest():
- realfile = bfutil.split_standin(filename)
- bfutil.copy_to_cache(self, ctx.node(), realfile)
+ if bfutil.isstandin(filename) and filename in ctx.manifest():
+ realfile = bfutil.splitstandin(filename)
+ bfutil.copytocache(self, ctx.node(), realfile)
return node
@@ -235,19 +236,19 @@ # if we are rebasing, any bfiles that were modified in the changesets we
# are rebasing on top of get overwritten either by the rebase or in the
# first commit after the rebase.
- bfcommands.update_bfiles(repo.ui, repo)
+ bfcommands.updatebfiles(repo.ui, repo)
# Case 1: user calls commit with no specific files or
# include/exclude patterns: refresh and commit everything.
if (match is None) or (not match.anypats() and not match.files()):
- bfiles = bfutil.list_bfiles(self)
- bfdirstate = bfutil.open_bfdirstate(ui, self)
+ bfiles = bfutil.listbfiles(self)
+ bfdirstate = bfutil.openbfdirstate(ui, self)
# this only loops through bfiles that exist (not removed/renamed)
for bfile in bfiles:
if os.path.exists(self.wjoin(bfutil.standin(bfile))):
# this handles the case where a rebase is being performed and the
# working copy is not updated yet.
if os.path.exists(self.wjoin(bfile)):
- bfutil.update_standin(self, bfutil.standin(bfile))
+ bfutil.updatestandin(self, bfutil.standin(bfile))
bfdirstate.normal(bfutil.unixpath(bfile))
for bfile in bfdirstate:
if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
@@ -264,12 +265,12 @@ force=force, editor=editor, extra=extra)
for file in match.files():
- if bfutil.is_standin(file):
+ if bfutil.isstandin(file):
raise util.Abort("Don't commit bfile standin. Commit bfile.")
# Case 2: user calls commit with specified patterns: refresh any
# matching big files.
- smatcher = bfutil.compose_standin_matcher(self, match)
+ smatcher = bfutil.composestandinmatcher(self, match)
standins = bfutil.dirstate_walk(self.dirstate, smatcher)
# No matching big files: get out of the way and pass control to
@@ -283,11 +284,11 @@ # refreshed. No harm done: the user modified them and asked to
# commit them, so sooner or later we're going to refresh the
# standins. Might as well leave them refreshed.
- bfdirstate = bfutil.open_bfdirstate(ui, self)
+ bfdirstate = bfutil.openbfdirstate(ui, self)
for standin in standins:
- bfile = bfutil.split_standin(standin)
+ bfile = bfutil.splitstandin(standin)
if bfdirstate[bfile] is not 'r':
- bfutil.update_standin(self, standin)
+ bfutil.updatestandin(self, standin)
bfdirstate.normal(bfutil.unixpath(bfile))
else:
path = bfutil.unixpath(bfile)
@@ -303,7 +304,7 @@ # standins corresponding to the big files requested by the user.
# Have to modify _files to prevent commit() from complaining
# "not tracked" for big files.
- bfiles = bfutil.list_bfiles(repo)
+ bfiles = bfutil.listbfiles(repo)
match = copy.copy(match)
orig_matchfn = match.matchfn
@@ -364,8 +365,8 @@ if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
files.add(f)
- toupload = toupload.union(set([ctx[f].data().strip() for f in files if bfutil.is_standin(f) and f in ctx]))
- bfcommands.upload_bfiles(ui, self, remote, toupload)
+ toupload = toupload.union(set([ctx[f].data().strip() for f in files if bfutil.isstandin(f) and f in ctx]))
+ bfcommands.uploadbfiles(ui, self, remote, toupload)
# Mercurial >= 1.6 takes the newbranch argument, try that first.
try:
return super(bfiles_repo, self).push(remote, force, revs, newbranch)
@@ -388,8 +389,8 @@ except ValueError:
raise util.Abort(_('size must be an integer, was %s\n') % bfsize)
else:
- if os.path.exists(repo.wjoin(bfutil.short_name)):
- bfsize = ui.config(bfutil.long_name, 'size', default='10')
+ if os.path.exists(repo.wjoin(bfutil.shortname)):
+ bfsize = ui.config(bfutil.longname, 'size', default='10')
if bfsize:
try:
bfsize = int(bfsize)
@@ -397,8 +398,8 @@ raise util.Abort(_('bfiles.size must be integer, was %s\n') % bfsize)
bfmatcher = None
- if os.path.exists(repo.wjoin(bfutil.short_name)):
- bfpats = ui.config(bfutil.long_name, 'patterns', default=())
+ if os.path.exists(repo.wjoin(bfutil.shortname)):
+ bfpats = ui.config(bfutil.longname, 'patterns', default=())
if bfpats:
bfpats = bfpats.split(' ')
bfmatcher = match_.match(repo.root, '', list(bfpats))
@@ -439,17 +440,17 @@ wlock = repo.wlock()
try:
if not opts.get('dry_run'):
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
for f in bfnames:
standinname = bfutil.standin(f)
- bfutil.write_standin(repo, standinname, hash='', executable=bfutil.get_executable(repo.wjoin(f)))
+ bfutil.writestandin(repo, standinname, hash='', executable=bfutil.getexecutable(repo.wjoin(f)))
standins.append(standinname)
if bfdirstate[bfutil.unixpath(f)] == 'r':
bfdirstate.normallookup(bfutil.unixpath(f))
else:
bfdirstate.add(bfutil.unixpath(f))
bfdirstate.write()
- bad += [bfutil.split_standin(f) for f in bfutil.repo_add(repo, standins) if f in m.files()]
+ bad += [bfutil.splitstandin(f) for f in bfutil.repo_add(repo, standins) if f in m.files()]
finally:
wlock.release()
@@ -463,7 +464,7 @@ def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in manifest
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -493,7 +494,7 @@ def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in wctx
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -549,7 +550,7 @@ # and we could race inbetween.
wlock = repo.wlock()
try:
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
for f in remove:
if not after:
os.unlink(repo.wjoin(f))
@@ -588,7 +589,7 @@
result = orig(ui, repo, *pats, **opts)
if bf:
- result = result or bfcommands.verify_bfiles(ui, repo, all, contents)
+ result = result or bfcommands.verifybfiles(ui, repo, all, contents)
return result
# Override needs to refresh standins so that update's normal merge
@@ -596,7 +597,7 @@# will get the new files. Filemerge is also overriden so that the merge
# will merge standins correctly.
def override_update(orig, ui, repo, *pats, **opts):
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
@@ -617,7 +618,7 @@ # XXX handle removed differently
if not opts['clean']:
for bfile in unsure + modified + added:
- bfutil.update_standin(repo, bfutil.standin(bfile))
+ bfutil.updatestandin(repo, bfutil.standin(bfile))
finally:
wlock.release()
return orig(ui, repo, *pats, **opts)
@@ -628,7 +629,7 @@ # Use better variable names here. Because this is a wrapper we cannot change
# the variable names in the function declaration.
fcdest, fcother, fcancestor = fcd, fco, fca
- if not bfutil.is_standin(orig):
+ if not bfutil.isstandin(orig):
return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
else:
if not fcother.cmp(fcdest): # files identical?
@@ -639,9 +640,9 @@
if orig != fcother.path():
repo.ui.status(_('merging %s and %s to %s\n')
- % (bfutil.split_standin(orig), bfutil.split_standin(fcother.path()), bfutil.split_standin(fcdest.path())))
+ % (bfutil.splitstandin(orig), bfutil.splitstandin(fcother.path()), bfutil.splitstandin(fcdest.path())))
else:
- repo.ui.status(_('merging %s\n') % bfutil.split_standin(fcdest.path()))
+ repo.ui.status(_('merging %s\n') % bfutil.splitstandin(fcdest.path()))
if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():
return 0
@@ -650,7 +651,7 @@ return 0
if repo.ui.promptchoice(_('bfile %s has a merge conflict\n'
- 'keep (l)ocal or take (o)ther?') % bfutil.split_standin(orig),
+ 'keep (l)ocal or take (o)ther?') % bfutil.splitstandin(orig),
(_('&Local'), _('&Other')), 0) == 0:
return 0
else:
@@ -703,7 +704,7 @@ def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in manifest
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -747,8 +748,8 @@ newpats = []
# The patterns were previously mangled to add .hgbfiles, we need to remove that now
for pat in pats:
- if match_.patkind(pat) == None and bfutil.short_name in pat:
- newpats.append(pat.replace( bfutil.short_name, ''))
+ if match_.patkind(pat) == None and bfutil.shortname in pat:
+ newpats.append(pat.replace(bfutil.shortname, ''))
else:
newpats.append(pat)
match = oldmatch(repo, newpats, opts, globbed, default)
@@ -757,7 +758,7 @@ m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
- m.matchfn = lambda f: bfutil.is_standin(f) and bfile(bfutil.split_standin(f)) and orig_matchfn(bfutil.split_standin(f)) or None
+ m.matchfn = lambda f: bfutil.isstandin(f) and bfile(bfutil.splitstandin(f)) and orig_matchfn(bfutil.splitstandin(f)) or None
return m
try:
# Mercurial >= 1.9
@@ -776,8 +777,8 @@ origcopyfile = util.copyfile
copiedfiles = []
def override_copyfile(src, dest):
- if bfutil.short_name in src and bfutil.short_name in dest:
- destbfile = dest.replace(bfutil.short_name, '')
+ if bfutil.shortname in src and bfutil.shortname in dest:
+ destbfile = dest.replace(bfutil.shortname, '')
if not opts['force'] and os.path.exists(destbfile):
raise IOError('', _('destination bfile already exists'))
copiedfiles.append((src, dest))
@@ -788,11 +789,11 @@ finally:
util.copyfile = origcopyfile
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
for (src, dest) in copiedfiles:
- if bfutil.short_name in src and bfutil.short_name in dest:
- srcbfile = src.replace(bfutil.short_name, '')
- destbfile = dest.replace(bfutil.short_name, '')
+ if bfutil.shortname in src and bfutil.shortname in dest:
+ srcbfile = src.replace(bfutil.shortname, '')
+ destbfile = dest.replace(bfutil.shortname, '')
destbfiledir = os.path.dirname(destbfile) or '.'
if not os.path.isdir(destbfiledir):
os.makedirs(destbfiledir)
@@ -836,10 +837,10 @@ # incorrect state.
wlock = repo.wlock()
try:
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
(modified, added, removed, missing, unknown, ignored, clean) = bfutil.bfdirstate_status(bfdirstate, repo, repo['.'].rev())
for bfile in modified:
- bfutil.update_standin(repo, bfutil.standin(bfile))
+ bfutil.updatestandin(repo, bfutil.standin(bfile))
try:
# Mercurial >= 1.9
@@ -859,20 +860,23 @@ def tostandin(f):
if bfutil.standin(f) in ctx0 or bfutil.standin(f) in ctx:
return bfutil.standin(f)
+ elif bfutil.standin(f) in repo[None]:
+ return None
return f
m._files = [tostandin(f) for f in m._files]
+ m._files = [f for f in m._files if f is not None]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
def matchfn(f):
- if bfutil.is_standin(f):
+ if bfutil.isstandin(f):
# We need to keep track of what bfiles are being matched so we know which
# ones to update later (otherwise we revert changes to other bfiles
# accidentally). This is repo specific, so duckpunch the repo object to
# keep the list of bfiles for us later.
- if(orig_matchfn(bfutil.split_standin(f)) and (f in repo[None] or f in ctx)):
- bfiles_list = getattr(repo, '_bfiles_to_update', [])
- bfiles_list.append(bfutil.split_standin(f))
- repo._bfiles_to_update = bfiles_list;
+ if(orig_matchfn(bfutil.splitstandin(f)) and (f in repo[None] or f in ctx)):
+ bfileslist = getattr(repo, '_bfilestoupdate', [])
+ bfileslist.append(bfutil.splitstandin(f))
+ repo._bfilestoupdate = bfileslist;
return True
else:
return False
@@ -882,9 +886,11 @@ try:
# Mercurial >= 1.9
scmutil.match = override_match
+ matches = override_match(repo[None], pats, opts)
except ImportError:
# Mercurial <= 1.8
cmdutil.match = override_match
+ matches = override_match(repo, pats, opts)
orig(ui, repo, *pats, **opts)
finally:
try:
@@ -893,31 +899,44 @@ except ImportError:
# Mercurial <= 1.8
cmdutil.match = oldmatch
- bfiles_list = getattr(repo, '_bfiles_to_update', [])
- bfcommands.revert_bfiles(ui, repo, bfiles_list)
+ bfileslist = getattr(repo, '_bfilestoupdate', [])
+ bfcommands.revertbfiles(ui, repo, bfileslist)
# Empty out the bfiles list so we start fresh next time
- repo._bfiles_to_update = []
+ repo._bfilestoupdate = []
for bfile in modified:
- if bfile in bfiles_list:
+ if bfile in bfileslist:
if os.path.exists(repo.wjoin(bfutil.standin(bfile))) and bfile in repo['.']:
- bfutil.write_standin(repo, bfutil.standin(bfile), repo['.'][bfile].data().strip(), 'x' in repo['.'][bfile].flags())
+ bfutil.writestandin(repo, bfutil.standin(bfile), repo['.'][bfile].data().strip(), 'x' in repo['.'][bfile].flags())
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
+ for bfile in added:
+ standin = bfutil.standin(bfile)
+ if standin not in ctx and (standin in matches or opts.get('all')):
+ if bfile in bfdirstate:
+ try:
+ # Mercurial >= 1.9
+ bfdirstate.drop(bfile)
+ except AttributeError:
+ # Mercurial <= 1.8
+ bfdirstate.forget(bfile)
+ util.unlinkpath(repo.wjoin(standin))
+ bfdirstate.write()
finally:
wlock.release()
def hg_update(orig, repo, node):
result = orig(repo, node)
# XXX check if it worked first
- bfcommands.update_bfiles(repo.ui, repo)
+ bfcommands.updatebfiles(repo.ui, repo)
return result
def hg_clean(orig, repo, node, show_stats=True):
result = orig(repo, node, show_stats)
- bfcommands.update_bfiles(repo.ui, repo)
+ bfcommands.updatebfiles(repo.ui, repo)
return result
def hg_merge(orig, repo, node, force=None, remind=True):
result = orig(repo, node, force, remind)
- bfcommands.update_bfiles(repo.ui, repo)
+ bfcommands.updatebfiles(repo.ui, repo)
return result
# When we rebase a repository with remotely changed bfiles, we need
@@ -1028,10 +1047,10 @@ for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
- if bfutil.is_standin(f):
- path = bfutil.find_file(repo, getdata().strip())
+ if bfutil.isstandin(f):
+ path = bfutil.findfile(repo, getdata().strip())
### TODO: What if the file is not cached?
- f = bfutil.split_standin(f)
+ f = bfutil.splitstandin(f)
def getdatafn():
with open(path, 'rb') as fd:
@@ -1081,7 +1100,7 @@ def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in wctx
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -1121,7 +1140,7 @@ # and we could race inbetween.
wlock = repo.wlock()
try:
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
for f in forget:
bfdirstate.remove(bfutil.unixpath(f))
bfdirstate.write()
@@ -1129,7 +1148,7 @@ finally:
wlock.release()
-def get_outgoing_bfiles(ui, repo, dest=None, **opts):
+def getoutgoingbfiles(ui, repo, dest=None, **opts):
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
@@ -1171,27 +1190,27 @@ for f in mc:
if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
files.add(f)
- toupload = toupload.union(set([f for f in files if bfutil.is_standin(f) and f in ctx]))
+ toupload = toupload.union(set([f for f in files if bfutil.isstandin(f) and f in ctx]))
return toupload
def override_outgoing(orig, ui, repo, dest=None, **opts):
orig(ui, repo, dest, **opts)
if opts.pop('bf', None):
- toupload = get_outgoing_bfiles(ui, repo, dest, **opts)
+ toupload = getoutgoingbfiles(ui, repo, dest, **opts)
if toupload is None:
ui.status(_('kbfiles: No remote repo\n'))
else:
ui.status(_('kbfiles to upload:\n'))
for file in toupload:
- ui.status(bfutil.split_standin(file) + '\n')
+ ui.status(bfutil.splitstandin(file) + '\n')
ui.status('\n')
def override_summary(orig, ui, repo, *pats, **opts):
orig(ui, repo, *pats, **opts)
if opts.pop('bf', None):
- toupload = get_outgoing_bfiles(ui, repo, None, **opts)
+ toupload = getoutgoingbfiles(ui, repo, None, **opts)
if toupload is None:
ui.status(_('kbfiles: No remote repo\n'))
else:
@@ -1213,7 +1232,7 @@
# Manifests are only iterable so turn them into sets then union
for file in manifesttip.union(manifestworking):
- if file.startswith(bfutil.short_name):
+ if file.startswith(bfutil.shortname):
raise util.Abort(_('addremove cannot be run on a repo with bfiles'))
return orig(ui, repo, *pats, **opts)
@@ -1226,7 +1245,7 @@ clean=False, unknown=False, listsubrepos=False):
r = oldstatus(node1, node2, match, ignored, clean, unknown,
listsubrepos)
- bfdirstate = bfutil.open_bfdirstate(ui, repo)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
modified, added, removed, deleted, unknown, ignored, clean = r
unknown = [f for f in unknown if bfdirstate[f] == '?']
ignored = [f for f in ignored if bfdirstate[f] == '?']
@@ -1235,6 +1254,20 @@ orig(ui, repo, *dirs, **opts)
repo.status = oldstatus
+def override_rollback(orig, ui, repo, **opts):
+ result = orig(ui, repo, **opts)
+ merge.update(repo, node=None, branchmerge=False, force=True, partial=bfutil.isstandin)
+ bfdirstate = bfutil.openbfdirstate(ui, repo)
+ bfiles = bfutil.listbfiles(repo)
+ oldbfiles = bfutil.listbfiles(repo, repo[None].parents()[0].rev())
+ for file in bfiles:
+ if file in oldbfiles:
+ bfdirstate.normallookup(file)
+ else:
+ bfdirstate.add(file)
+ bfdirstate.write()
+ return result
+
def uisetup(ui):
# Disable auto-status for some commands which assume that all
# files in the result are under Mercurial's control
@@ -1249,6 +1282,7 @@ entry = extensions.wrapcommand(commands.table, 'forget', override_forget)
entry = extensions.wrapcommand(commands.table, 'status', override_status)
entry = extensions.wrapcommand(commands.table, 'log', override_log)
+ entry = extensions.wrapcommand(commands.table, 'rollback', override_rollback)
entry = extensions.wrapcommand(commands.table, 'verify', override_verify)
verifyopt = [('', 'bf', None, _('verify bfiles')),
@@ -1292,5 +1326,3 @@ extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge', override_purge)
if name == 'rebase':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase', override_rebase)
-
-
|
|
|
@@ -15,8 +15,8 @@ except ImportError:
pass
-short_name = '.kbf'
-long_name = 'kilnbfiles'
+shortname = '.kbf'
+longname = 'kilnbfiles'
# -- Portability wrappers ----------------------------------------------
@@ -124,37 +124,37 @@ shutil.copyfile(src, dest)
os.chmod(dest, os.stat(src).st_mode)
-def system_cache_path(ui, hash):
- path = ui.config(long_name, 'systemcache', None)
+def systemcachepath(ui, hash):
+ path = ui.config(longname, 'systemcache', None)
if path:
path = os.path.join(path, hash)
else:
if os.name == 'nt':
- path = os.path.join(os.getenv('LOCALAPPDATA') or os.getenv('APPDATA'), long_name, hash)
+ path = os.path.join(os.getenv('LOCALAPPDATA') or os.getenv('APPDATA'), longname, hash)
elif os.name == 'posix':
- path = os.path.join(os.getenv('HOME'), '.' + long_name, hash)
+ path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
else:
raise util.Abort(_('Unknown operating system: %s\n') % os.name)
return path
-def in_system_cache(ui, hash):
- return os.path.exists(system_cache_path(ui, hash))
+def insystemcache(ui, hash):
+ return os.path.exists(systemcachepath(ui, hash))
-def find_file(repo, hash):
- if in_cache(repo, hash):
+def findfile(repo, hash):
+ if incache(repo, hash):
repo.ui.note(_('Found %s in cache\n') % hash)
- return cache_path(repo, hash)
- if in_system_cache(repo.ui, hash):
+ return cachepath(repo, hash)
+ if insystemcache(repo.ui, hash):
repo.ui.note(_('Found %s in system cache\n') % hash)
- return system_cache_path(repo.ui, hash)
+ return systemcachepath(repo.ui, hash)
return None
-def open_bfdirstate(ui, repo):
+def openbfdirstate(ui, repo):
'''
Return a dirstate object that tracks big files: i.e. its root is the
repo root, but it is saved in .hg/bfiles/dirstate.
'''
- admin = repo.join(long_name)
+ admin = repo.join(longname)
try:
# Mercurial >= 1.9
opener = scmutil.opener(admin)
@@ -177,10 +177,10 @@ # .hg/bfiles/{pending,committed}).
if not os.path.exists(os.path.join(admin, 'dirstate')):
util.makedirs(admin)
- matcher = get_standin_matcher(repo)
+ matcher = getstandinmatcher(repo)
for standin in dirstate_walk(repo.dirstate, matcher):
- bigfile = split_standin(standin)
- hash = read_standin(repo, standin)
+ bigfile = splitstandin(standin)
+ hash = readstandin(repo, standin)
try:
curhash = hashfile(bigfile)
except IOError, err:
@@ -215,50 +215,50 @@ wlock.release()
return (modified, added, removed, missing, unknown, ignored, clean)
-def list_bfiles(repo, rev=None, matcher=None):
+def listbfiles(repo, rev=None, matcher=None):
'''list big files in the working copy or specified changeset'''
if matcher is None:
- matcher = get_standin_matcher(repo)
+ matcher = getstandinmatcher(repo)
bfiles = []
- if rev:
+ if rev is not None:
cctx = repo[rev]
for standin in cctx.walk(matcher):
- filename = split_standin(standin)
+ filename = splitstandin(standin)
bfiles.append(filename)
else:
for standin in sorted(dirstate_walk(repo.dirstate, matcher)):
- filename = split_standin(standin)
+ filename = splitstandin(standin)
bfiles.append(filename)
return bfiles
-def in_cache(repo, hash):
- return os.path.exists(cache_path(repo, hash))
+def incache(repo, hash):
+ return os.path.exists(cachepath(repo, hash))
-def create_dir(dir):
+def createdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
-def cache_path(repo, hash):
- return repo.join(os.path.join(long_name, hash))
+def cachepath(repo, hash):
+ return repo.join(os.path.join(longname, hash))
-def copy_to_cache(repo, rev, file, uploaded=False):
- hash = read_standin(repo, standin(file))
- if in_cache(repo, hash):
+def copytocache(repo, rev, file, uploaded=False):
+ hash = readstandin(repo, standin(file))
+ if incache(repo, hash):
return
- create_dir(os.path.dirname(cache_path(repo, hash)))
- if in_system_cache(repo.ui, hash):
- link(system_cache_path(repo.ui, hash), cache_path(repo, hash))
+ createdir(os.path.dirname(cachepath(repo, hash)))
+ if insystemcache(repo.ui, hash):
+ link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
else:
- shutil.copyfile(repo.wjoin(file), cache_path(repo, hash))
- os.chmod(cache_path(repo, hash), os.stat(repo.wjoin(file)).st_mode)
- create_dir(os.path.dirname(system_cache_path(repo.ui, hash)))
- link(cache_path(repo, hash), system_cache_path(repo.ui, hash))
+ shutil.copyfile(repo.wjoin(file), cachepath(repo, hash))
+ os.chmod(cachepath(repo, hash), os.stat(repo.wjoin(file)).st_mode)
+ createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
+ link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
-def get_standin_matcher(repo, pats=[], opts={}):
+def getstandinmatcher(repo, pats=[], opts={}):
'''Return a match object that applies pats to <repo>/.kbf.'''
- standin_dir = repo.pathto(short_name)
+ standindir = repo.pathto(shortname)
if pats:
        # patterns supplied: search .kbf relative to current dir
cwd = repo.getcwd()
@@ -266,19 +266,19 @@ # cwd is an absolute path for hg -R <reponame>
# work relative to the repository root in this case
cwd = ''
- pats = [os.path.join(standin_dir, cwd, pat) for pat in pats]
- elif os.path.isdir(standin_dir):
+ pats = [os.path.join(standindir, cwd, pat) for pat in pats]
+ elif os.path.isdir(standindir):
# no patterns: relative to repo root
- pats = [standin_dir]
+ pats = [standindir]
else:
        # no patterns and no .kbf dir: return matcher that matches nothing
match = match_.match(repo.root, None, [], exact=True)
match.matchfn = lambda f: False
return match
- return get_matcher(repo, pats, opts, showbad=False)
+ return getmatcher(repo, pats, opts, showbad=False)
-def get_matcher(repo, pats=[], opts={}, showbad=True):
- '''Wrapper around cmdutil.match() that adds showbad: if false, neuter
+def getmatcher(repo, pats=[], opts={}, showbad=True):
+ '''Wrapper around scmutil.match() that adds showbad: if false, neuter
the match object\'s bad() method so it does not print any warnings
about missing files or directories.'''
try:
@@ -292,14 +292,14 @@ match.bad = lambda f, msg: None
return match
-def compose_standin_matcher(repo, rmatcher):
+def composestandinmatcher(repo, rmatcher):
'''Return a matcher that accepts standins corresponding to the files
accepted by rmatcher. Pass the list of files in the matcher as the
paths specified by the user.'''
- smatcher = get_standin_matcher(repo, rmatcher.files())
+ smatcher = getstandinmatcher(repo, rmatcher.files())
isstandin = smatcher.matchfn
def composed_matchfn(f):
- return isstandin(f) and rmatcher.matchfn(split_standin(f))
+ return isstandin(f) and rmatcher.matchfn(splitstandin(f))
smatcher.matchfn = composed_matchfn
return smatcher
@@ -314,39 +314,39 @@ # 2) Join with '/' because that's what dirstate always uses, even on
# Windows. Change existing separator to '/' first in case we are
# passed filenames from an external source (like the command line).
- return short_name + '/' + filename.replace(os.sep, '/')
+ return shortname + '/' + filename.replace(os.sep, '/')
-def is_standin(filename):
+def isstandin(filename):
'''Return true if filename is a big file standin. filename must
be in Mercurial\'s internal form (slash-separated).'''
- return filename.startswith(short_name+'/')
+ return filename.startswith(shortname + '/')
-def split_standin(filename):
+def splitstandin(filename):
# Split on / because that's what dirstate always uses, even on Windows.
# Change local separator to / first just in case we are passed filenames
# from an external source (like the command line).
bits = filename.replace(os.sep, '/').split('/', 1)
- if len(bits) == 2 and bits[0] == short_name:
+ if len(bits) == 2 and bits[0] == shortname:
return bits[1]
else:
return None
-def update_standin(repo, standin):
- file = repo.wjoin(split_standin(standin))
+def updatestandin(repo, standin):
+ file = repo.wjoin(splitstandin(standin))
if os.path.exists(file):
hash = hashfile(file)
- executable = get_executable(file)
- write_standin(repo, standin, hash, executable)
+ executable = getexecutable(file)
+ writestandin(repo, standin, hash, executable)
-def read_standin(repo, standin):
+def readstandin(repo, standin):
'''read hex hash from <repo.root>/<standin>'''
- return read_hash(repo.wjoin(standin))
+ return readhash(repo.wjoin(standin))
-def write_standin(repo, standin, hash, executable):
+def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
- write_hash(hash, repo.wjoin(standin), executable)
+ writehash(hash, repo.wjoin(standin), executable)
-def copy_and_hash(instream, outfile):
+def copyandhash(instream, outfile):
'''Read bytes from instream (iterable) and write them to outfile,
computing the SHA-1 hash of the data along the way. Close outfile
when done and return the binary hash.'''
@@ -384,7 +384,7 @@ # Same blecch as above.
infile.close()
-def read_hash(filename):
+def readhash(filename):
rfile = open(filename, 'rb')
hash = rfile.read(40)
rfile.close()
@@ -393,14 +393,14 @@ % (filename, len(hash)))
return hash
-def write_hash(hash, filename, executable):
+def writehash(hash, filename, executable):
util.makedirs(os.path.dirname(filename))
if os.path.exists(filename):
os.unlink(filename)
if os.name == 'posix':
# Yuck: on Unix, go through open(2) to ensure that the caller's mode is
# filtered by umask() in the kernel, where it's supposed to be done.
- wfile = os.fdopen(os.open(filename, os.O_WRONLY|os.O_CREAT, get_mode(executable)), 'wb')
+ wfile = os.fdopen(os.open(filename, os.O_WRONLY|os.O_CREAT, getmode(executable)), 'wb')
else:
# But on Windows, use open() directly, since passing mode='wb' to os.fdopen()
# does not work. (Python bug?)
@@ -412,11 +412,11 @@ finally:
wfile.close()
-def get_executable(filename):
+def getexecutable(filename):
mode = os.stat(filename).st_mode
return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & stat.S_IXOTH)
-def get_mode(executable):
+def getmode(executable):
if executable:
return 0755
else:
|
@@ -89,7 +89,7 @@ # all fail too.
reason = err[0][1] # assumes err[0] is a socket.error
raise util.Abort('%s: %s' % (baseurl, reason))
- return bfutil.copy_and_hash(bfutil.blockstream(infile), tmpfile)
+ return bfutil.copyandhash(bfutil.blockstream(infile), tmpfile)
def _verify(self, hash):
try:
@@ -117,7 +117,7 @@ except AttributeError:
# Mercurial <= 1.8
baseurl, authinfo = url_.getauthinfo(self.url)
- filename = bfutil.split_standin(standin)
+ filename = bfutil.splitstandin(standin)
if not filename:
return False
fctx = cctx[standin]
|
@@ -11,7 +11,7 @@ Since the cache is updated elsewhere, we can just read from it here as if it were the store.'''
def __init__(self, ui, repo, url):
- url = os.path.join(url, '.hg', bfutil.long_name)
+ url = os.path.join(url, '.hg', bfutil.longname)
super(localstore, self).__init__(ui, repo, util.expandpath(url))
def put(self, source, filename, hash):
@@ -19,15 +19,15 @@ return
def exists(self, hash):
- return bfutil.in_system_cache(self.repo.ui, hash)
+ return bfutil.insystemcache(self.repo.ui, hash)
def _getfile(self, tmpfile, filename, hash):
- if bfutil.in_system_cache(self.ui, hash):
- return bfutil.system_cache_path(self.ui, hash)
+ if bfutil.insystemcache(self.ui, hash):
+ return bfutil.systemcachepath(self.ui, hash)
raise basestore.StoreError(filename, hash, '', _("Can't get file locally"))
def _verifyfile(self, cctx, cset, contents, standin, verified):
- filename = bfutil.split_standin(standin)
+ filename = bfutil.splitstandin(standin)
if not filename:
return False
fctx = cctx[standin]
@@ -35,25 +35,24 @@ if key in verified:
return False
- expect_hash = fctx.data()[0:40]
+ expecthash = fctx.data()[0:40]
verified.add(key)
- if not bfutil.in_system_cache(self.ui, expect_hash):
+ if not bfutil.insystemcache(self.ui, expecthash):
self.ui.warn(
_('changeset %s: %s missing\n'
' (%s: %s)\n')
- % (cset, filename, expect_hash, err.strerror))
+ % (cset, filename, expecthash, err.strerror))
return True # failed
if contents:
- store_path = bfutil.system_cache_path(self.ui, expect_hash)
- actual_hash = bfutil.hashfile(store_path)
- if actual_hash != expect_hash:
+ storepath = bfutil.systemcachepath(self.ui, expecthash)
+ actualhash = bfutil.hashfile(storepath)
+ if actualhash != expecthash:
self.ui.warn(
_('changeset %s: %s: contents differ\n'
- ' (%s:\n'
- ' expected hash %s,\n'
- ' but got %s)\n')
- % (cset, filename,
- store_path, expect_hash, actual_hash))
+ ' (%s:\n'
+ ' expected hash %s,\n'
+ ' but got %s)\n')
+ % (cset, filename, storepath, expecthash, actualhash))
return True # failed
return False
|
@@ -30,8 +30,9 @@ stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = child.communicate()
- versions = re.findall(r'\d+\.\d+\.\d+', stdout)
+ versions = re.findall(r'\d+\.\d+(?:\.\d+)?', stdout)
parts = [re.match(r'\d+', v).group(0) for v in versions[0].split('.')]
+
version = [0, 0, 0]
for i, part in enumerate(map(int, parts)):
version[i] = part
|
|
@@ -152,10 +152,7 @@ os.unlink('n1')
os.unlink('n2.txt')
os.unlink('dir/b3')
-os.unlink('.kbf/dir/b3')
os.unlink('.kbf/b1.orig')
-os.unlink('.kbf/b2.txt')
-os.unlink('.kbf/dir/dir/b4.txt')
hgt.announce('revert specific files')
hgt.hg(['revert', '-r', '1', 'glob:**.txt'],
@@ -231,6 +228,7 @@# second bfile
os.chdir('..')
os.mkdir('repo3')
+os.chdir('repo3')
hgt.hg(['init', '-q'])
hgt.writefile('b1', 'b1')
hgt.hg(['add', '--bf', 'b1'])
@@ -242,3 +240,64 @@hgt.writefile('b2', 'b22')
hgt.hg(['revert', 'b1'])
hgt.asserttrue(hgt.readfile('b2') == 'b22', 'file changed')
+# Test that a newly added, uncommitted bfile can be reverted
+hgt.announce('revert uncommitted files')
+os.chdir('..')
+os.mkdir('repo4')
+os.chdir('repo4')
+hgt.hg(['init', '-q'])
+hgt.writefile('n1', 'n1')
+hgt.hg(['add', 'n1'])
+hgt.hg(['commit', '-m', 'add normal file'])
+hgt.writefile('b1', 'b1')
+hgt.hg(['add', '--bf', 'b1'])
+hgt.hg(['revert', 'b1'])
+hgt.hg(['status'], stdout='''? b1
+''')
+hgt.hg(['add', 'b1'])
+hgt.hg(['status'], stdout='''A b1
+''')
+hgt.hg(['revert', 'b1'])
+hgt.hg(['add', '--bf', 'b1'])
+hgt.hg(['revert', '--all'], stdout='''forgetting .kbf/b1
+''')
+hgt.hg(['status'], stdout='''? b1
+''')
+hgt.hg(['add', 'b1'])
+hgt.hg(['status'], stdout='''A b1
+''')
+hgt.hg(['revert', 'b1'])
+hgt.hg(['add', '--bf', 'b1'])
+hgt.hg(['commit', '-m', 'add bfile'])
+hgt.writefile('b2', 'b2')
+hgt.writefile('b3', 'b3')
+hgt.hg(['add', '--bf', 'b2'])
+hgt.hg(['revert', 'b2'])
+hgt.hg(['status'], stdout='''? b2
+? b3
+''')
+hgt.hg(['add', '--bf', 'b2'])
+hgt.hg(['revert', '--all'], stdout='''forgetting .kbf/b2
+''')
+hgt.hg(['status'], stdout='''? b2
+? b3
+''')
+hgt.hg(['add', '--bf'], stdout='''adding b2 as bfile
+adding b3 as bfile
+''')
+hgt.hg(['revert', 'b3'])
+hgt.hg(['status'], stdout='''A b2
+? b3
+''')
+hgt.hg(['commit', '-m', 'add another bfile'])
+hgt.hg(['rm', 'b2'])
+hgt.assertfalse(os.path.exists('b2'), 'file shouldnt exist')
+hgt.assertfalse(os.path.exists('.kbf/b2'), 'file shouldnt exist')
+hgt.hg(['revert', 'b2'])
+hgt.asserttrue(hgt.readfile('b2') == 'b2', 'file changed')
+hgt.asserttrue(hgt.readfile('.kbf/b2') == '32f28ea03b1b20126629d2ca63fc6665b0bbb604\n', 'file changed')
+hgt.hg(['rm', 'b2'])
+hgt.hg(['revert', '--all'], stdout='''undeleting .kbf/b2
+''')
+hgt.asserttrue(hgt.readfile('b2') == 'b2', 'file changed')
+hgt.asserttrue(hgt.readfile('.kbf/b2') == '32f28ea03b1b20126629d2ca63fc6665b0bbb604\n', 'file changed')
|
@@ -35,3 +35,36 @@ hg add --bf b2
hg commit -m 'added second bfile'
hg revert b1
+
+% revert uncommitted files
+hg init -q
+hg add n1
+hg commit -m 'add normal file'
+hg add --bf b1
+hg revert b1
+hg status
+hg add b1
+hg status
+hg revert b1
+hg add --bf b1
+hg revert --all
+hg status
+hg add b1
+hg status
+hg revert b1
+hg add --bf b1
+hg commit -m 'add bfile'
+hg add --bf b2
+hg revert b2
+hg status
+hg add --bf b2
+hg revert --all
+hg status
+hg add --bf
+hg revert b3
+hg status
+hg commit -m 'add another bfile'
+hg rm b2
+hg revert b2
+hg rm b2
+hg revert --all
|
|
@@ -0,0 +1,53 @@ + #!/usr/bin/env python
+#
+# Test rollback
+
+import os
+import common
+
+hgt = common.BfilesTester()
+
+hgt.updaterc()
+hgt.announce('setup')
+os.mkdir('repo1')
+os.chdir('repo1')
+hgt.hg(['init', '-q'])
+hgt.writefile('b1', 'b1')
+hgt.hg(['add', '--bf', 'b1'])
+hgt.hg(['commit', '-m', 'add bfile'])
+hgt.hg(['rollback'],
+ stdout='''repository tip rolled back to revision -1 (undo commit)
+working directory now based on revision -1
+''')
+hgt.hg(['status'], stdout='''A b1
+''')
+hgt.hg(['commit', '-m', 'add bfile'])
+hgt.writefile('b2', 'b2')
+hgt.hg(['add', '--bf', 'b2'])
+hgt.hg(['commit', '-m', 'add another bfile'])
+hgt.hg(['rollback'],
+ stdout='''repository tip rolled back to revision 0 (undo commit)
+working directory now based on revision 0
+''')
+hgt.hg(['status'], stdout='''A b2
+''')
+hgt.hg(['commit', '-m', 'add another bfile'])
+hgt.writefile('b2', 'b22')
+hgt.hg(['commit', '-m', 'modify bfile'])
+hgt.hg(['rollback'],
+ stdout='''repository tip rolled back to revision 1 (undo commit)
+working directory now based on revision 1
+''')
+hgt.hg(['status'], stdout='''M b2
+''')
+hgt.asserttrue(hgt.readfile('b2') == 'b22', 'file changed')
+hgt.hg(['commit', '-m', 'modify bfile'])
+hgt.hg(['rm', 'b2'])
+hgt.hg(['commit', '-m', 'delete bfile'])
+hgt.hg(['rollback'],
+ stdout='''repository tip rolled back to revision 2 (undo commit)
+working directory now based on revision 2
+''')
+hgt.hg(['status'], stdout='''! b2
+''')
+hgt.asserttrue(hgt.readfile('.kbf/b2') == 'ad280552ca89b1d13baa498ef352e1eabaafdf28\n', 'file changed')
|
|
@@ -0,0 +1,20 @@ + % setup
+hg init -q
+hg add --bf b1
+hg commit -m 'add bfile'
+hg rollback
+hg status
+hg commit -m 'add bfile'
+hg add --bf b2
+hg commit -m 'add another bfile'
+hg rollback
+hg status
+hg commit -m 'add another bfile'
+hg commit -m 'modify bfile'
+hg rollback
+hg status
+hg commit -m 'modify bfile'
+hg rm b2
+hg commit -m 'delete bfile'
+hg rollback
+hg status
|
Loading...