Changeset 3f4083dafa13…
Parent 35d64ba16cb6…
by Benjamin Pollack <benjamin@fogcreek.com>
Changes to 31 files · Browse files at 3f4083dafa13 Showing diff from parent 35d64ba16cb6 Diff from another changeset...
@@ -20,8 +20,8 @@ version.
'''
-from mercurial import commands
-from mercurial import wireproto
+from mercurial import commands, localrepo, wireproto
+from mercurial.hgweb import hgweb_mod
import bfsetup
import bfcommands
@@ -39,8 +39,22 @@ wireproto.commands['getbfile'] = (bfproto.getbfile, 'sha')
wireproto.commands['statbfile'] = (bfproto.statbfile, 'sha')
wireproto.commands['capabilities'] = (bfproto.capabilities, '')
+ wireproto.commands['heads'] = (bfproto.heads, '')
+
+ # make putbfile behave the same as push and {get,stat}bfile behave the same
+ # as pull w.r.t. permissions checks
+ hgweb_mod.perms['putbfile'] = 'push'
+ hgweb_mod.perms['getbfile'] = 'pull'
+ hgweb_mod.perms['statbfile'] = 'pull'
+
+ # the hello wireproto command uses wireproto.capabilities, so it won't see
+ # our bfilestore capability unless we replace the actual function as well.
+ # we also need to save the existing function for local use.
+ # this is really ugly.
+ # however, the alternative would be to parse hello's output and rewrite the
+ # capabilities line.
+ bfproto.capabilities_orig = wireproto.capabilities
+ wireproto.capabilities = bfproto.capabilities
wireproto.dispatch = bfproto.dispatch
- wireproto.wirerepository.putbfile = bfproto.wirerepo_putbfile
- wireproto.wirerepository.getbfile = bfproto.wirerepo_getbfile
- wireproto.wirerepository.statbfile = bfproto.wirerepo_statbfile
+ localrepo.localrepository.supported |= set(['kbfiles'])
|
|
@@ -9,6 +9,7 @@ from mercurial.i18n import _
import bfutil
+import remotestore
class StoreError(Exception):
'''Raised when there is a problem getting files from or putting
@@ -61,14 +62,16 @@
at = 0
for filename, hash in files:
- ui.progress(_('getting kbfiles'), at, unit='kbfile', total=len(files))
+ ui.progress(_('getting kbfiles'), at, unit='kbfile',
+ total=len(files))
at += 1
ui.note(_('getting %s\n') % filename)
outfilename = self.repo.wjoin(filename)
destdir = os.path.dirname(outfilename)
util.makedirs(destdir)
if not os.path.isdir(destdir):
- self.abort(error.RepoError(_('cannot create dest directory %s') % destdir))
+ self.abort(error.RepoError(_('cannot create dest directory %s')
+ % destdir))
# No need to pass mode='wb' to fdopen(), since mkstemp() already
# opened the file in binary mode.
@@ -95,7 +98,8 @@ if os.path.exists(outfilename): # for windows
os.remove(outfilename)
os.rename(tmpfilename, outfilename)
- bfutil.copytocache(self.repo, self.repo['.'].node(), filename, True)
+ bfutil.copytocache(self.repo, self.repo['.'].node(), filename,
+ True)
success.append((filename, hhash))
ui.progress(_('getting bfiles'), None)
@@ -147,12 +151,13 @@ '''
raise NotImplementedError('abstract method')
-import localstore, httpstore
+import localstore, kilnstore, wirestore
_storeprovider = {
- 'file': (localstore, 'localstore'),
- 'http': (httpstore, 'httpstore'),
- 'https': (httpstore, 'httpstore'),
+ 'file': [(localstore, 'localstore')],
+ 'http': [(wirestore, 'wirestore'), (kilnstore, 'kilnstore')],
+ 'https': [(wirestore, 'wirestore'), (kilnstore, 'kilnstore')],
+ 'ssh': [(wirestore, 'wirestore')],
}
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
@@ -160,22 +165,24 @@
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
-def _openstore(repo, path=None, put=False):
+def _openstore(repo, remote=None, put=False):
ui = repo.ui
- if not path:
- path = getattr(repo, 'bfpullsource', None)
- if not path:
- path = ui.expandpath('default-push', 'default')
- # If 'default-push' and 'default' can't be expanded
- # they are just returned. In that case fail with an informative
- # error message
- if path in ('default-push', 'default'):
- path = ''
+
+ if not remote:
+ path = getattr(repo, 'bfpullsource', None) or \
+ ui.expandpath('default-push', 'default')
+ # If 'default-push' and 'default' can't be expanded
+ # they are just returned. In that case use the empty string which
+ # use the filescheme.
+ if path == 'default-push' or path == 'default':
+ path = ''
+ remote = repo
+ else:
+ remote = hg.peer(repo, {}, path)
# The path could be a scheme so use Mercurial's normal functionality
# to resolve the scheme to a repository and use its path
- if path:
- path = hg.repository(ui, path).path
+ path = hasattr(remote, 'url') and remote.url() or remote.path
match = _scheme_re.match(path)
if not match: # regular filesystem path
@@ -184,9 +191,15 @@ scheme = match.group(1)
try:
- mod, klass = _storeprovider[scheme]
+ storeproviders = _storeprovider[scheme]
except KeyError:
raise util.Abort(_('unsupported URL scheme %r') % scheme)
- klass = getattr(mod, klass)
- return klass(ui, repo, path)
+ for (mod, klass) in storeproviders:
+ klass = getattr(mod, klass)
+ try:
+ return klass(ui, repo, remote)
+ except remotestore.storeprotonotcapable:
+ pass
+
+ raise util.Abort(_('%s does not appear to be a bfile store'), path)
|
|
|
@@ -6,7 +6,8 @@ from mercurial import util, match as match_, hg, node, context, error
from mercurial.i18n import _
-import bfutil, basestore
+import bfutil
+import basestore
# -- Commands ----------------------------------------------------------
@@ -32,7 +33,8 @@ try:
size = int(size)
except ValueError:
- raise util.Abort(_('bfiles.size must be integer, was %s\n') % size)
+ raise util.Abort(_('bfiles.size must be integer, was %s\n') % \
+ size)
except TypeError:
raise util.Abort(_('size must be specified'))
@@ -67,7 +69,8 @@ # is to simply walk the changelog, using changelog.nodesbewteen().
# Take a look at mercurial/revlog.py:639 for more details.
# Use a generator instead of a list to decrease memory usage
- ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0])
+ ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
+ rsrc.heads())[0])
revmap = {node.nullid: node.nullid}
if tobfile:
bfiles = set()
@@ -83,9 +86,10 @@
bfiletohash = {}
for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(), unit=_('revision'), total=rsrc['tip'].rev())
+ ui.progress(_('converting revisions'), ctx.rev(),
+ unit=_('revision'), total=rsrc['tip'].rev())
_bfconvert_addchangeset(rsrc, rdst, ctx, revmap,
- bfiles, normalfiles, matcher, size, bfiletohash)
+ bfiles, normalfiles, matcher, size, bfiletohash)
ui.progress(_('converting revisions'), None)
if os.path.exists(rdst.wjoin(bfutil.shortname)):
@@ -101,7 +105,8 @@
else:
for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(), unit=_('revision'), total=rsrc['tip'].rev())
+ ui.progress(_('converting revisions'), ctx.rev(),
+ unit=_('revision'), total=rsrc['tip'].rev())
_addchangeset(ui, rsrc, rdst, ctx, revmap)
ui.progress(_('converting revisions'), None)
@@ -152,8 +157,12 @@ path = bfutil.findfile(rsrc, hash)
### TODO: What if the file is not cached?
data = ''
- with open(path, 'rb') as fd:
+ fd = None
+ try:
+ fd = open(path, 'rb')
data = fd.read()
+ finally:
+ if fd: fd.close()
return context.memfilectx(f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
else:
@@ -169,7 +178,8 @@ newdata = []
for line in data.splitlines():
id, name = line.split(' ', 1)
- newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), name))
+ newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
+ name))
data = ''.join(newdata)
return context.memfilectx(f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
@@ -187,7 +197,8 @@ rdst.dirstate.setparents(ret)
revmap[ctx.node()] = rdst.changelog.tip()
-def _bfconvert_addchangeset(rsrc, rdst, ctx, revmap, bfiles, normalfiles, matcher, size, bfiletohash):
+def _bfconvert_addchangeset(rsrc, rdst, ctx, revmap, bfiles, normalfiles,
+ matcher, size, bfiletohash):
# Convert src parents to dst parents
parents = []
for p in ctx.parents():
@@ -224,7 +235,8 @@ isbfile |= renamedbfile
if 'l' in fctx.flags():
if renamedbfile:
- raise util.Abort(_('Renamed/copied bfile %s becomes symlink') % f)
+ raise util.Abort(
+ _('Renamed/copied bfile %s becomes symlink') % f)
isbfile = False
if isbfile:
bfiles.add(f)
@@ -246,11 +258,16 @@ m.update(ctx[f].data())
hash = m.hexdigest()
if f not in bfiletohash or bfiletohash[f] != hash:
- with open(fullpath, 'wb') as fd:
+ try:
+ fd = open(fullpath, 'wb')
fd.write(ctx[f].data())
+ finally:
+ if fd:
+ fd.close()
executable = 'x' in ctx[f].flags()
os.chmod(fullpath, bfutil.getmode(executable))
- bfutil.writestandin(rdst, bfutil.standin(f), hash, executable)
+ bfutil.writestandin(rdst, bfutil.standin(f), hash,
+ executable)
bfiletohash[f] = hash
else:
# normal file
@@ -271,7 +288,8 @@ # doesn't change after rename or copy
renamed = bfutil.standin(renamed[0])
- return context.memfilectx(f, bfiletohash[srcfname], 'l' in fctx.flags(),
+ return context.memfilectx(f, bfiletohash[srcfname], 'l' in
+ fctx.flags(),
'x' in fctx.flags(), renamed)
else:
try:
@@ -287,7 +305,8 @@ newdata = []
for line in data.splitlines():
id, name = line.split(' ', 1)
- newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), name))
+ newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
+ name))
data = ''.join(newdata)
return context.memfilectx(f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
@@ -317,15 +336,12 @@def uploadbfiles(ui, rsrc, rdst, files):
'''upload big files to the central store'''
- if not files:
+ # Don't upload locally. All bfiles are in the system wide cache
+ # so the other repo can just get them from there.
+ if not files or rdst.local():
return
- # Don't upload locally. All bfiles are in the system wide cache
- # so the other repo can just get them from there.
- if not rdst.path.startswith('http'):
- return
-
- store = basestore._openstore(rsrc, rdst.path, put=True)
+ store = basestore._openstore(rsrc, rdst, put=True)
at = 0
for hash in files:
@@ -348,7 +364,8 @@ matches the revision ID). With --all, check every changeset in
this repository.'''
if all:
- # Pass a list to the function rather than an iterator because we know a list will work.
+ # Pass a list to the function rather than an iterator because we know a
+ # list will work.
revs = range(len(repo))
else:
revs = ['.']
@@ -360,24 +377,28 @@ wlock = repo.wlock()
try:
bfdirstate = bfutil.openbfdirstate(ui, repo)
- s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+ s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
+ False, False, False)
+ unsure, modified, added, removed, missing, unknown, ignored, clean = s
bfiles = bfutil.listbfiles(repo)
toget = []
at = 0
updated = 0
for bfile in bfiles:
- if filelist == None or bfile in filelist:
+ if filelist is None or bfile in filelist:
if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
bfdirstate.remove(bfile)
continue
- if os.path.exists(repo.wjoin(bfutil.standin(os.path.join(bfile + '.orig')))):
- shutil.copyfile(repo.wjoin(bfile), repo.wjoin(bfile + '.orig'))
+ if os.path.exists(repo.wjoin(bfutil.standin(os.path.join(bfile\
+ + '.orig')))):
+ shutil.copyfile(repo.wjoin(bfile), repo.wjoin(bfile + \
+ '.orig'))
at += 1
expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
- if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
+ if not os.path.exists(repo.wjoin(bfile)) or expectedhash != \
+ bfutil.hashfile(repo.wjoin(bfile)):
path = bfutil.findfile(repo, expectedhash)
if path is None:
toget.append((bfile, expectedhash))
@@ -388,19 +409,24 @@ updated += 1
if bfutil.standin(bfile) not in repo['.']:
bfdirstate.add(bfutil.unixpath(bfile))
- elif expectedhash == repo['.'][bfutil.standin(bfile)].data().strip():
+ elif expectedhash == repo['.'][bfutil.standin(bfile)] \
+ .data().strip():
bfdirstate.normal(bfutil.unixpath(bfile))
else:
- bfutil.dirstate_normaldirty(bfdirstate, bfutil.unixpath(bfile))
- elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(repo.wjoin(bfile)).st_mode:
+ bfutil.dirstate_normaldirty(bfdirstate,
+ bfutil.unixpath(bfile))
+ elif os.path.exists(repo.wjoin(bfile)) and mode != \
+ os.stat(repo.wjoin(bfile)).st_mode:
os.chmod(repo.wjoin(bfile), mode)
updated += 1
if bfutil.standin(bfile) not in repo['.']:
bfdirstate.add(bfutil.unixpath(bfile))
- elif expectedhash == repo['.'][bfutil.standin(bfile)].data().strip():
+ elif expectedhash == \
+ repo['.'][bfutil.standin(bfile)].data().strip():
bfdirstate.normal(bfutil.unixpath(bfile))
else:
- bfutil.dirstate_normaldirty(bfdirstate, bfutil.unixpath(bfile))
+ bfutil.dirstate_normaldirty(bfdirstate,
+ bfutil.unixpath(bfile))
if toget:
store = basestore._openstore(repo)
@@ -417,7 +443,8 @@ elif hash == repo['.'][bfutil.standin(filename)].data().strip():
bfdirstate.normal(bfutil.unixpath(filename))
else:
- bfutil.dirstate_normaldirty(bfdirstate, bfutil.unixpath(filename))
+ bfutil.dirstate_normaldirty(bfdirstate,
+ bfutil.unixpath(filename))
removed = 0
for bfile in bfdirstate:
@@ -453,8 +480,9 @@ wlock = repo.wlock()
try:
bfdirstate = bfutil.openbfdirstate(ui, repo)
- s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+ s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
+ False, False, False)
+ unsure, modified, added, removed, missing, unknown, ignored, clean = s
bfiles = bfutil.listbfiles(repo)
toget = []
@@ -468,14 +496,16 @@
for bfile in bfiles:
at += 1
- if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
+ if os.path.exists(repo.wjoin(bfile)) and not \
+ os.path.exists(repo.wjoin(bfutil.standin(bfile))):
os.unlink(repo.wjoin(bfile))
removed += 1
bfdirstate.forget(bfutil.unixpath(bfile))
continue
expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
- if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
+ if not os.path.exists(repo.wjoin(bfile)) or expectedhash != \
+ bfutil.hashfile(repo.wjoin(bfile)):
path = bfutil.findfile(repo, expectedhash)
if not path:
toget.append((bfile, expectedhash))
@@ -485,7 +515,8 @@ os.chmod(repo.wjoin(bfile), mode)
updated += 1
bfdirstate.normal(bfutil.unixpath(bfile))
- elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(repo.wjoin(bfile)).st_mode:
+ elif os.path.exists(repo.wjoin(bfile)) and mode != \
+ os.stat(repo.wjoin(bfile)).st_mode:
os.chmod(repo.wjoin(bfile), mode)
updated += 1
bfdirstate.normal(bfutil.unixpath(bfile))
@@ -520,7 +551,8 @@
bfdirstate.write()
if printed:
- ui.status(_('%d big files updated, %d removed\n') % (updated, removed))
+ ui.status(_('%d big files updated, %d removed\n') % (updated,
+ removed))
finally:
wlock.release()
@@ -529,8 +561,10 @@
cmdtable = {
'kbfconvert': (bfconvert,
- [('s', 'size', 0, 'All files over this size '
- '(in megabytes) will be considered bfiles. This can also be specified in your hgrc as [bfiles].size.'),
- ('','tonormal',False, 'Convert from a bfiles repo to a normal repo')],
+ [('s', 'size', 0, 'All files over this size (in megabytes) '
+ 'will be considered bfiles. This can also be specified in '
+ 'your hgrc as [bfiles].size.'),
+ ('','tonormal',False,
+ 'Convert from a bfiles repo to a normal repo')],
_('hg kbfconvert SOURCE DEST [FILE ...]')),
}
|
|
@@ -1,57 +1,151 @@
-import tempfile, shutil, os
+import os
+import shutil
+import tempfile
+import urllib2
from mercurial.i18n import _
-from mercurial import wireproto, error, util
+from mercurial import error, httprepo, util, wireproto
-import bfutil, bfsetup
+import bfsetup
+import bfutil
+
+_heads_prefix = 'kbfiles\n'
def putbfile(repo, proto, sha):
- fd, tempname = tempfile.mkstemp(prefix='hg-putbfile-')
- with os.fdopen(fd, 'wb+') as fp:
- proto.getfile(fp)
- bfutil.copytocacheabsolute(repo, tempname, sha)
- return ''
+ """putbfile puts a bfile into a repository's local cache and into the
+ system cache."""
+ f = None
+ proto.redirect()
+ try:
+ try:
+ f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putbfile-')
+ proto.getfile(f)
+ f.seek(0)
+ if sha != bfutil.hexsha1(f):
+ return wireproto.pushres(1)
+ bfutil.copytocacheabsolute(repo, f.name, sha)
+ except IOError:
+ repo.ui.warn(
+ _('error: could not put received data into bfile store'))
+ return wireproto.pushres(1)
+ finally:
+ if f:
+ f.close()
+
+ return wireproto.pushres(0)
def getbfile(repo, proto, sha):
+ """getbfile retrieves a bfile from the repository-local cache or system
+ cache."""
filename = bfutil.findfile(repo, sha)
if not filename:
raise util.Abort(_('requested bfile %s not present in cache') % sha)
f = open(filename, 'rb')
- return wireproto.streamres(f)
+ length = os.fstat(f.fileno())[6]
+ # since we can't set an HTTP content-length header here, and mercurial core
+ # provides no way to give the length of a streamres (and reading the entire
+ # file into RAM would be ill-advised), we just send the length on the first
+ # line of the response, like the ssh proto does for string responses.
+ def generator():
+ yield '%d\n' % length
+ for chunk in f:
+ yield chunk
+ return wireproto.streamres(generator())
-# '0' for OK, '1' for invalid checksum, '2' for missing
def statbfile(repo, proto, sha):
+ """statbfile sends '2\n' if the bfile is missing, '1\n' if it has a
+ mismatched checksum, or '0\n' if it is in good condition"""
filename = bfutil.findfile(repo, sha)
if not filename:
return '2\n'
- with open(filename, 'rb') as f:
- return '0\n' if bfutil.hexsha1(f) == sha else '1\n'
+ fd = None
+ try:
+ fd = open(filename, 'rb')
+ return bfutil.hexsha1(fd) == sha and '0\n' or '1\n'
+ finally:
+ if fd:
+ fd.close()
-def wirerepo_putbfile(self, sha, fd):
- return self._callstream("putbfile", data=fd, sha=sha, headers={'content-type':'application/mercurial-0.1'})
+def wirereposetup(ui, repo):
+ class kbfileswirerepository(repo.__class__):
+ def putbfile(self, sha, fd):
+ # unfortunately, httprepository._callpush tries to convert its
+ # input file-like into a bundle before sending it, so we can't use
+ # it ...
+ if issubclass(self.__class__, httprepo.httprepository):
+ try:
+ return int(self._call('putbfile', data=fd, sha=sha,
+ headers={'content-type':'application/mercurial-0.1'}))
+ except (ValueError, urllib2.HTTPError):
+ return 1
+ # ... but we can't use sshrepository._call because the data=
+ # argument won't get sent, and _callpush does exactly what we want
+ # in this case: send the data straight through
+ else:
+ try:
+ ret, output = self._callpush("putbfile", fd, sha=sha,
+ headers={'content-type':'application/mercurial-0.1'})
+ if ret == "":
+ raise error.ResponseError(_('putbfile failed:'),
+ output)
+ return int(ret)
+ except IOError:
+ return 1
+ except ValueError:
+ raise error.ResponseError(
+ _('putbfile failed (unexpected response):'), ret)
-def wirerepo_getbfile(self, sha):
- return self._callstream("getbfile", sha=sha)
+ def getbfile(self, sha):
+ stream = self._callstream("getbfile", sha=sha)
+ length = stream.readline()
+ try:
+ length = int(length)
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), l))
+ return (length, stream)
-def wirerepo_statbfile(self, sha):
- try:
- return int(self._call("statbfile", sha=sha))
- except:
- return 2
- # if the server returns something that's not an integer followed by a
- # newline, it's not kbfiles-capable, so obviously it doesn't have the
- # bfile; any other exception means _something_ went wrong, so tell the
- # caller the bfile is missing
+ def statbfile(self, sha):
+ try:
+ return int(self._call("statbfile", sha=sha))
+ except (ValueError, urllib2.HTTPError):
+ # if the server returns anything but an integer followed by a
+                # newline, it's not speaking our language; if we get
+ # an HTTP error, we can't be sure the bfile is present; either
+ # way, consider it missing
+ return 2
+ @wireproto.batchable
+ def heads(self):
+ f = wireproto.future()
+ yield {}, f
+ d = f.value
+ if d[:len(_heads_prefix)] == _heads_prefix:
+ d = d[len(_heads_prefix):]
+ try:
+ yield wireproto.decodelist(d[:-1])
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+ repo.__class__ = kbfileswirerepository
+
+# wrap dispatch to check for and remove the kbfiles argument so commands with
+# fixed argument lists don't complain
def dispatch(repo, proto, command):
func, spec = wireproto.commands[command]
args = proto.getargs(spec)
+
+ # remove the kbfiles argument and ignore it: it still needs to be sent
+ # to avoid breaking compatibility with older versions of the extension,
+ # but it is unused in current versions
if len(args) > 0 and isinstance(args[-1], dict):
- if bfutil.listbfiles(repo) and command in affectedcommands and not args[-1].pop('kbfiles'):
- return '0\n'
+ args[-1].pop('kbfiles', None)
+
return func(repo, proto, *args)
+# advertise the bfilestore=serve capability
def capabilities(repo, proto):
- return wireproto.capabilities(repo, proto) + ' bfilestore=serve'
+ return capabilities_orig(repo, proto) + ' bfilestore=serve'
-affectedcommands = [ 'changegroup', 'changegroupsubset', 'getbundle', 'unbundle', 'stream_out', 'pushkey' ]
+def heads(repo, proto):
+ if bfutil.iskbfilesrepo(repo):
+ return _heads_prefix + wireproto.heads(repo, proto)
+ return wireproto.heads(repo, proto)
|
|
|
@@ -5,28 +5,33 @@ import copy
import re
-from mercurial import hg, extensions, commands, util, context, cmdutil, \
- match as match_, filemerge, node, archival, httprepo, error, \
- manifest, merge
+from mercurial import hg, extensions, commands, util, context, cmdutil
+from mercurial import match as match_, filemerge, node, archival, httprepo
+from mercurial import error, manifest, merge
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase
-import bfutil, bfcommands
try:
from mercurial import scmutil
except ImportError:
pass
+import bfutil
+import bfcommands
+import bfproto
+
# -- Wrappers: modify existing commands --------------------------------
def reposetup(ui, repo):
- # add a kbfiles-specific querystring argument to remote requests, so kiln can reject
- # operations on a kbfiles-enabled remote repo from a non-kbfiles local repo.
+ # add a kbfiles-specific querystring argument to remote requests, so kiln
+ # can reject operations on a kbfiles-enabled remote repo from a non-kbfiles
+ # local repo.
if issubclass(repo.__class__, httprepo.httprepository):
class kbfilesrepo(repo.__class__):
# The function we want to override is do_cmd for Mercurial <= 1.6
- # and _callstream for Mercurial > 1.6. Wrap whichever one we can find.
+ # and _callstream for Mercurial > 1.6. Wrap whichever one we can
+ # find.
if hasattr(repo.__class__, 'do_cmd'):
def do_cmd(self, cmd, **args):
args['kbfiles'] = 'true'
@@ -37,16 +42,17 @@ return super(kbfilesrepo, self)._callstream(cmd, **args)
repo.__class__ = kbfilesrepo
- # bfiles doesn't support non-local repositories -- get out quick in
- # such a case
+ # wire repositories should be given new wireproto functions but not the
+ # other bfiles modifications
if not repo.local():
- return
+ return bfproto.wirereposetup(ui, repo)
for name in ('status', 'commitctx', 'commit', 'push'):
method = getattr(repo, name)
#if not (isinstance(method, types.MethodType) and
# method.im_func is repo.__class__.commitctx.im_func):
- if isinstance(method, types.FunctionType) and method.func_name == 'wrap':
+ if isinstance(method, types.FunctionType) and method.func_name == \
+ 'wrap':
ui.warn(_('kbfiles: repo method %r appears to have already been '
'wrapped by another extension: '
'kbfiles may behave incorrectly\n')
@@ -65,24 +71,29 @@ if self.bfstatus:
class bfiles_manifestdict(manifest.manifestdict):
def __contains__(self, filename):
- if super(bfiles_manifestdict, self).__contains__(filename):
+ if super(bfiles_manifestdict,
+ self).__contains__(filename):
return True
- return super(bfiles_manifestdict, self).__contains__('.kbf/' + filename)
+ return super(bfiles_manifestdict,
+ self).__contains__('.kbf/' + filename)
class bfiles_ctx(ctx.__class__):
def files(self):
filenames = super(bfiles_ctx, self).files()
- return [re.sub(r'^\.kbf/', '', filename) for filename in filenames]
+ return [re.sub(r'^\.kbf/', '', filename) for filename
+ in filenames]
def manifest(self):
man1 = super(bfiles_ctx, self).manifest()
man1.__class__ = bfiles_manifestdict
return man1
def filectx(self, path, fileid=None, filelog=None):
try:
- result = super(bfiles_ctx, self).filectx(path, fileid, filelog)
+ result = super(bfiles_ctx, self).filectx(path,
+ fileid, filelog)
except error.LookupError:
- # Adding a null character will cause Mercurial to identify this
- # as a binary file.
- result = super(bfiles_ctx, self).filectx('.kbf/' + path, fileid, filelog)
+ # Adding a null character will cause Mercurial to
+ # identify this as a binary file.
+ result = super(bfiles_ctx, self).filectx('.kbf/' +\
+ path, fileid, filelog)
olddata = result.data
result.data = lambda: olddata() + '\0'
return result
@@ -93,13 +104,16 @@ # appropriate list in the result. Also removes standin files from
# the listing. This function reverts to the original status if
# self.bfstatus is False
- def status(self, node1='.', node2=None, match=None, ignored=False, clean=False, unknown=False, subrepos=None):
+ def status(self, node1='.', node2=None, match=None, ignored=False,
+ clean=False, unknown=False, subrepos=None):
listignored, listclean, listunknown = ignored, clean, unknown
if not self.bfstatus:
try:
- return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown, subrepos)
+ return super(bfiles_repo, self).status(node1, node2, match,
+ listignored, listclean, listunknown, subrepos)
except TypeError:
- return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown)
+ return super(bfiles_repo, self).status(node1, node2, match,
+ listignored, listclean, listunknown)
else:
# some calls in this function rely on the old version of status
self.bfstatus = False
@@ -123,8 +137,9 @@ except:
return False
- # create a copy of match that matches standins instead of bfiles
- # if matcher not set then it is the always matcher so overwrite that
+ # create a copy of match that matches standins instead of
+ # bfiles if matcher not set then it is the always matcher so
+ # overwrite that
if match is None:
match = match_.always(self.root, self.getcwd())
@@ -136,25 +151,34 @@ m = copy.copy(match)
m._files = [tostandin(f) for f in m._files]
- # get ignored clean and unknown but remove them later if they were not asked for
+ # get ignored clean and unknown but remove them later if they
+ # were not asked for
try:
- result = super(bfiles_repo, self).status(node1, node2, m, True, True, True, subrepos)
+ result = super(bfiles_repo, self).status(node1, node2, m,
+ True, True, True, subrepos)
except TypeError:
- result = super(bfiles_repo, self).status(node1, node2, m, True, True, True)
+ result = super(bfiles_repo, self).status(node1, node2, m,
+ True, True, True)
if working:
- # Hold the wlock while we read bfiles and update the bfdirstate
+ # Hold the wlock while we read bfiles and update the
+ # bfdirstate
wlock = repo.wlock()
try:
- # Any non bfiles that were explicitly listed must be taken out or
- # bfdirstate.status will report an error. The status of these files
- # was already computed using super's status.
+ # Any non bfiles that were explicitly listed must be
+ # taken out or bfdirstate.status will report an error.
+ # The status of these files was already computed using
+ # super's status.
bfdirstate = bfutil.openbfdirstate(ui, self)
- match._files = [f for f in match._files if f in bfdirstate]
- s = bfdirstate.status(match, [], listignored, listclean, listunknown)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+ match._files = [f for f in match._files if f in
+ bfdirstate]
+ s = bfdirstate.status(match, [], listignored,
+ listclean, listunknown)
+ (unsure, modified, added, removed, missing, unknown,
+ ignored, clean) = s
if parentworking:
for bfile in unsure:
- if ctx1[bfutil.standin(bfile)].data().strip() != bfutil.hashfile(self.wjoin(bfile)):
+ if ctx1[bfutil.standin(bfile)].data().strip() \
+ != bfutil.hashfile(self.wjoin(bfile)):
modified.append(bfile)
else:
clean.append(bfile)
@@ -167,7 +191,8 @@ for bfile in tocheck:
standin = bfutil.standin(bfile)
if inctx(standin, ctx1):
- if ctx1[standin].data().strip() != bfutil.hashfile(self.wjoin(bfile)):
+ if ctx1[standin].data().strip() != \
+ bfutil.hashfile(self.wjoin(bfile)):
modified.append(bfile)
else:
clean.append(bfile)
@@ -188,12 +213,16 @@ bfiles = (modified, added, removed, missing, [], [], clean)
result = list(result)
# Unknown files
- result[4] = [f for f in unknown if repo.dirstate[f] == '?' and not bfutil.isstandin(f)]
- # Ignored files must be ignored by both the dirstate and bfdirstate
+ result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
+ and not bfutil.isstandin(f)]
+ # Ignored files must be ignored by both the dirstate and
+ # bfdirstate
result[5] = set(ignored).intersection(set(result[5]))
# combine normal files and bfiles
- normals = [[fn for fn in filelist if not bfutil.isstandin(fn)] for filelist in result]
- result = [sorted(list1 + list2) for (list1, list2) in zip(normals, bfiles)]
+ normals = [[fn for fn in filelist if not \
+ bfutil.isstandin(fn)] for filelist in result]
+ result = [sorted(list1 + list2) for (list1, list2) in \
+ zip(normals, bfiles)]
else:
def toname(f):
if bfutil.isstandin(f):
@@ -223,35 +252,41 @@ return node
# This call happens before a commit has occurred. The bfile standins
- # have not had their contents updated (to reflect the hash of their bfile).
- # Do that here.
- def commit(self, text="", user=None, date=None, match=None, force=False,
- editor=False, extra={}):
+ # have not had their contents updated (to reflect the hash of their
+ # bfile). Do that here.
+ def commit(self, text="", user=None, date=None, match=None,
+ force=False, editor=False, extra={}):
orig = super(bfiles_repo, self).commit
wlock = repo.wlock()
try:
if getattr(repo, "_isrebasing", False):
- # We have to take the time to pull down the new bfiles now. Otherwise
- # if we are rebasing, any bfiles that were modified in the changesets we
- # are rebasing on top of get overwritten either by the rebase or in the
- # first commit after the rebase.
+ # We have to take the time to pull down the new bfiles now.
+ # Otherwise if we are rebasing, any bfiles that were
+ # modified in the changesets we are rebasing on top of get
+ # overwritten either by the rebase or in the first commit
+ # after the rebase.
bfcommands.updatebfiles(repo.ui, repo)
# Case 1: user calls commit with no specific files or
# include/exclude patterns: refresh and commit everything.
- if (match is None) or (not match.anypats() and not match.files()):
+ if (match is None) or (not match.anypats() and not \
+ match.files()):
bfiles = bfutil.listbfiles(self)
bfdirstate = bfutil.openbfdirstate(ui, self)
- # this only loops through bfiles that exist (not removed/renamed)
+ # this only loops through bfiles that exist (not
+ # removed/renamed)
for bfile in bfiles:
if os.path.exists(self.wjoin(bfutil.standin(bfile))):
- # this handles the case where a rebase is being performed and the
- # working copy is not updated yet.
+ # this handles the case where a rebase is being
+ # performed and the working copy is not updated
+ # yet.
if os.path.exists(self.wjoin(bfile)):
- bfutil.updatestandin(self, bfutil.standin(bfile))
+ bfutil.updatestandin(self,
+ bfutil.standin(bfile))
bfdirstate.normal(bfutil.unixpath(bfile))
for bfile in bfdirstate:
- if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
+ if not os.path.exists(
+ repo.wjoin(bfutil.standin(bfile))):
path = bfutil.unixpath(bfile)
try:
# Mercurial >= 1.9
@@ -266,10 +301,11 @@
for file in match.files():
if bfutil.isstandin(file):
- raise util.Abort("Don't commit bfile standin. Commit bfile.")
+ raise util.Abort(
+ "Don't commit bfile standin. Commit bfile.")
- # Case 2: user calls commit with specified patterns: refresh any
- # matching big files.
+ # Case 2: user calls commit with specified patterns: refresh
+ # any matching big files.
smatcher = bfutil.composestandinmatcher(self, match)
standins = bfutil.dirstate_walk(self.dirstate, smatcher)
@@ -279,15 +315,15 @@ return orig(text=text, user=user, date=date, match=match,
force=force, editor=editor, extra=extra)
- # Refresh all matching big files. It's possible that the commit
- # will end up failing, in which case the big files will stay
- # refreshed. No harm done: the user modified them and asked to
- # commit them, so sooner or later we're going to refresh the
- # standins. Might as well leave them refreshed.
+ # Refresh all matching big files. It's possible that the
+ # commit will end up failing, in which case the big files will
+ # stay refreshed. No harm done: the user modified them and
+ # asked to commit them, so sooner or later we're going to
+ # refresh the standins. Might as well leave them refreshed.
bfdirstate = bfutil.openbfdirstate(ui, self)
for standin in standins:
bfile = bfutil.splitstandin(standin)
- if bfdirstate[bfile] is not 'r':
+ if bfdirstate[bfile] != 'r':
bfutil.updatestandin(self, standin)
bfdirstate.normal(bfutil.unixpath(bfile))
else:
@@ -301,15 +337,16 @@ bfdirstate.write()
# Cook up a new matcher that only matches regular files or
- # standins corresponding to the big files requested by the user.
- # Have to modify _files to prevent commit() from complaining
- # "not tracked" for big files.
+ # standins corresponding to the big files requested by the
+ # user. Have to modify _files to prevent commit() from
+ # complaining "not tracked" for big files.
bfiles = bfutil.listbfiles(repo)
match = copy.copy(match)
orig_matchfn = match.matchfn
- # Check both the list of bfiles and the list of standins because if a bfile was removed, it
- # won't be in the list of bfiles at this point
+ # Check both the list of bfiles and the list of standins
+ # because if a bfile was removed, it won't be in the list of
+ # bfiles at this point
match._files += sorted(standins)
actualfiles = []
@@ -325,7 +362,8 @@ fstandin += os.sep
# Prevalidate matching standin directories
- if any(st for st in match._files if st.startswith(fstandin)):
+ if bfutil.any_(st for st in match._files if \
+ st.startswith(fstandin)):
continue
actualfiles.append(f)
match._files = actualfiles
@@ -348,7 +386,8 @@ toupload = set()
o = repo.changelog.nodesbetween(o, revs)[0]
for n in o:
- parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
+ parents = [p for p in repo.changelog.parents(n) if p != \
+ node.nullid]
ctx = repo[n]
files = set(ctx.files())
if len(parents) == 2:
@@ -362,19 +401,38 @@ if f not in mc:
files.add(f)
for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+ if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
+ None):
files.add(f)
- toupload = toupload.union(set([ctx[f].data().strip() for f in files if bfutil.isstandin(f) and f in ctx]))
+ toupload = toupload.union(set([ctx[f].data().strip() for f\
+ in files if bfutil.isstandin(f) and f in ctx]))
bfcommands.uploadbfiles(ui, self, remote, toupload)
# Mercurial >= 1.6 takes the newbranch argument, try that first.
try:
- return super(bfiles_repo, self).push(remote, force, revs, newbranch)
+ return super(bfiles_repo, self).push(remote, force, revs,
+ newbranch)
except TypeError:
return super(bfiles_repo, self).push(remote, force, revs)
repo.__class__ = bfiles_repo
+ def checkrequireskbfiles(ui, repo, **kwargs):
+ if 'kbfiles' not in repo.requirements and bfutil.any_('.kbf/' in f[0] \
+ for f in repo.store.datafiles()):
+
+ # work around bug in mercurial 1.9 whereby requirements is a list
+ # on newly-cloned repos
+ repo.requirements = set(repo.requirements)
+
+ repo.requirements |= set(['kbfiles'])
+ repo._writerequirements()
+
+ checkrequireskbfiles(ui, repo)
+
+ ui.setconfig('hooks', 'incoming.kbfiles', checkrequireskbfiles)
+ ui.setconfig('hooks', 'commit.kbfiles', checkrequireskbfiles)
+
# Add works by going through the files that the user wanted to add
# and checking if they should be added as bfiles. Then making a new
# matcher which matches only the normal files and running the original
@@ -395,7 +453,8 @@ try:
bfsize = int(bfsize)
except ValueError:
- raise util.Abort(_('bfiles.size must be integer, was %s\n') % bfsize)
+ raise util.Abort(_('bfiles.size must be integer, was %s\n')
+ % bfsize)
bfmatcher = None
if os.path.exists(repo.wjoin(bfutil.shortname)):
@@ -411,7 +470,7 @@ except ImportError:
# Mercurial <= 1.8
m = cmdutil.match(repo, pats, opts)
- m.bad = lambda x,y: None
+ m.bad = lambda x, y: None
wctx = repo[None]
for f in repo.walk(m):
exact = m.exact(f)
@@ -421,13 +480,13 @@ if exact and bfile:
ui.warn(_('%s already a bfile\n') % f)
continue
- # Don't warn the user when they attempt to add a normal tracked file. The normal add code
- # will do that for us.
+ # Don't warn the user when they attempt to add a normal tracked file.
+ # The normal add code will do that for us.
if exact and nfile:
continue
if exact or (not bfile and not nfile):
- if bf or (bfsize and os.path.getsize(repo.wjoin(f)) >= bfsize*1024*1024) \
- or (bfmatcher and bfmatcher(f)):
+ if bf or (bfsize and os.path.getsize(repo.wjoin(f)) >= \
+ bfsize * 1024 * 1024) or (bfmatcher and bfmatcher(f)):
bfnames.append(f)
if ui.verbose or not exact:
ui.status(_('adding %s as bfile\n') % m.rel(f))
@@ -435,22 +494,24 @@ bad = []
standins = []
- # Need to lock otherwise there could be a race condition inbetween when standins are created
- # and added to the repo
+ # Need to lock; otherwise there could be a race condition between when
+ # standins are created and when they are added to the repo
wlock = repo.wlock()
try:
if not opts.get('dry_run'):
bfdirstate = bfutil.openbfdirstate(ui, repo)
for f in bfnames:
standinname = bfutil.standin(f)
- bfutil.writestandin(repo, standinname, hash='', executable=bfutil.getexecutable(repo.wjoin(f)))
+ bfutil.writestandin(repo, standinname, hash='',
+ executable=bfutil.getexecutable(repo.wjoin(f)))
standins.append(standinname)
if bfdirstate[bfutil.unixpath(f)] == 'r':
bfdirstate.normallookup(bfutil.unixpath(f))
else:
bfdirstate.add(bfutil.unixpath(f))
bfdirstate.write()
- bad += [bfutil.splitstandin(f) for f in bfutil.repo_add(repo, standins) if f in m.files()]
+ bad += [bfutil.splitstandin(f) for f in bfutil.repo_add(repo,
+ standins) if f in m.files()]
finally:
wlock.release()
@@ -461,10 +522,12 @@ # Mercurial <= 1.8
oldmatch = cmdutil.match
manifest = repo[None].manifest()
- def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(repo, pats=[], opts={}, globbed=False,
+ default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not\
+ in manifest
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -481,7 +544,7 @@ result = orig(ui, repo, *pats, **opts)
cmdutil.match = oldmatch
- return (result is 1 or bad) and 1 or 0
+ return (result == 1 or bad) and 1 or 0
def override_remove(orig, ui, repo, *pats, **opts):
wctx = repo[None].manifest()
@@ -491,10 +554,12 @@ except ImportError:
# Mercurial <= 1.8
oldmatch = cmdutil.match
- def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(repo, pats=[], opts={}, globbed=False,
+ default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not\
+ in wctx
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -525,7 +590,8 @@ s = repo.status(match=m, clean=True)
finally:
repo.bfstatus = False
- modified, added, deleted, clean = [[f for f in list if bfutil.standin(f) in wctx] for list in [s[0], s[1], s[3], s[6]]]
+ modified, added, deleted, clean = [[f for f in list if bfutil.standin(f) \
+ in wctx] for list in [s[0], s[1], s[3], s[6]]]
def warn(files, reason):
for f in files:
@@ -546,8 +612,8 @@ if ui.verbose or not m.exact(f):
ui.status(_('removing %s\n') % m.rel(f))
- # Need to lock because standin files are deleted then removed from the repository
- # and we could race inbetween.
+ # Need to lock because standin files are deleted and then removed from
+ # the repository, and we could race in between.
wlock = repo.wlock()
try:
bfdirstate = bfutil.openbfdirstate(ui, repo)
@@ -598,17 +664,20 @@# will merge standins correctly.
def override_update(orig, ui, repo, *pats, **opts):
bfdirstate = bfutil.openbfdirstate(ui, repo)
- s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
+ s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
+ False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
- # Need to lock between the standins getting updated and their bfiles getting updated
+ # Need to lock between the standins getting updated and their bfiles
+ # getting updated
wlock = repo.wlock()
try:
if opts['check']:
mod = len(modified) > 0
for bfile in unsure:
standin = bfutil.standin(bfile)
- if repo['.'][standin].data().strip() != bfutil.hashfile(repo.wjoin(bfile)):
+ if repo['.'][standin].data().strip() != \
+ bfutil.hashfile(repo.wjoin(bfile)):
mod = True
else:
bfdirstate.normal(bfutil.unixpath(bfile))
@@ -624,10 +693,11 @@ return orig(ui, repo, *pats, **opts)
# Override filemerge to prompt the user about how they wish to merge bfiles.
-# This will handle identical edits, and copy/rename + edit without prompting the user.
+# This will handle identical edits, and copy/rename + edit without prompting
+# the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
- # Use better variable names here. Because this is a wrapper we cannot change
- # the variable names in the function declaration.
+ # Use better variable names here. Because this is a wrapper we cannot
+ # change the variable names in the function declaration.
fcdest, fcother, fcancestor = fcd, fco, fca
if not bfutil.isstandin(orig):
return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
@@ -635,23 +705,30 @@ if not fcother.cmp(fcdest): # files identical?
return None
- if fcancestor == fcother: # backwards, use working dir parent as ancestor
+ # backwards, use working dir parent as ancestor
+ if fcancestor == fcother:
fcancestor = fcdest.parents()[0]
if orig != fcother.path():
repo.ui.status(_('merging %s and %s to %s\n')
- % (bfutil.splitstandin(orig), bfutil.splitstandin(fcother.path()), bfutil.splitstandin(fcdest.path())))
+ % (bfutil.splitstandin(orig),
+ bfutil.splitstandin(fcother.path()),
+ bfutil.splitstandin(fcdest.path())))
else:
- repo.ui.status(_('merging %s\n') % bfutil.splitstandin(fcdest.path()))
+ repo.ui.status(_('merging %s\n')
+ % bfutil.splitstandin(fcdest.path()))
- if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():
+ if fcancestor.path() != fcother.path() and fcother.data() == \
+ fcancestor.data():
return 0
- if fcancestor.path() != fcdest.path() and fcdest.data() == fcancestor.data():
+ if fcancestor.path() != fcdest.path() and fcdest.data() == \
+ fcancestor.data():
repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
return 0
if repo.ui.promptchoice(_('bfile %s has a merge conflict\n'
- 'keep (l)ocal or take (o)ther?') % bfutil.splitstandin(orig),
+ 'keep (l)ocal or take (o)ther?') %
+ bfutil.splitstandin(orig),
(_('&Local'), _('&Other')), 0) == 0:
return 0
else:
@@ -675,7 +752,8 @@ except ImportError:
# Mercurial <= 1.8
path = util.canonpath(repo.root, repo.getcwd(), relpath)
- return os.path.join(os.path.relpath('.', repo.getcwd()), bfutil.standin(path))
+ return os.path.join(os.path.relpath('.', repo.getcwd()),
+ bfutil.standin(path))
try:
# Mercurial >= 1.9
@@ -688,9 +766,9 @@ if os.path.isdir(dest):
if not os.path.isdir(makestandin(dest)):
os.makedirs(makestandin(dest))
- # This could copy both bfiles and normal files in one command, but we don't want
- # to do that first replace their matcher to only match normal files and run it
- # then replace it to just match bfiles and run it again
+ # This could copy both bfiles and normal files in one command, but we don't
+ # want to do that. First replace their matcher to only match normal files
+ # and run it, then replace it to just match bfiles and run it again.
nonormalfiles = False
nobfiles = False
try:
@@ -701,10 +779,12 @@ oldmatch = cmdutil.match
try:
manifest = repo[None].manifest()
- def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(repo, pats=[], opts={}, globbed=False,
+ default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f)\
+ not in manifest
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -717,7 +797,7 @@ # Mercurial <= 1.8
cmdutil.match = override_match
result = orig(ui, repo, pats, opts, rename)
- except util.Abort as e:
+ except util.Abort, e:
if str(e) != 'no files to copy':
raise e
else:
@@ -731,24 +811,26 @@ # Mercurial <= 1.8
cmdutil.match = oldmatch
- # The first rename can cause our current working directory to be removed. In that case
- # there is nothing left to copy/rename so just quit.
+ # The first rename can cause our current working directory to be removed.
+ # In that case there is nothing left to copy/rename so just quit.
try:
repo.getcwd()
except OSError:
return result
try:
- # When we call orig below it creates the standins but we don't add them to the dir state
- # until later so lock during that time.
+ # When we call orig below it creates the standins but we don't add them
+ # to the dir state until later so lock during that time.
wlock = repo.wlock()
manifest = repo[None].manifest()
- def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(repo, pats=[], opts={}, globbed=False,
+ default='relpath'):
newpats = []
- # The patterns were previously mangled to add .hgbfiles, we need to remove that now
+ # The patterns were previously mangled to add .hgbfiles, we need to
+ # remove that now
for pat in pats:
- if match_.patkind(pat) == None and bfutil.shortname in pat:
+ if match_.patkind(pat) is None and bfutil.shortname in pat:
newpats.append(pat.replace(bfutil.shortname, ''))
else:
newpats.append(pat)
@@ -758,7 +840,9 @@ m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
- m.matchfn = lambda f: bfutil.isstandin(f) and bfile(bfutil.splitstandin(f)) and orig_matchfn(bfutil.splitstandin(f)) or None
+ m.matchfn = lambda f: bfutil.isstandin(f) and \
+ bfile(bfutil.splitstandin(f)) and \
+ orig_matchfn(bfutil.splitstandin(f)) or None
return m
try:
# Mercurial >= 1.9
@@ -768,7 +852,7 @@ cmdutil.match = override_match
listpats = []
for pat in pats:
- if match_.patkind(pat) != None:
+ if match_.patkind(pat) is not None:
listpats.append(pat)
else:
listpats.append(makestandin(pat))
@@ -780,7 +864,8 @@ if bfutil.shortname in src and bfutil.shortname in dest:
destbfile = dest.replace(bfutil.shortname, '')
if not opts['force'] and os.path.exists(destbfile):
- raise IOError('', _('destination bfile already exists'))
+ raise IOError('',
+ _('destination bfile already exists'))
copiedfiles.append((src, dest))
origcopyfile(src, dest)
@@ -799,12 +884,14 @@ os.makedirs(destbfiledir)
if rename:
os.rename(srcbfile, destbfile)
- bfdirstate.remove(bfutil.unixpath(os.path.relpath(srcbfile, repo.root)))
+ bfdirstate.remove(bfutil.unixpath(os.path.relpath(srcbfile,
+ repo.root)))
else:
util.copyfile(srcbfile, destbfile)
- bfdirstate.add(bfutil.unixpath(os.path.relpath(destbfile, repo.root)))
+ bfdirstate.add(bfutil.unixpath(os.path.relpath(destbfile,
+ repo.root)))
bfdirstate.write()
- except util.Abort as e:
+ except util.Abort, e:
if str(e) != 'no files to copy':
raise e
else:
@@ -823,22 +910,23 @@
return result
-# When the user calls revert, we have to be careful to not revert any changes to other
-# bfiles accidentally. This means we have to keep track of the bfiles that are
-# being reverted so we only pull down the necessary bfiles.
+# When the user calls revert, we have to be careful to not revert any changes
+# to other bfiles accidentally. This means we have to keep track of the bfiles
+# that are being reverted so we only pull down the necessary bfiles.
#
# Standins are only updated (to match the hash of bfiles) before commits.
-# Update the standins then run the original revert (changing the matcher to hit standins
-# instead of bfiles). Based on the resulting standins update the bfiles. Then return the
-# standins to their proper state
+# Update the standins then run the original revert (changing the matcher to hit
+# standins instead of bfiles). Based on the resulting standins update the
+# bfiles. Then return the standins to their proper state
def override_revert(orig, ui, repo, *pats, **opts):
- # Because we put the standins in a bad state (by updating them) and then return them
- # to a correct state we need to lock to prevent others from changing them in their
- # incorrect state.
+ # Because we put the standins in a bad state (by updating them) and then
+ # return them to a correct state we need to lock to prevent others from
+ # changing them in their incorrect state.
wlock = repo.wlock()
try:
bfdirstate = bfutil.openbfdirstate(ui, repo)
- (modified, added, removed, missing, unknown, ignored, clean) = bfutil.bfdirstate_status(bfdirstate, repo, repo['.'].rev())
+ (modified, added, removed, missing, unknown, ignored, clean) = \
+ bfutil.bfdirstate_status(bfdirstate, repo, repo['.'].rev())
for bfile in modified:
bfutil.updatestandin(repo, bfutil.standin(bfile))
@@ -850,7 +938,8 @@ oldmatch = cmdutil.match
try:
ctx = repo[opts.get('rev')]
- def override_match(ctxorrepo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
+ default='relpath'):
if hasattr(ctxorrepo, 'match'):
ctx0 = ctxorrepo
else:
@@ -869,14 +958,17 @@ orig_matchfn = m.matchfn
def matchfn(f):
if bfutil.isstandin(f):
- # We need to keep track of what bfiles are being matched so we know which
- # ones to update later (otherwise we revert changes to other bfiles
- # accidentally). This is repo specific, so duckpunch the repo object to
- # keep the list of bfiles for us later.
- if(orig_matchfn(bfutil.splitstandin(f)) and (f in repo[None] or f in ctx)):
+ # We need to keep track of what bfiles are being
+ # matched so we know which ones to update later
+ # (otherwise we revert changes to other bfiles
+ # accidentally). This is repo specific, so duckpunch
+ # the repo object to keep the list of bfiles for us
+ # later.
+ if orig_matchfn(bfutil.splitstandin(f)) and \
+ (f in repo[None] or f in ctx):
bfileslist = getattr(repo, '_bfilestoupdate', [])
bfileslist.append(bfutil.splitstandin(f))
- repo._bfilestoupdate = bfileslist;
+ repo._bfilestoupdate = bfileslist
return True
else:
return False
@@ -905,8 +997,11 @@ repo._bfilestoupdate = []
for bfile in modified:
if bfile in bfileslist:
- if os.path.exists(repo.wjoin(bfutil.standin(bfile))) and bfile in repo['.']:
- bfutil.writestandin(repo, bfutil.standin(bfile), repo['.'][bfile].data().strip(), 'x' in repo['.'][bfile].flags())
+ if os.path.exists(repo.wjoin(bfutil.standin(bfile))) and bfile\
+ in repo['.']:
+ bfutil.writestandin(repo, bfutil.standin(bfile),
+ repo['.'][bfile].data().strip(),
+ 'x' in repo['.'][bfile].flags())
bfdirstate = bfutil.openbfdirstate(ui, repo)
for bfile in added:
standin = bfutil.standin(bfile)
@@ -1004,7 +1099,8 @@ # if that doesn't work we are probably in Mercurial >= 1.6 where the
# prefix is not handled by the archiver
try:
- archiver = archival.archivers[kind](dest, prefix, mtime or ctx.date()[0])
+ archiver = archival.archivers[kind](dest, prefix, mtime or \
+ ctx.date()[0])
def write(name, mode, islink, getdata):
if matchfn and not matchfn(name):
@@ -1016,7 +1112,8 @@ except TypeError:
if kind == 'files':
if prefix:
- raise util.Abort(_('cannot give prefix when archiving to files'))
+ raise util.Abort(
+ _('cannot give prefix when archiving to files'))
else:
prefix = archival.tidyprefix(dest, kind, prefix)
@@ -1059,8 +1156,11 @@ f = bfutil.splitstandin(f)
def getdatafn():
- with open(path, 'rb') as fd:
+ fd = open(path, 'rb')
+ try:
return fd.read()
+ finally:
+ fd.close()
getdata = getdatafn
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
@@ -1075,9 +1175,10 @@
archiver.done()
-# If a bfile is modified the change is not reflected in its standin until a commit.
-# cmdutil.bailifchanged raises an exception if the repo has uncommitted changes.
-# Wrap it to also check if bfiles were changed. This is used by bisect and backout.
+# If a bfile is modified the change is not reflected in its standin until a
+# commit. cmdutil.bailifchanged raises an exception if the repo has
+# uncommitted changes. Wrap it to also check if bfiles were changed. This is
+# used by bisect and backout.
def override_bailifchanged(orig, repo):
orig(repo)
repo.bfstatus = True
@@ -1103,10 +1204,12 @@ except ImportError:
# Mercurial <= 1.8
oldmatch = cmdutil.match
- def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ def override_match(repo, pats=[], opts={}, globbed=False,
+ default='relpath'):
match = oldmatch(repo, pats, opts, globbed, default)
m = copy.copy(match)
- notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx
+ notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not\
+ in wctx
m._files = [f for f in m._files if notbfile(f)]
m._fmap = set(m._files)
orig_matchfn = m.matchfn
@@ -1134,7 +1237,8 @@ forget = [f for f in forget if bfutil.standin(f) in wctx]
for f in forget:
- if bfutil.standin(f) not in repo.dirstate and not os.path.isdir(m.rel(bfutil.standin(f))):
+ if bfutil.standin(f) not in repo.dirstate and not \
+ os.path.isdir(m.rel(bfutil.standin(f))):
ui.warn(_('not removing %s: file is already untracked\n')
% m.rel(f))
@@ -1142,15 +1246,16 @@ if ui.verbose or not m.exact(f):
ui.status(_('removing %s\n') % m.rel(f))
- # Need to lock because standin files are deleted then removed from the repository
- # and we could race inbetween.
+ # Need to lock because standin files are deleted and then removed from
+ # the repository, and we could race in between.
wlock = repo.wlock()
try:
bfdirstate = bfutil.openbfdirstate(ui, repo)
for f in forget:
bfdirstate.remove(bfutil.unixpath(f))
bfdirstate.write()
- bfutil.repo_remove(repo, [bfutil.standin(f) for f in forget], unlink=True)
+ bfutil.repo_remove(repo, [bfutil.standin(f) for f in forget],
+ unlink=True)
finally:
wlock.release()
@@ -1196,7 +1301,8 @@ for f in mc:
if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
files.add(f)
- toupload = toupload.union(set([f for f in files if bfutil.isstandin(f) and f in ctx]))
+ toupload = toupload.union(set([f for f in files if bfutil.isstandin(f)\
+ and f in ctx]))
return toupload
def override_outgoing(orig, ui, repo, dest=None, **opts):
@@ -1223,10 +1329,10 @@ ui.status(_('kbfiles: %d to upload\n') % len(toupload))
def override_addremove(orig, ui, repo, *pats, **opts):
- # Check if the parent or child has bfiles if they do don't allow it.
- # If there is a symlink in the manifest then getting the manifest throws an exception
- # catch it and let addremove deal with it. This happens in Mercurial's test
- # test-addremove-symlink
+ # Check if the parent or child has bfiles; if they do, don't allow it. If
+ # there is a symlink in the manifest then getting the manifest throws an
+ # exception; catch it and let addremove deal with it. This happens in
+ # Mercurial's test test-addremove-symlink.
try:
manifesttip = set(repo['tip'].manifest())
except util.Abort:
@@ -1239,7 +1345,8 @@ # Manifests are only iterable so turn them into sets then union
for file in manifesttip.union(manifestworking):
if file.startswith(bfutil.shortname):
- raise util.Abort(_('addremove cannot be run on a repo with bfiles'))
+ raise util.Abort(
+ _('addremove cannot be run on a repo with bfiles'))
return orig(ui, repo, *pats, **opts)
@@ -1262,7 +1369,8 @@
def override_rollback(orig, ui, repo, **opts):
result = orig(ui, repo, **opts)
- merge.update(repo, node=None, branchmerge=False, force=True, partial=bfutil.isstandin)
+ merge.update(repo, node=None, branchmerge=False, force=True,
+ partial=bfutil.isstandin)
bfdirstate = bfutil.openbfdirstate(ui, repo)
bfiles = bfutil.listbfiles(repo)
oldbfiles = bfutil.listbfiles(repo, repo[None].parents()[0].rev())
@@ -1280,23 +1388,29 @@
entry = extensions.wrapcommand(commands.table, 'add', override_add)
addopt = [('', 'bf', None, _('add as bfile')),
- ('', 'bfsize', '', _('add all files above this size (in megabytes) as bfiles (default: 10)'))]
+ ('', 'bfsize', '', _('add all files above this size (in megabytes)'
+ ' as bfiles (default: 10)'))]
entry[1].extend(addopt)
- entry = extensions.wrapcommand(commands.table, 'addremove', override_addremove)
+ entry = extensions.wrapcommand(commands.table, 'addremove',
+ override_addremove)
entry = extensions.wrapcommand(commands.table, 'remove', override_remove)
entry = extensions.wrapcommand(commands.table, 'forget', override_forget)
entry = extensions.wrapcommand(commands.table, 'status', override_status)
entry = extensions.wrapcommand(commands.table, 'log', override_log)
- entry = extensions.wrapcommand(commands.table, 'rollback', override_rollback)
+ entry = extensions.wrapcommand(commands.table, 'rollback',
+ override_rollback)
entry = extensions.wrapcommand(commands.table, 'verify', override_verify)
verifyopt = [('', 'bf', None, _('verify bfiles')),
- ('', 'bfa', None, _('verify all revisions of bfiles not just current')),
- ('', 'bfc', None, _('verify bfile contents not just existence'))]
+ ('', 'bfa', None,
+ _('verify all revisions of bfiles not just current')),
+ ('', 'bfc', None,
+ _('verify bfile contents not just existence'))]
entry[1].extend(verifyopt)
- entry = extensions.wrapcommand(commands.table, 'outgoing', override_outgoing)
+ entry = extensions.wrapcommand(commands.table, 'outgoing',
+ override_outgoing)
outgoingopt = [('', 'bf', None, _('display outgoing bfiles'))]
entry[1].extend(outgoingopt)
entry = extensions.wrapcommand(commands.table, 'summary', override_summary)
@@ -1308,7 +1422,8 @@ entry = extensions.wrapfunction(filemerge, 'filemerge', override_filemerge)
entry = extensions.wrapfunction(cmdutil, 'copy', override_copy)
- # Backout calls revert so we need to override both the command and the function
+ # Backout calls revert so we need to override both the command and the
+ # function
entry = extensions.wrapcommand(commands.table, 'revert', override_revert)
entry = extensions.wrapfunction(commands, 'revert', override_revert)
@@ -1321,14 +1436,19 @@
extensions.wrapfunction(archival, 'archive', override_archive)
if hasattr(cmdutil, 'bailifchanged'):
- extensions.wrapfunction(cmdutil, 'bailifchanged', override_bailifchanged)
+ extensions.wrapfunction(cmdutil, 'bailifchanged',
+ override_bailifchanged)
else:
- extensions.wrapfunction(cmdutil, 'bail_if_changed', override_bailifchanged)
+ extensions.wrapfunction(cmdutil, 'bail_if_changed',
+ override_bailifchanged)
for name, module in extensions.extensions():
if name == 'fetch':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch', override_fetch)
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
+ override_fetch)
if name == 'purge':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge', override_purge)
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
+ override_purge)
if name == 'rebase':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase', override_rebase)
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
+ override_rebase)
|
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
|
@@ -0,0 +1,45 @@ + '''HTTP-based store for Kiln.'''
+
+import urllib2
+
+from mercurial import util, url as url_
+
+import bfutil
+import remotestore
+
+class kilnstore(remotestore.remotestore):
+ def __init__(self, ui, repo, remote):
+ self.baseurl = bfutil.urljoin(remote.url(), 'bfile')
+ try:
+ # Mercurial >= 1.9
+ self.baseurl, authinfo = util.url(self.baseurl).authinfo()
+ except AttributeError:
+ # Mercurial <= 1.8
+ self.baseurl, authinfo = url_.getauthinfo(self.baseurl)
+ self.opener = url_.opener(repo.ui, authinfo)
+ super(kilnstore, self).__init__(ui, repo, remote.url())
+
+ def _put(self, hash, fd):
+ try:
+ req = urllib2.Request(bfutil.urljoin(self.baseurl, hash), fd)
+ resp = self.opener.open(req)
+ return self._stat(hash) and 1 or 0
+ except urllib2.HTTPError:
+ return 1
+
+ def _get(self, hash):
+ req = urllib2.Request(bfutil.urljoin(self.baseurl, hash))
+ return (None, self.opener.open(req))
+
+ # '0' for OK, '1' for invalid checksum, '2' for missing
+ def _stat(self, hash):
+ try:
+ req = urllib2.Request(bfutil.urljoin(self.baseurl, hash))
+ req.add_header('SHA1-Request', hash)
+ return int(hash != \
+ self.opener.open(req).info().getheader('Content-SHA1'))
+ except urllib2.HTTPError, e:
+ if e.code == 404:
+ return 2
+ else:
+ raise
|
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
@@ -12,7 +12,7 @@
DEFAULTRC = {
'extensions': [('kbfiles', kilntest.KBFILESPATH),
- ('rebase', '')],
+ ('rebase', '')],
'kilnbfiles': [('systemcache', os.path.join(os.getcwd(), 'bfilesstore')),],
}
|
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
@@ -1,6 +1,6 @@ #!/usr/bin/env python
-# Test that kbfconvert handles tags properly
+# Test that kbfconvert handles tags properly
import os
import common
|
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
This file's diff was not loaded because this changeset is very large. Load changes Loading... |
Loading...