|
# Copyright (C) 2008-2010 Fog Creek Software. All rights reserved.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
from ConfigParser import RawConfigParser
import cStringIO
import codecs
import os
import re
import shutil
import socket
import sys
import urllib
import urllib2
import urlutil
import Image
import simplejson
from mercurial import cmdutil, context, discovery, encoding, error, hg, \
match, node, patch, pushkey, repair, subrepo, templatekw, \
transaction, util
from hgext import relink as relinker
import settings
from annotationcache import cachedannotate as _cachedannotate
from emptyui import emptyui
from webtasks import asyncpost, queue_repo_index
import bfiles
import tailtracking
# Convenience aliases for frequently used Mercurial helpers.
hex = node.hex
propertycache = util.propertycache
def cachedannotate(self, follow=False, linenumber=None):
    """Annotate a filectx through the annotation cache (module-level wrapper).

    NOTE(review): the first argument to _cachedannotate appears to be a cache
    handle; None presumably selects the default — confirm in annotationcache.
    """
    return _cachedannotate(None, self, follow=follow, linenumber=linenumber)
def uc(s):
    """clean the Unicode string, replacing errors"""
    # The UTF-8 BOM, as a unicode string, is stripped from the front either way.
    bom = unicode(codecs.BOM_UTF8, 'utf8')
    if isinstance(s, unicode):
        return s.lstrip(bom)
    return unicode(s, encoding='utf8', errors='replace').lstrip(bom)
def ensurenewline(s):
    """Return s unchanged if it ends with a newline, else s with one appended."""
    if s.endswith('\n'):
        return s
    return s + '\n'
def hexencode(bytestr):
    """converts a byte string to its hex string representation

    Every byte of the input is encoded as two uppercase hex digits.
    (A previous version silently dropped a trailing tab when the string
    also contained a space, corrupting round-trips with hexdecode.)
    """
    return ''.join(["%02X" % ord(x) for x in bytestr])
def hexdecode(hexstr):
    """converts a hex string to the equivalent byte string"""
    pairs = (hexstr[i:i + 2] for i in xrange(0, len(hexstr), 2))
    return ''.join(chr(int(pair, 16)) for pair in pairs)
def pathjoin(path, name):
    """returns path/name if path and name are set; otherwise returns whichever is set."""
    if path and name:
        return '%s/%s' % (path, name)
    return path or name
def filetuple(filename):
    """Split a repo-relative filename into its path/name/bytepath dict form."""
    # rpartition yields ('', '', name) semantics equivalent to split('/')[-1]
    path, _, name = filename.rpartition('/')
    return {'path': uc(path), 'name': uc(name), 'bytepath': hexencode(pathjoin(path, name))}
def isnumber(x):
    """return whether x is a number
    This is taken from Peter Norvig. I therefore assume it's safe."""
    # Numbers are duck-typed: anything convertible to int counts.
    return getattr(x, '__int__', None) is not None
def reportexception(e):
    """Report exception *e* to FogBugz (hosted) or the local error log.

    No-op when DEBUG is on; best-effort — reporting failures are swallowed.
    """
    if settings.DEBUG:
        return
    def gettraceback():
        import traceback
        return '\n'.join(traceback.format_exception(*(sys.exc_info())))
    traceback = gettraceback()
    # Payload matches the FogBugz "Scout" automatic-bug-submission API.
    bug = {'ScoutUserName': settings.FOGBUGZ_USERNAME,
           'ScoutProject': settings.FOGBUGZ_PROJECT,
           'ScoutArea': settings.FOGBUGZ_AREA,
           'Description': 'Backend exception: %s' % e,
           'Extra': traceback}
    if settings.HOSTED:
        try:
            urllib2.urlopen(settings.FOGBUGZ_URL, urllib.urlencode(bug))
        except:
            # reporting must never raise from an error path
            pass
    else:
        from errorloggingmiddleware import _log_error
        _log_error(bug)
def determinedisplaysize((width, height), max=(500,500)):
    """Scale (width, height) to fit within *max*, preserving aspect ratio.

    Uses Python 2 integer division when the inputs are ints, so results are
    truncated to whole pixels. Returns [width, height] unchanged when the
    image already fits.
    """
    if width > height:
        if width > max[0]:
            return [max[0], height * max[0] / width]
    else:
        if height > max[1]:
            return [width * max[1] / height, max[1]]
    return [width, height]
def comparemetadata(im1, im2, pending):
    """Record image metadata differences between im1 and im2 into *pending*.

    Sets 'issamesize' when display sizes match and builds a 'metadiff' list
    of format/duration/transparency/size/mode changes.
    """
    if determinedisplaysize(im1.size) == determinedisplaysize(im2.size):
        pending['issamesize'] = True
    #try to understand metadata changes, and record in pending
    pending['metadiff'] = []
    #format
    if im1.format != im2.format:
        format = {'type': 'format', 'oldvalue': im1.format, 'newvalue': im2.format}
        pending['metadiff'].append(format)
    else:
        #special attributes
        # Use .get(): these keys are optional in PIL's info dict, and direct
        # indexing raised KeyError when a key was absent from either image.
        if im1.format in ("FLI", "FLC", "GIF") and im1.info.get('duration') != im2.info.get('duration'):
            duration = {'type': 'duration', 'oldvalue': im1.info.get('duration'), 'newvalue': im2.info.get('duration')}
            pending['metadiff'].append(duration)
        if im1.format in ("GIF", "PNG", "XPM") and im1.info.get('transparency') != im2.info.get('transparency'):
            transparency = {'type': 'transparency', 'oldvalue': im1.info.get('transparency'), 'newvalue': im2.info.get('transparency')}
            pending['metadiff'].append(transparency)
    #size
    if im1.size != im2.size:
        size = {'type': 'size',
                'oldvalue': '%sx%s' % im1.size,
                'newvalue': '%sx%s' % im2.size}
        pending['metadiff'].append(size)
    #mode
    if im1.mode != im2.mode:
        mode = {'type': 'mode', 'oldvalue': im1.mode, 'newvalue': im2.mode}
        pending['metadiff'].append(mode)
class RepositoryNotSubsetException(Exception):
    """Raised when the pushee repository is not a strict subset of the pusher."""
    def __init__(self, pusher, pushee):
        self.pusher = pusher
        self.pushee = pushee
    def __str__(self):
        template = "repository %s is not a strict subset of repository %s"
        return template % (self.pushee.uuid, self.pusher.uuid)
class CreatesNewHeadsException(Exception):
    """Raised when a push would introduce new heads into the pushee."""
    def __init__(self, pusher, pushee):
        self.pusher = pusher
        self.pushee = pushee
    def __str__(self):
        template = "pushing from %s to %s would introduce new heads"
        return template % (self.pusher.uuid, self.pushee.uuid)
class Repository(object):
    def __init__(self, uuid, suppresshooks=False, suppressoutput=True, repo=None):
        """Bind this wrapper to the repository identified by *uuid*.

        Passing *repo* pre-populates the lazily computed ``repo`` property.
        """
        self.uuid = uuid
        self.suppresshooks = suppresshooks
        self.suppressoutput = suppressoutput
        if repo is not None:
            # shadows the propertycache'd self.repo, skipping the lazy open
            self.repo = repo
    def annotate(self, path, rev, count=None):
        """provide line annotations for the provided file

        Returns at most *count* line records (count of 0 or None means all),
        each with the originating revision, date, user, original line number,
        and the source file when the line came from a different path.
        """
        r = self.repo
        ctxs = {}
        lines = []
        line = 0
        # count == 0 means "no limit", same as None
        if count == 0: count = None
        for l in cachedannotate(r[rev][path], follow=True, linenumber=True):
            line = line + 1
            if count is not None and line > count:
                break
            rev = l[0][0].rev()
            # memoize changectx lookups per revision
            if rev not in ctxs:
                ctxs[rev] = l[0][0].changectx()
            ctx = ctxs[rev]
            file = None
            # record origin file only when the line was followed from another path
            if l[0][0].path() != path:
                file = filetuple(l[0][0].path())
            lines.append({'rev': (rev, ctx.hex()),
                          'date': ctx.date(),
                          'user': ctx.user(),
                          'origline': l[0][1],
                          'file': file})
        return lines
    def annotateline(self, path, rev, linenum, count):
        """annotate the provided line for the past count revisions
        Because of the DAG nature of Mercurial, what qualifies as the
        past three revisions of a given filerev can be somewhat complicated.
        We arbitrarily declare it to be the count highest revids with
        changes. In the future, this function will change to contain
        sufficient information to construct a graph."""
        r = self.repo
        revs = {}
        ctxs = {}
        stack = [r[rev][path]]
        rev = stack[0].rev()
        while len(revs) < count:
            fctx = stack.pop()
            lines = cachedannotate(fctx, follow=True, linenumber=True)
            # the file shrank below linenum at this point in history; stop
            if len(lines) < linenum:
                break
            (fctx, origline), line = lines[linenum - 1]
            rev = fctx.rev()
            if rev not in ctxs:
                ctxs[rev] = fctx.changectx()
            ctx = ctxs[rev]
            file = None
            if fctx.path() != path:
                file = filetuple(fctx.path())
            revs[rev] = {'type': 'lineannotation',
                         'rev': (ctx.rev(), ctx.hex()),
                         'date': ctx.date(),
                         'user': ctx.user(),
                         'description': ctx.description(),
                         'file': file,
                         'origline': origline}
            parents = fctx.parents()
            if not parents:
                break
            stack.extend(parents)
        revs = revs.values()
        # newest revisions first (Python 2 cmp-style sort)
        revs.sort(lambda r1, r2: r1['rev'][0] - r2['rev'][0], reverse=True)
        return revs
    def branches(self):
        """Map active, open branch names to their head's hex/rev pair."""
        r = self.repo
        activebranches = [r[n].branch() for n in r.heads()]
        def testactive(tag, node):
            # a branch counts only if it has a real head that is still open
            realhead = tag in activebranches
            open = node in r.branchheads(tag, closed=False)
            return realhead and open
        branches = dict([(uc(tag),
                          {'hex': hex(r.lookup(r.changelog.rev(node))),
                           'rev': r.changelog.rev(node)})
                         for tag, node in r.branchtags().items()
                         if testactive(tag, node)])
        return branches
    def changesbetweentags(self, tag1, tag2, includelow=False):
        """Return the changesets between two tags as _ctx dicts.

        Computed as ancestors(high) minus ancestors(low); *includelow*
        controls whether the lower tag's own changeset is included.
        Returns [] when either tag does not exist.
        """
        r = self.repo
        if tag1 not in r.tags().keys() or tag2 not in r.tags().keys():
            return []
        (low, high) = [rev for rev in sorted([r[tag1], r[tag2]], lambda x,y: x.rev() - y.rev())]
        lowset = set(low.ancestors())
        # when not including low, add it to the subtracted set so it drops out
        if not includelow:
            lowset.add(low)
        highset = set(high.ancestors())
        highset.add(high)
        return list([self._ctx(rev, None) for rev in highset - lowset])
    def create(self, meta):
        """Create or add the repository at the specified path"""
        u = self.ui
        p = self._hg_path(force_new=True)
        try:
            # best-effort: the parent directory may already exist
            os.makedirs(os.path.dirname(p))
        except:
            pass
        hg.repository(u, self._hg_path(force_new=True), create=1)
        try:
            self._add_meta(meta)
        except Exception, e:
            # report, but still propagate so the caller knows creation failed
            reportexception(e)
            raise
    def _ctx(self, ctx, changedfiles, fnrename=None):
        """turn a Mercurial context into a Python dictionary

        *changedfiles* adds a 'files' list (including merge-changed files);
        *fnrename*, when given, maps a hex rev to rename information.
        """
        d = {'user': uc(ctx.user()),
             'date': ctx.date(),
             'rev': (ctx.rev(), ctx.hex()),
             'description': uc(ctx.description()),
             'branch': uc(ctx.branch()),
             'tags': [uc(s) for s in ctx.tags()],
             'extra': dict((uc(k), uc(v)) for k, v in ctx.extra().iteritems()),
             'parents': [(p.rev(), p.hex()) for p in ctx.parents()]}
        if changedfiles:
            # Generate list of changed files
            files = set(ctx.files())
            # merges: compare the manifest against both parents explicitly,
            # since ctx.files() underreports for merge changesets
            if node.nullid not in ctx.parents() and len(ctx.parents()) == 2:
                mc = ctx.manifest()
                mp1 = ctx.parents()[0].manifest()
                mp2 = ctx.parents()[1].manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            d['files'] = [filetuple(f) for f in files]
        if fnrename:
            rename = fnrename(ctx.hex())
            if rename:
                d['rename'] = {'file': filetuple(rename[0]),
                               'rev': hex(self.repo.filectx(rename[0], fileid=rename[1]).node())}
        return d
def changeset(self, rev, changedfiles=False):
"""return a dictionary representing a changeset at the specified rev"""
r = self.repo
return self._ctx(r[rev], changedfiles)
def changesets(self, revs, changedfiles=False):
"""return an array of changesets
If revs represents a contiguous enumeration of changesets, you should
use changesetrange instead for higher performance"""
return [self.changeset(rev, changedfiles) for rev in revs]
    def changesetrange(self, rev1, rev2, changedfiles=False):
        """return a collection of changesets between the two revisions"""
        r = self.repo
        # normalize to numeric revs so the range is inclusive on both ends
        rev1 = r[rev1].rev()
        rev2 = r[rev2].rev()
        return [self._ctx(r[rev], changedfiles) for rev in xrange(rev1, rev2 + 1)]
    def cloneto(self, uuid, meta):
        """clone this repository to the supplied UUID"""
        try:
            u = self.ui
            path = Repository(uuid)._hg_path(force_new=True)
            try:
                # best-effort: the parent directory may already exist
                os.makedirs(os.path.dirname(path))
            except:
                pass
            hg.clone(u, {}, self._hg_path(), encoding.tolocal(path), update=False)
            r = Repository(uuid)
            r._add_meta(meta)
            if settings.DO_INDEXING:
                queue_repo_index(uuid)
            return r
        except Exception, e:
            reportexception(e)
            raise
    def commontags(self, revids, numtags):
        """return the closest tag which belongs to a common child

        Builds a forward (child) graph from the lowest requested rev, walks
        descendants of each requested rev collecting tagged revs, then
        returns up to *numtags* tags common to all walks, lowest rev first.
        """
        revs = [self.repo[rev].rev() for rev in revids]
        low = min(rev for rev in revs)
        ctxs = {}
        # build children lists so we can walk the DAG forward
        for rev in xrange(low, len(self.repo)):
            ctx = self.repo[rev]
            ctxs[rev] = {'children': [], 'tags': ctx.tags()}
            for p in self.repo.changelog.parentrevs(rev):
                if p >= low:
                    ctxs[p]['children'].append(rev)
        mastertags = None
        for rev in revs:
            queue = [rev]
            tags = []
            visited = set()
            while queue:
                rev = queue.pop()
                ctx = ctxs[rev]
                if ctx['tags']:
                    tags.append(rev)
                visited.add(rev)
                queue.extend(c for c in ctx['children'] if c not in visited)
            # intersect tagged descendants across all starting revs
            if not mastertags:
                mastertags = set(tags)
            else:
                mastertags = mastertags.intersection(tags)
        mastertags = sorted(mastertags)
        closesttags = sum([ctxs[rev]['tags'] for rev in sorted(mastertags)],[])[:numtags]
        return closesttags
def delete(self):
"""delete this repository from disk"""
shutil.rmtree(self.path(), True)
    def diff(self, rev1, rev2=None, filename=None, maxsize=None, ignorews=False):
        """diff the provided revisions, optionally specific to a filename
        Mercurial internally uses git diffs, which, unfortunately, are
        rather tempermental. This function therefore attempts to extract
        structured data out of Mercurial's diff system, and return it,
        ready for consumption, to the calling process."""
        filemaxsize = None
        gitre = re.compile('diff --git a/(.*) b/(.*)')
        def axn(type, oldname=None, modes=None):
            # Build an 'action' record describing what happened to the file.
            d = {'type': type, 'modes': modes}
            if oldname:
                d['oldname'] = filetuple(oldname)
            return d
        class bytecounts(object):
            # Mutable counter so nested amenddiffs() can accumulate bytes seen.
            def __init__(self):
                self.total = 0
        def amenddiffs(patch, pending, counts):
            # Only the first 5 lines of patch need to be seperated from the rest
            # in order to compute a diff. (5 are needed for mode change + rename/copy)
            lines = patch.rstrip('\n').split('\n', 4)
            if lines[0].startswith('diff --git'):
                # new patches start with git patches. Finish the previous
                # patch and begin a new one
                if pending:
                    diffs.append(pending)
                pending = {'type': 'diff', 'diff': None}
                # Take a best guess at the filename now, in case we don't
                # get better data (e.g., binary diff)
                m = gitre.match(lines[0])
                pending['file'] = filetuple(m.group(1))
                if len(lines) > 1:
                    # handle mode change first
                    if lines[1].startswith('old mode'):
                        oldmode = lines[1][9:].strip()
                        newmode = lines[2][9:].strip()
                        pending['action'] = axn('mode', modes=(oldmode, newmode))
                        lines = [lines[0]] + lines[3:]
                        if len(lines) == 1:
                            # the only thing in the diff was the mode change
                            return pending
                    if lines[1].startswith('deleted file'):
                        pending['action'] = axn('deletion')
                    elif lines[1].startswith('rename'):
                        oldfilename = lines[1][12:]
                        filename = lines[2][10:]
                        pending['file'] = filetuple(filename)
                        pending['action'] = axn('rename', oldname=oldfilename)
                    elif lines[1].startswith('new file'):
                        pending['file'] = filetuple(m.group(2))
                        pending['action'] = axn('creation')
                    elif lines[1].startswith('copy'):
                        oldfilename = lines[1][10:]
                        filename = lines[2][8:]
                        pending['file'] = filetuple(filename)
                        pending['action'] = axn('copy', oldname=oldfilename)
                    else:
                        raise Exception('unknown git patch')
                return pending
            elif lines[0].startswith('index ') or lines[0].startswith('Binary file '):
                # We cannot improve on whatever the git diff parser ripped out;
                # just return what we already got
                try:
                    pending['issamesize'] = False
                    try:
                        im1 = Image.open(cStringIO.StringIO(self.repo[rev1].filectx(pathjoin(pending['file']['path'], pending['file']['name'])).data()))
                        try:
                            im2 = Image.open(cStringIO.StringIO(self.repo[rev2].filectx(pathjoin(pending['file']['path'], pending['file']['name'])).data()))
                            comparemetadata(im1,im2, pending)
                        except LookupError:
                            pass
                    except LookupError:
                        # file absent at rev1 (e.g. created); try rev2 only
                        im2 = Image.open(cStringIO.StringIO(self.repo[rev2].filectx(pathjoin(pending['file']['path'], pending['file']['name'])).data()))
                    pending['diff'] = 'Image file'
                    pending['file']['filetype'] = 'image'
                except IOError, e:
                    # not an image PIL can parse; report a plain binary diff
                    pending['diff'] = 'Binary file'
                    pending['file']['filetype'] = 'binary'
                return pending
            # Normal diff
            if 'action' in pending and pending['action']['type'] in ('rename', 'creation', 'copy'):
                filename = lines[1][6:]
            else:
                filename = lines[0][6:]
            # big-file standins are surfaced separately below
            if self.isbfilestandin(filename):
                return {}
            components = filename.split('/')
            path = '/'.join(components[:-1])
            name = components[-1]
            pending['file'] = {'path': uc(path), 'name': uc(name), 'bytepath': hexencode(pathjoin(path, name))}
            counts.total += sum(len(l) for l in lines[2:])
            if maxsize is None or maxsize >= counts.total:
                s = ensurenewline(u'\n'.join([uc(l) for l in lines[2:]]))
                if filemaxsize and len(s) > filemaxsize:
                    s = ensurenewline(s[:filemaxsize])
                    pending['truncated'] = True
                pending['diff'] = s
            else:
                # overall byte budget exhausted: mark truncated, omit body
                pending['truncated'] = True
            return pending
        r = self.repo
        if not rev2:
            # single-rev diff: compare against the first parent
            rev2 = r[rev1].rev()
            rev1 = r[rev2].parents()[0].rev()
        rev1 = r[rev1].hex()
        rev2 = r[rev2].hex()
        if filename:
            m = match.match(r.root, None, patterns=[filename], default='path')
            # per-file diffs cap the single file body, not the overall budget
            filemaxsize = maxsize
            maxsize = None
        else:
            m = match.always(r.root, None)
        patches = patch.diff(r, rev1, rev2, match=m, opts=patch.diffopts(r.ui, {'git': True, 'ignore_all_space': ignorews}))
        diffs = []
        pending = None
        counts = bytecounts()
        for p in patches:
            pending = amenddiffs(p, pending, counts)
        if pending:
            diffs.append(pending)
        bfilesrev1 = self.listbfiles(rev1)
        bfilesrev2 = self.listbfiles(rev2)
        def bfilediff(path, rev1=None, rev2=None, action=None):
            # Dispatch on which side(s) of the diff the big file exists on.
            if rev1 != None:
                if rev2 != None:
                    d = bfilemetaimagediff(path,rev1,rev2)
                else:
                    d = bfileimagediff(path,rev1)
            elif rev2 != None:
                d = bfileimagediff(path,rev2)
            else:
                d = {'diff': 'Binary file', 'type': 'diff',
                     'file': {'path': uc(os.path.dirname(path)), 'bytepath': hexencode(os.path.basename(path)),
                              'name': uc(os.path.basename(path)), 'filetype': 'binary'}}
            if action is not None:
                d['action'] = action
            return d
        def bfileimagediff(path, rev):
            # Classify a one-sided big file as image or binary.
            try:
                Image.open(cStringIO.StringIO(self.filecontents(path, rev, raw=True)))
                d = {'diff': 'Image file', 'type': 'diff',
                     'file': {'path': uc(os.path.dirname(path)), 'bytepath': hexencode(os.path.basename(path)),
                              'name': uc(os.path.basename(path)), 'filetype': 'image'}}
            except IOError:
                d = {'diff': 'Binary file', 'type': 'diff',
                     'file': {'path': uc(os.path.dirname(path)), 'bytepath': hexencode(os.path.basename(path)),
                              'name': uc(os.path.basename(path)), 'filetype': 'binary'}}
            return d
        def bfilemetaimagediff(path, rev1, rev2):
            # Two-sided big file: include image metadata diff when possible.
            try:
                im1 = Image.open(cStringIO.StringIO(self.filecontents(path, rev1, raw=True)))
                im2 = Image.open(cStringIO.StringIO(self.filecontents(path, rev2, raw=True)))
                d = {'diff': 'Image file', 'type': 'diff',
                     'file': {'path': uc(os.path.dirname(path)), 'bytepath': hexencode(os.path.basename(path)),
                              'name': uc(os.path.basename(path)), 'filetype': 'image'}}
                comparemetadata(im1, im2, d)
            except IOError:
                d = {'diff': 'Binary file', 'type': 'diff',
                     'file': {'path': uc(os.path.dirname(path)), 'bytepath': hexencode(os.path.basename(path)),
                              'name': uc(os.path.basename(path)), 'filetype': 'binary'}}
            return d
        for bfile in bfilesrev1:
            if bfile not in bfilesrev2:
                diffs.append(bfilediff(bfile, rev1=rev1, action={'type': 'deletion', 'modes': None}))
            elif self.getbfilesha(bfile, rev1) != self.getbfilesha(bfile, rev2):
                diffs.append(bfilediff(bfile, rev1, rev2))
        for bfile in bfilesrev2:
            if bfile not in bfilesrev1:
                diffs.append(bfilediff(bfile, rev2=rev2, action={'type': 'creation', 'modes': None}))
        return diffs, counts.total
    def getbfilesha(self, path, rev='tip'):
        """Return the content hash stored in the big file's standin at *rev*."""
        return self.repo[rev][pathjoin(bfiles.standinprefix, path)].data().strip()
    def isbfile(self, path, rev='tip'):
        """Return True when *path* is tracked as a big file (has a standin) at *rev*."""
        try:
            self.repo[rev][pathjoin(bfiles.standinprefix, path)]
            return True
        except:
            # missing standin, unknown rev, etc. all mean "not a bfile"
            return False
def listbfiles(self, rev='tip'):
return [s[len(bfiles.standinprefix + '/'):]
for s in self.repo[rev].manifest()
if s.startswith(bfiles.standinprefix + '/')]
    def isbfilestandin(self, path):
        """Return True when *path* lives under the big-file standin prefix."""
        return path.startswith(bfiles.standinprefix + '/')
    def directorylisting(self, path, rev='tip'):
        """return a directory listing for the specified path
        Dictionaries conforming to the Kiln backend spec are returned."""
        r = self.repo
        ctx = r[rev]
        substate = subrepo.state(ctx, self.repo.ui)
        if path and path[-1] != '/':
            path += '/'
        l = len(path)
        # candidates are (relative path, is-bfile) pairs from the manifest,
        # the big-files list, and direct subrepo entries
        candidates = [(s[l:], False) for s in ctx.manifest() if not self.isbfilestandin(s) and s.startswith(path) and len(s) > l]
        candidates.extend((s[l:], True) for s in self.listbfiles() if s.startswith(path) and len(s) > l)
        candidates.extend((s[l:], False) for s in substate.keys() if s.startswith(path) and len(s) > l and '/' not in s[l:])
        if not candidates:
            # nonexistent directory vs empty repository root
            return None if path else []
        listing = {}
        ctxcache = {}
        for s, isbfile in candidates:
            components = s.split('/')
            f = components[0]
            listing[f] = {'path': uc(path), 'name': uc(f), 'bytepath': hexencode(path + f)}
            if (path + s) in substate:
                listing[f]['type'] = 'subrepo'
                listing[f]['subdata'] = substate[path+s]
            elif len(components) > 1:
                listing[f]['type'] = 'directory'
            else:
                if isbfile:
                    fctx = ctx[bfiles.standinprefix + '/' + path + f]
                else:
                    fctx = ctx[path + f]
                # memoize changectx lookups by linkrev
                if fctx.linkrev() not in ctxcache:
                    ctxcache[fctx.linkrev()] = r[fctx.linkrev()]
                c = ctxcache[fctx.linkrev()]
                listing[f]['type'] = 'file'
                listing[f]['flags'] = fctx.flags()
                listing[f]['size'] = int(bfiles.getbfilesize(self.getbfilesha(path + f, rev))) if isbfile else fctx.size()
                listing[f]['rev'] = (c.rev(), c.hex())
                listing[f]['date'] = c.date()
        return listing.values()
def exists(self):
"""Returns true if a repository exists at self.path"""
try:
self.repo
return True
except error.RepoError:
return False
except util.Abort:
return False
    def filecontents(self, path, rev='tip', raw=False):
        """return the complete file contents at the provided revision

        Big files are read from the bfile store. Text files are returned as
        cleaned unicode unless *raw* is set; binary files are returned as-is.
        Returns None when the file cannot be read.
        """
        if self.isbfile(path, rev):
            fd = bfiles.bfilecontents(self.getbfilesha(path, rev))
            data = fd.read()
            fd.close()
            return data
        try:
            s = self.repo[rev].filectx(path).data()
            if raw or util.binary(s):
                return s
            return uc(s)
        except:
            return None
    def _filectx(self, fctx):
        """converts a Mercurial file change context to a dictionary"""
        ctx = fctx.changectx()
        return {'file': filetuple(fctx.path()),
                'user': uc(ctx.user()),
                'date': ctx.date(),
                'rev': (ctx.rev(), ctx.hex()),
                'description': uc(ctx.description()),
                'branch': ctx.branch(),
                'tags': [uc(s) for s in ctx.tags()],
                # parents are file-level parents, not changeset parents
                'parents': [(p.rev(), p.changectx().hex()) for p in fctx.parents()]}
    def filechangeset(self, path, rev='tip'):
        """return a dictionary representing the file changeset requested"""
        # big files are tracked via their standins
        if self.isbfile(path, rev):
            path = pathjoin(bfiles.standinprefix, path)
        r = self.repo
        return self._filectx(r[rev][path])
    def filechangesets(self, path, rev1, rev2, limit=None):
        """return a history of the given file between the provided revisions"""
        if self.isbfile(path, rev1) or self.isbfile(path, rev2):
            path = pathjoin(bfiles.standinprefix, path)
        r = self.repo
        rev = ['%s:%s' % (rev1, rev2)]
        if not rev1 and not rev2:
            rev = None
        removed = False
        # walkchangerevs is much slower with removed=True. We can make an
        # imperfect assumption that we don't care about removals, except when
        # getting all revs or when the file has does not exist at the tip.
        # Big speedup without generally losing data.
        if not rev or (path not in r[rev2].manifest()):
            removed = True
        matchfn = match.exact(r.root, None, [path])
        log = [c for c in cmdutil.walkchangerevs(r, matchfn, {'rev': rev, 'removed': removed}, lambda *args: None)]
        if limit:
            # keep the most recent *limit* entries
            log = log[-limit:]
        # getrenamedfn answers if a given file was renamed at a given revision, in a cache-friendly way
        _fnrename = templatekw.getrenamedfn(r, rev2)
        def fnrename(rev):
            return _fnrename(path, rev)
        return [self._ctx(c, False, fnrename) for c in log]
    def hasfile(self, path, rev='tip'):
        """Return True when *path* exists (as a bfile or a tracked file) at *rev*."""
        if self.isbfile(path, rev):
            return True
        try:
            self.repo[rev][path]
            return True
        except:
            return False
def hasrevision(self, rev):
"""returns true if the provided revision exists in this repository
rev may be a revid, nodeid, or tag"""
r = self.repo
try:
r[rev]
return True
except:
return False
    def isrelated(self, r2):
        """return true if this repository is related to r2
        This test passes if revid 0 refers to the same changeset on both repositories,
        or the target repository is empty. No proviso is made for checking all tails
        of the repository."""
        r1 = self.repo
        r2 = r2.repo
        return len(r2.changelog) == 0 or r1[0].hex() == r2[0].hex()
    def outgoing(self, r2):
        """return a list of dictionaries representing the outgoing changesets"""
        r1 = self.repo
        r2 = r2.repo
        # findoutgoing returns the base nodes from which outgoing changes hang
        o = discovery.findoutgoing(r1, r2)
        if not o:
            return []
        else:
            return [self._ctx(r1[ctx], False) for ctx in r1.changelog.nodesbetween(o, None)[0]]
    def manifest(self, rev='tip'):
        """List all files at *rev*: regular tracked files plus big files."""
        mani = [{'fullpath': uc(name), 'bytepath': hexencode(name)}
                for name in self.repo[rev].manifest().keys()
                if not self.isbfilestandin(name)]
        bfs = [{'fullpath': uc(name), 'bytepath': hexencode(name)}
               for name in self.listbfiles(rev)]
        # de-duplicate: a bfile may also appear in the regular manifest
        return mani + [d for d in bfs if d not in mani]
    @property
    def parent(self):
        """return the parent of this repository, based on the hgrc file"""
        try:
            hgrc = RawConfigParser()
            hgrc.read(os.path.join(self.path(), '.hg', 'hgrc'))
            return Repository(hgrc.get('paths', 'default'))
        except:
            # no hgrc, or no default path configured
            return None
def path(self, force_new=False):
"""return the fully qualified local path to this repository"""
new_path = os.path.join(settings.KILN_REPOSITORY_ROOT, self.uuid[:2], self.uuid[2:4], self.uuid)
old_path = os.path.join(settings.KILN_REPOSITORY_ROOT, self.uuid)
if force_new or os.path.exists(new_path):
return new_path
elif os.path.exists(old_path):
return old_path
return new_path
    def autopull(self, repo2, url):
        """pull the other repository to this repository
        Following lengthy discussions of how auto-pull should work,
        the decision at the moment is to always pull, and then to
        attempt a merge. If the merge cannot continue, the pulled
        files still stay."""
        def commitfunc(ui, repo, message, match, opts):
            return repo.commit('Automated merge', 'Kiln', None, match)
        success = False
        try:
            r1 = self.repo
            r1.ui.setconfig('kiln', 'url', url)
            r1.ui.setconfig('kiln', 'source', repo2.uuid)
            lock = r1.lock()
            r2 = repo2.repo
            try:
                r1.pull(r2)
                hg.clean(r1, 'tip')
                # merge iff there are two heads
                branch = r1[None].branch()
                bheads = r1.branchheads(branch)
                if len(bheads) == 2:
                    # NOTE(review): bheads[0] is a node and should always be
                    # truthy, so this always selects bheads[-1] — confirm the
                    # intended head-selection logic here.
                    rev = bheads[-1] if bheads[0] else bheads[0]
                    # False means success on merge
                    if not hg.merge(r1, rev):
                        cmdutil.commit(r1.ui, r1, commitfunc, [], {})
                        success = True
            except:
                # best-effort: a failed merge leaves the pulled changes in place
                pass
        finally:
            lock.release()
        return success
    def push(self, r2, url, pusher=None):
        """push this repository to another repository

        Raises RepositoryNotSubsetException when r2 has outgoing changes,
        and CreatesNewHeadsException when the push would create new heads.
        Returns True when changesets were actually pushed.
        """
        try:
            lock1 = self.repo.lock()
            lock2 = r2.repo.lock()
            # push prints its errors to stderr, annoyingly, so silence it
            # the old fashioned way
            r1 = self.repo
            r1.ui.pushbuffer()
            success = False
            if r2.outgoing(self):
                raise RepositoryNotSubsetException(self, r2)
            elif discovery.prepush(r1, r2.repo, False, None, True)[0] is None:
                raise CreatesNewHeadsException(self, r2)
            else:
                r2 = r2.repo
                # annotate the receiving ui so the kiln hooks know the context
                r2.ui.setconfig('kiln', 'ixperson', pusher)
                r2.ui.setconfig('kiln', 'url', url)
                r2.ui.setconfig('kiln', 'source', self.uuid)
                r2.ui.setconfig('kiln', 'client', 'website')
                success = r1.push(r2, newbranch=True) != 0
            r1.ui.popbuffer()
        finally:
            try:
                # lock2 may never have been acquired; ignore release failures
                lock2.release()
            except:
                pass
            lock1.release()
        return success
    def pushwouldmakeheads(self, r2):
        """Return True when pushing to *r2* would create new remote heads."""
        r1 = self.repo
        r2 = r2.repo
        # prepush is chatty; buffer its output
        r1.ui.pushbuffer()
        prepush = discovery.prepush(r1, r2, False, None, True)
        r1.ui.popbuffer()
        return prepush[0] is None and prepush[1] == 0
    def pull(self, url):
        """pull into this repository from another repository"""
        r1 = self.repo
        r2 = hg.repository(self.ui, urlutil.urljoin(url, 'serve'))
        self.recover()
        # fetch any big files the remote has that the local store lacks
        for bfile in simplejson.loads(urllib2.urlopen(urlutil.urljoin(url, 'bfile')).read()):
            if bfiles.ishash(bfile) and not bfiles.instore(bfile):
                resp = urllib2.urlopen(urlutil.urljoin(url, 'bfile', bfile))
                bfiles.storebfile(resp, bfile)
        ret = r1.pull(r2)
        return ret
def recover(self):
"""recover this repo, if necessary, quietly"""
r = self.repo
r.ui.pushbuffer()
lock = r.lock()
try:
if os.path.exists(r.sjoin('journal')):
return transaction.rollback(r.sopener, r.sjoin('journal'),
r.ui.status)
finally:
lock.release()
r.ui.popbuffer()
def relink(self):
r = self.repo
r.ui.pushbuffer()
try:
tried = set()
# Sometimes the other repo doesn't exist on this machine, so we'll try a few of them.
for i in range(3):
other = tailtracking.random_tail(self)
if not other:
break
if other in tried:
continue
tried.add(other)
other_repo = Repository(other)
if not other_repo.exists():
continue
relinker.relink(r.ui, r, other_repo.path())
finally:
r.ui.popbuffer
    @propertycache
    def repo(self):
        """Lazily open (and cache) the underlying Mercurial repository."""
        return hg.repository(self.ui, self._hg_path())
    def size(self):
        """Return the total on-disk size in bytes of the repository's .hg store."""
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(os.path.join(self.path(), '.hg')):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                try:
                    total_size += os.path.getsize(fp)
                except:
                    # Mercurial deleted a journal, lock file, etc.; we don't care
                    pass
        return total_size
    def strip(self, rev, url, parent):
        """strip the given revision from the repository"""
        self.repo.ui.setconfig('kiln', 'url', url)
        self.repo.ui.setconfig('kiln', 'source', parent)
        self.repo.ui.setconfig('kiln', 'client', 'website')
        node = self.repo[rev].node()
        # NOTE(review): if self.repo.lock() raises, the finally below calls
        # release() on None — confirm whether that path can occur.
        lock = None
        try:
            lock = self.repo.lock()
            self._removeundo()
            # Turn off this hook - causes duplicates
            self.repo.ui.setconfig('hooks', 'changegroup', None)
            # Buffer this output
            self.repo.ui.pushbuffer()
            # Remove without backup, since this should only be called on a clone.
            repair.strip(self.repo.ui, self.repo, node, None)
            self.repo.ui.popbuffer()
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self._removeundo()
            self.repo.ui.setconfig('hooks', 'changegroup', 'python:kilnhook.changehook')
        finally:
            lock.release()
        # Trigger the strip hook (changehook) to get a pingback
        self.repo.hook('strip', node=0)
def _removeundo(self):
"""From the mq extension."""
undo = self.repo.sjoin('undo')
if not os.path.exists(undo):
return
try:
os.unlink(undo)
except OSError, inst:
pass
    def tag(self, rev, name1, url, ixPerson, username, force, *names):
        """give the given revision a tag
        name1 is the first tag, and names is an optional list of additional
        tags
        """
        self.repo.ui.setconfig('kiln', 'url', url)
        self.repo.ui.setconfig('kiln', 'ixperson', ixPerson)
        self.repo.ui.setconfig('kiln', 'username', username)
        names = (name1,) + names
        if len(names) != len(set(names)):
            raise ValueError('Tags must be unique')
        for n in names:
            if n in ('tip', '.', 'null'):
                raise ValueError("the name '%s' is reserved" % n)
            if not force and n in self.repo.tags():
                raise ValueError("tag '%s' already exists " % n)
        allchars = ''.join(names)
        for c in self.repo.tag_disallowed:
            if c in allchars:
                raise ValueError('%r cannot be used in a tag name' % c)
        node = self.repo[rev].node()
        # append to the current .hgtags contents (or start fresh)
        if '.hgtags' in self.repo['tip']:
            data = self.repo['tip']['.hgtags'].data()
        else:
            data = ""
        tags = cStringIO.StringIO()
        tags.write(data)
        if data and data[-1] != '\n':
            tags.write('\n')
        for name in names:
            if force:
                # a null-node entry masks any previous binding of this tag
                tags.write('0000000000000000000000000000000000000000 %s\n' % encoding.fromlocal(name))
            tags.write('%s %s\n' % (hexencode(node), encoding.fromlocal(name)))
        mfctx = context.memfilectx(".hgtags", tags.getvalue(), False, False, None)
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), hexencode(node[:6]).lower()))
        # each memctx expects a function which maps the repository,
        # the current memctx object and a path to a file into a
        # filectx object. Since we only ever change .hgtags we use the
        # "constant" lambda function which always returns mfctx
        mctx = context.memctx(self.repo, (hexencode(self.repo['tip'].node()), None),
                              message, (".hgtags",),
                              lambda x, y, z: mfctx, user=username)
        self.repo.ui.pushbuffer()
        tagnode = self.repo.commitctx(mctx)
        for name in names:
            self.repo.hook('tag', node=hexencode(node), tag=name, local=False)
        self.repo.hook('commit', node=hexencode(tagnode), parent1=hexencode(self.repo['tip'].node()))
        self.repo.ui.popbuffer()
        return tagnode
    def tags(self):
        """List bookmarks and tags as {'tag', 'rev', 'bookmark'} dicts."""
        bookmarks = [tag for tag in pushkey.list(self.repo, 'bookmarks').keys()]
        tags = [{'tag': tag,
                 'rev': (self.repo[node].rev(), self.repo[node].hex()),
                 'bookmark': True}
                for (tag, node)
                in pushkey.list(self.repo, 'bookmarks').items()
                if node in self.repo]
        # plain tags, excluding names already reported as bookmarks
        tags.extend(
            [{'tag': tag,
              'rev': (self.repo[node].rev(), self.repo[node].hex()),
              'bookmark': False}
             for (tag, node)
             in self.repo.tags().iteritems()
             if tag not in bookmarks and node in self.repo])
        return tags
    def sync(self, site, peers=None, bfile=None):
        """syncs repository with other backends"""
        hostname = socket.gethostname()
        if peers and hostname in peers:
            for peer in peers[hostname]:
                data = {'remote': site, 'repo': self.uuid}
                if bfile:
                    data['bfile'] = bfile
                # fire-and-forget notification to each peer backend
                asyncpost(urlutil.urljoin(peer, 'sync'), data)
    def _hgrc_get(self):
        """Read .hg/hgrc into a nested dict; {} when missing or unreadable."""
        try:
            hgrc = RawConfigParser()
            hgrc.read(self._hgrc_path())
            ini = {}
            for section in hgrc.sections():
                # values are stored unicode_escape'd to keep the file ASCII-safe
                ini[section] = dict((k.decode('unicode_escape'), v.decode('unicode_escape'))
                                    for k, v in hgrc.items(section))
            return ini
        except:
            return {}
    def _hgrc_set(self, ini):
        """Serialize the nested dict back to .hg/hgrc, unicode_escape'ing values."""
        hgrc = RawConfigParser()
        for section in ini:
            hgrc.add_section(section)
            for key, val in ini[section].iteritems():
                hgrc.set(section, key.encode('unicode_escape'), val.encode('unicode_escape'))
        with open(self._hgrc_path(), 'wb') as f:
            hgrc.write(f)
    # hgrc exposes the repository's .hg/hgrc file as a nested dict
    hgrc = property(_hgrc_get, _hgrc_set)
    def _meta_get(self):
        # the [meta] hgrc section holds Kiln's repository metadata
        return self.hgrc.get('meta') or {}
    def _meta_set(self, meta):
        # replace the whole [meta] section and rewrite the hgrc file
        hgrc = self.hgrc
        hgrc['meta'] = meta
        self.hgrc = hgrc
    # meta maps to the [meta] section of the repository hgrc
    meta = property(_meta_get, _meta_set)
def meta_deleted(self):
return self.meta.get('deleted', 'false').strip().lower() == 'true'
def _add_meta(self, meta):
m = self.meta
for key in meta:
m[key] = meta[key]
self.meta = m
    def _hg_path(self, force_new=False):
        """Return the repository path in Mercurial's local encoding."""
        return encoding.tolocal(self.path(force_new))
    def _hgrc_path(self):
        """Return the absolute path of this repository's .hg/hgrc file."""
        return os.path.join(self.path(), '.hg', 'hgrc')
    def _tip(self):
        """Return the tip revision of the repository"""
        r = self.repo
        return r[r.changelog.tip()].hex()
    @propertycache
    def ui(self):
        """Build the quiet, hook-configured ui used for all repository operations."""
        u = emptyui(suppressoutput=self.suppressoutput)
        u.setconfig('ui', 'quiet', 'True')
        u.setconfig('extensions', 'hgext.bookmarks', '')
        u.setconfig('web', 'allow_push', '*')
        u.setconfig('web', 'push_ssl', 'False')
        u.setconfig('web', 'allow_archive', 'zip,gz')
        u.setconfig('server', 'validate', '1')
        if not self.suppresshooks:
            # kilnhook notifies the website of repository changes
            u.setconfig('hooks', 'changegroup', 'python:kilnhook.changehook')
            u.setconfig('hooks', 'commit', 'python:kilnhook.changehook')
            u.setconfig('hooks', 'strip', 'python:kilnhook.changehook')
            u.setconfig('hooks', 'pretxnchangegroup', 'python:kilnhook.prechangehook')
            u.setconfig('hooks', 'pretxncommit', 'python:kilnhook.prechangehook')
        if settings.HOSTED:
            for host, peer in settings.STORAGE_PEERS.iteritems():
                u.setconfig('peers', host, peer)
            for host, peer in settings.POST_STORAGE_PEERS.iteritems():
                u.setconfig('post_peers', host, peer)
        return u
    def __emittable__(self):
        """Return a serializable description of this repository for the API layer."""
        d = {'type': 'repo', 'uuid': self.uuid, 'meta': self.meta}
        if self.parent:
            d['parent'] = self.parent
        return d
|
Loading...