eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.py
changeset 69 c6bca38c1cbf
equal deleted inserted replaced
68:5ff1fc726848 69:c6bca38c1cbf
       
     1 # localrepo.py - read/write repository class for mercurial
       
     2 #
       
     3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
       
     4 #
       
     5 # This software may be used and distributed according to the terms of the
       
     6 # GNU General Public License version 2 or any later version.
       
     7 
       
     8 from node import bin, hex, nullid, nullrev, short
       
     9 from i18n import _
       
    10 import repo, changegroup, subrepo, discovery, pushkey
       
    11 import changelog, dirstate, filelog, manifest, context
       
    12 import lock, transaction, store, encoding
       
    13 import util, extensions, hook, error
       
    14 import match as matchmod
       
    15 import merge as mergemod
       
    16 import tags as tagsmod
       
    17 import url as urlmod
       
    18 from lock import release
       
    19 import weakref, errno, os, time, inspect
       
# Shorthand for the lazy, cached-on-first-access property decorator used below.
propertycache = util.propertycache
       
    21 
       
    22 class localrepository(repo.repository):
       
    # Protocol capabilities this repository class advertises to peers.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    # Requirements that change how revision data is stored on disk.
    supportedformats = set(('revlogv1', 'parentdelta'))
    # Full set of repository requirements this implementation understands;
    # __init__ refuses to open a repo whose requires file lists anything else.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
       
    27 
       
    def __init__(self, baseui, path=None, create=0):
        """Open the repository rooted at path, initializing it when create
        is set.

        baseui is copied so per-repository configuration (.hg/hgrc) does not
        leak back into the caller's ui.  Raises error.RepoError when the
        repository is missing (without create), already exists (with
        create), or carries a requirement not in self.supported.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc (e.g. repository being created): use defaults
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                # choose on-disk format from [format] config options
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # a missing requires file means an old, requirement-less repo
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # shared repositories (hg share) point their store at another repo
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            # no sharedpath file: this repository owns its store
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository.  _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'.  (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.)  They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None  # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references to the current transaction/locks (see transaction())
        self._transref = self._lockref = self._wlockref = None
       
   114 
       
   115     def _applyrequirements(self, requirements):
       
   116         self.requirements = requirements
       
   117         self.sopener.options = {}
       
   118         if 'parentdelta' in requirements:
       
   119             self.sopener.options['parentdelta'] = 1
       
   120 
       
   121     def _writerequirements(self):
       
   122         reqfile = self.opener("requires", "w")
       
   123         for r in self.requirements:
       
   124             reqfile.write("%s\n" % r)
       
   125         reqfile.close()
       
   126 
       
   127     def _checknested(self, path):
       
   128         """Determine if path is a legal nested repository."""
       
   129         if not path.startswith(self.root):
       
   130             return False
       
   131         subpath = path[len(self.root) + 1:]
       
   132 
       
   133         # XXX: Checking against the current working copy is wrong in
       
   134         # the sense that it can reject things like
       
   135         #
       
   136         #   $ hg cat -r 10 sub/x.txt
       
   137         #
       
   138         # if sub/ is no longer a subrepository in the working copy
       
   139         # parent revision.
       
   140         #
       
   141         # However, it can of course also allow things that would have
       
   142         # been rejected before, such as the above cat command if sub/
       
   143         # is a subrepository now, but was a normal directory before.
       
   144         # The old path auditor would have rejected by mistake since it
       
   145         # panics when it sees sub/.hg/.
       
   146         #
       
   147         # All in all, checking against the working copy seems sensible
       
   148         # since we want to prevent access to nested repositories on
       
   149         # the filesystem *now*.
       
   150         ctx = self[None]
       
   151         parts = util.splitpath(subpath)
       
   152         while parts:
       
   153             prefix = os.sep.join(parts)
       
   154             if prefix in ctx.substate:
       
   155                 if prefix == subpath:
       
   156                     return True
       
   157                 else:
       
   158                     sub = ctx.sub(prefix)
       
   159                     return sub.checknested(subpath[len(prefix) + 1:])
       
   160             else:
       
   161                 parts.pop()
       
   162         return False
       
   163 
       
   164 
       
    @propertycache
    def changelog(self):
        # Lazily open the changelog revlog; cached on first access.
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # HG_PENDING marks in-flight (pending) changelog data; only
            # honour it when it refers to this repository.
            # NOTE(review): presumably set by the hook machinery while a
            # transaction is open -- confirm against hook.py.
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        # let the store opener know the default revlog version in use
        self.sopener.options['defversion'] = c.version
        return c
       
   174 
       
   175     @propertycache
       
   176     def manifest(self):
       
   177         return manifest.manifest(self.sopener)
       
   178 
       
   179     @propertycache
       
   180     def dirstate(self):
       
   181         return dirstate.dirstate(self.opener, self.ui, self.root)
       
   182 
       
   183     def __getitem__(self, changeid):
       
   184         if changeid is None:
       
   185             return context.workingctx(self)
       
   186         return context.changectx(self, changeid)
       
   187 
       
   188     def __contains__(self, changeid):
       
   189         try:
       
   190             return bool(self.lookup(changeid))
       
   191         except error.RepoLookupError:
       
   192             return False
       
   193 
       
   194     def __nonzero__(self):
       
   195         return True
       
   196 
       
   197     def __len__(self):
       
   198         return len(self.changelog)
       
   199 
       
   200     def __iter__(self):
       
   201         for i in xrange(len(self)):
       
   202             yield i
       
   203 
       
   204     def url(self):
       
   205         return 'file:' + self.root
       
   206 
       
   207     def hook(self, name, throw=False, **args):
       
   208         return hook.hook(self.ui, self, name, throw, **args)
       
   209 
       
    # characters that may never appear in a tag name (see _tag)
    tag_disallowed = ':\r\n'
       
   211 
       
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Backend for tag(): record one or more tag names for node.

        names may be a single string or a sequence of strings.  With
        local, the tags are appended to .hg/localtags; otherwise .hgtags
        is updated in the working copy and committed.  Returns the commit
        node for global tags, None for local ones.  Raises util.Abort if
        a name contains a disallowed character.

        NOTE: the mutable default extra={} is never modified here, so the
        shared-default pitfall does not bite, but callers should not rely
        on that.
        """
        # accept one name or many; validate all characters in one pass
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # pretag hooks may veto the operation (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines at EOF, making sure the existing content
            # ends with a newline; munge (if given) re-encodes each name.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    # tag already exists: write its old node first so the
                    # file records the tag's history
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # open .hg/localtags for update, falling back to append-create
            # when it does not exist yet
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # same open-or-create dance for the versioned .hgtags file
        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        # make sure .hgtags is tracked before committing it
        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only .hgtags, nothing else from the working copy
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
       
   277 
       
   278     def tag(self, names, node, message, local, user, date):
       
   279         '''tag a revision with one or more symbolic names.
       
   280 
       
   281         names is a list of strings or, when adding a single tag, names may be a
       
   282         string.
       
   283 
       
   284         if local is True, the tags are stored in a per-repository file.
       
   285         otherwise, they are stored in the .hgtags file, and a new
       
   286         changeset is committed with the change.
       
   287 
       
   288         keyword arguments:
       
   289 
       
   290         local: whether to store tags in non-version-controlled file
       
   291         (default False)
       
   292 
       
   293         message: commit message to use if committing
       
   294 
       
   295         user: name of user to use if committing
       
   296 
       
   297         date: date tuple to use if committing'''
       
   298 
       
   299         if not local:
       
   300             for x in self.status()[:5]:
       
   301                 if '.hgtags' in x:
       
   302                     raise util.Abort(_('working copy of .hgtags is changed '
       
   303                                        '(please commit .hgtags manually)'))
       
   304 
       
   305         self.tags() # instantiate the cache
       
   306         self._tag(names, node, message, local, user, date)
       
   307 
       
   308     def tags(self):
       
   309         '''return a mapping of tag to node'''
       
   310         if self._tags is None:
       
   311             (self._tags, self._tagtypes) = self._findtags()
       
   312 
       
   313         return self._tags
       
   314 
       
   315     def _findtags(self):
       
   316         '''Do the hard work of finding tags.  Return a pair of dicts
       
   317         (tags, tagtypes) where tags maps tag name to node, and tagtypes
       
   318         maps tag name to a string like \'global\' or \'local\'.
       
   319         Subclasses or extensions are free to add their own tags, but
       
   320         should be aware that the returned dicts will be retained for the
       
   321         duration of the localrepo object.'''
       
   322 
       
   323         # XXX what tagtype should subclasses/extensions use?  Currently
       
   324         # mq and bookmarks add tags, but do not set the tagtype at all.
       
   325         # Should each extension invent its own tag type?  Should there
       
   326         # be one tagtype for all such "virtual" tags?  Or is the status
       
   327         # quo fine?
       
   328 
       
   329         alltags = {}                    # map tag name to (node, hist)
       
   330         tagtypes = {}
       
   331 
       
   332         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
       
   333         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
       
   334 
       
   335         # Build the return dicts.  Have to re-encode tag names because
       
   336         # the tags module always uses UTF-8 (in order not to lose info
       
   337         # writing to the cache), but the rest of Mercurial wants them in
       
   338         # local encoding.
       
   339         tags = {}
       
   340         for (name, (node, hist)) in alltags.iteritems():
       
   341             if node != nullid:
       
   342                 tags[encoding.tolocal(name)] = node
       
   343         tags['tip'] = self.changelog.tip()
       
   344         tagtypes = dict([(encoding.tolocal(name), value)
       
   345                          for (name, value) in tagtypes.iteritems()])
       
   346         return (tags, tagtypes)
       
   347 
       
   348     def tagtype(self, tagname):
       
   349         '''
       
   350         return the type of the given tag. result can be:
       
   351 
       
   352         'local'  : a local tag
       
   353         'global' : a global tag
       
   354         None     : tag does not exist
       
   355         '''
       
   356 
       
   357         self.tags()
       
   358 
       
   359         return self._tagtypes.get(tagname)
       
   360 
       
   361     def tagslist(self):
       
   362         '''return a list of tags ordered by revision'''
       
   363         l = []
       
   364         for t, n in self.tags().iteritems():
       
   365             try:
       
   366                 r = self.changelog.rev(n)
       
   367             except:
       
   368                 r = -2 # sort to the beginning of the list if unknown
       
   369             l.append((r, t, n))
       
   370         return [(t, n) for r, t, n in sorted(l)]
       
   371 
       
   372     def nodetags(self, node):
       
   373         '''return the tags associated with a node'''
       
   374         if not self.nodetagscache:
       
   375             self.nodetagscache = {}
       
   376             for t, n in self.tags().iteritems():
       
   377                 self.nodetagscache.setdefault(n, []).append(t)
       
   378             for tags in self.nodetagscache.itervalues():
       
   379                 tags.sort()
       
   380         return self.nodetagscache.get(node, [])
       
   381 
       
   382     def _branchtags(self, partial, lrev):
       
   383         # TODO: rename this function?
       
   384         tiprev = len(self) - 1
       
   385         if lrev != tiprev:
       
   386             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
       
   387             self._updatebranchcache(partial, ctxgen)
       
   388             self._writebranchcache(partial, self.changelog.tip(), tiprev)
       
   389 
       
   390         return partial
       
   391 
       
    def updatebranchcache(self):
        """Bring the in-memory branch head cache up to date with the tip.

        Fast path: if the cached tip is unchanged, the cache is current
        (and is returned).  Otherwise start from the on-disk cache -- or
        from the previous in-memory state when the old tip is still known
        -- and fold in the new revisions.
        """
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (first call, or the old tip was
            # stripped): reload the persisted cache from disk
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previous in-memory cache
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
       
   408 
       
   409     def branchmap(self):
       
   410         '''returns a dictionary {branch: [branchheads]}'''
       
   411         self.updatebranchcache()
       
   412         return self._branchcache
       
   413 
       
   414     def branchtags(self):
       
   415         '''return a dict where branch names map to the tipmost head of
       
   416         the branch, open heads come before closed'''
       
   417         bt = {}
       
   418         for bn, heads in self.branchmap().iteritems():
       
   419             tip = heads[-1]
       
   420             for h in reversed(heads):
       
   421                 if 'close' not in self.changelog.read(h)[5]:
       
   422                     tip = h
       
   423                     break
       
   424             bt[bn] = tip
       
   425         return bt
       
   426 
       
   427 
       
    def _readbranchcache(self):
        """Read the persisted branch head cache (.hg/branchheads.cache).

        Returns (partial, last, lrev): partial maps branch name to a list
        of head nodes, and last/lrev identify the tip node/revision the
        cache was valid for.  Any problem reading or validating the file
        yields an empty result so the cache is simply rebuilt.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file (or unreadable): start from scratch
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # any corruption: mention it when debugging, then rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
       
   455 
       
   456     def _writebranchcache(self, branches, tip, tiprev):
       
   457         try:
       
   458             f = self.opener("branchheads.cache", "w", atomictemp=True)
       
   459             f.write("%s %s\n" % (hex(tip), tiprev))
       
   460             for label, nodes in branches.iteritems():
       
   461                 for node in nodes:
       
   462                     f.write("%s %s\n" % (hex(node), label))
       
   463             f.rename()
       
   464         except (IOError, OSError):
       
   465             pass
       
   466 
       
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs from ctxgen into the branch head map.

        partial maps branch name -> list of head nodes and is updated in
        place.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # bound the reachability walk by the lowest-revision head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                # note: this rebinds the local bheads (the list in partial
                # is not updated until the assignment below)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
       
   490 
       
   491     def lookup(self, key):
       
   492         if isinstance(key, int):
       
   493             return self.changelog.node(key)
       
   494         elif key == '.':
       
   495             return self.dirstate.parents()[0]
       
   496         elif key == 'null':
       
   497             return nullid
       
   498         elif key == 'tip':
       
   499             return self.changelog.tip()
       
   500         n = self.changelog._match(key)
       
   501         if n:
       
   502             return n
       
   503         if key in self.tags():
       
   504             return self.tags()[key]
       
   505         if key in self.branchtags():
       
   506             return self.branchtags()[key]
       
   507         n = self.changelog._partialmatch(key)
       
   508         if n:
       
   509             return n
       
   510 
       
   511         # can't find key, check if it might have come from damaged dirstate
       
   512         if key in self.dirstate.parents():
       
   513             raise error.Abort(_("working directory has unknown parent '%s'!")
       
   514                               % short(key))
       
   515         try:
       
   516             if len(key) == 20:
       
   517                 key = hex(key)
       
   518         except:
       
   519             pass
       
   520         raise error.RepoLookupError(_("unknown revision '%s'") % key)
       
   521 
       
   522     def lookupbranch(self, key, remote=None):
       
   523         repo = remote or self
       
   524         if key in repo.branchmap():
       
   525             return key
       
   526 
       
   527         repo = (remote and remote.local()) and remote or self
       
   528         return repo[key].branch()
       
   529 
       
   530     def local(self):
       
   531         return True
       
   532 
       
   533     def join(self, f):
       
   534         return os.path.join(self.path, f)
       
   535 
       
   536     def wjoin(self, f):
       
   537         return os.path.join(self.root, f)
       
   538 
       
   539     def file(self, f):
       
   540         if f[0] == '/':
       
   541             f = f[1:]
       
   542         return filelog.filelog(self.sopener, f)
       
   543 
       
   544     def changectx(self, changeid):
       
   545         return self[changeid]
       
   546 
       
   547     def parents(self, changeid=None):
       
   548         '''get list of changectxs for parents of changeid'''
       
   549         return self[changeid].parents()
       
   550 
       
   551     def filectx(self, path, changeid=None, fileid=None):
       
   552         """changeid can be a changeset revision, node, or tag.
       
   553            fileid can be a file revision or node."""
       
   554         return context.filectx(self, path, changeid, fileid)
       
   555 
       
   556     def getcwd(self):
       
   557         return self.dirstate.getcwd()
       
   558 
       
   559     def pathto(self, f, cwd=None):
       
   560         return self.dirstate.pathto(f, cwd)
       
   561 
       
   562     def wfile(self, f, mode='r'):
       
   563         return self.wopener(f, mode)
       
   564 
       
   565     def _link(self, f):
       
   566         return os.path.islink(self.wjoin(f))
       
   567 
       
   568     def _loadfilter(self, filter):
       
   569         if filter not in self.filterpats:
       
   570             l = []
       
   571             for pat, cmd in self.ui.configitems(filter):
       
   572                 if cmd == '!':
       
   573                     continue
       
   574                 mf = matchmod.match(self.root, '', [pat])
       
   575                 fn = None
       
   576                 params = cmd
       
   577                 for name, filterfn in self._datafilters.iteritems():
       
   578                     if cmd.startswith(name):
       
   579                         fn = filterfn
       
   580                         params = cmd[len(name):].lstrip()
       
   581                         break
       
   582                 if not fn:
       
   583                     fn = lambda s, c, **kwargs: util.filter(s, c)
       
   584                 # Wrap old filters not supporting keyword arguments
       
   585                 if not inspect.getargspec(fn)[2]:
       
   586                     oldfn = fn
       
   587                     fn = lambda s, c, **kwargs: oldfn(s, c)
       
   588                 l.append((mf, fn, params))
       
   589             self.filterpats[filter] = l
       
   590         return self.filterpats[filter]
       
   591 
       
   592     def _filter(self, filterpats, filename, data):
       
   593         for mf, fn, cmd in filterpats:
       
   594             if mf(filename):
       
   595                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
       
   596                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
       
   597                 break
       
   598 
       
   599         return data
       
   600 
       
   601     @propertycache
       
   602     def _encodefilterpats(self):
       
   603         return self._loadfilter('encode')
       
   604 
       
   605     @propertycache
       
   606     def _decodefilterpats(self):
       
   607         return self._loadfilter('decode')
       
   608 
       
   609     def adddatafilter(self, name, filter):
       
   610         self._datafilters[name] = filter
       
   611 
       
   612     def wread(self, filename):
       
   613         if self._link(filename):
       
   614             data = os.readlink(self.wjoin(filename))
       
   615         else:
       
   616             data = self.wopener(filename, 'r').read()
       
   617         return self._filter(self._encodefilterpats, filename, data)
       
   618 
       
   619     def wwrite(self, filename, data, flags):
       
   620         data = self._filter(self._decodefilterpats, filename, data)
       
   621         try:
       
   622             os.unlink(self.wjoin(filename))
       
   623         except OSError:
       
   624             pass
       
   625         if 'l' in flags:
       
   626             self.wopener.symlink(data, filename)
       
   627         else:
       
   628             self.wopener(filename, 'w').write(data)
       
   629             if 'x' in flags:
       
   630                 util.set_flags(self.wjoin(filename), False, True)
       
   631 
       
    def wwritedata(self, filename, data):
        """Return data passed through the [decode] filters for filename,
        without touching the working directory."""
        return self._filter(self._decodefilterpats, filename, data)
       
   634 
       
    def transaction(self, desc):
        """Open a store transaction, or nest inside the one already
        running.  desc is recorded in journal.desc so rollback can
        report what is being undone.  Returns the transaction object."""
        # reuse a still-running transaction if there is one
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. brand-new repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on successful close, the journal.* files become the undo.* files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref: an abandoned transaction must not keep itself alive
        self._transref = weakref.ref(tr)
        return tr
       
   664 
       
   665     def recover(self):
       
   666         lock = self.lock()
       
   667         try:
       
   668             if os.path.exists(self.sjoin("journal")):
       
   669                 self.ui.status(_("rolling back interrupted transaction\n"))
       
   670                 transaction.rollback(self.sopener, self.sjoin("journal"),
       
   671                                      self.ui.warn)
       
   672                 self.invalidate()
       
   673                 return True
       
   674             else:
       
   675                 self.ui.warn(_("no interrupted transaction available\n"))
       
   676                 return False
       
   677         finally:
       
   678             lock.release()
       
   679 
       
   680     def rollback(self, dryrun=False):
       
   681         wlock = lock = None
       
   682         try:
       
   683             wlock = self.wlock()
       
   684             lock = self.lock()
       
   685             if os.path.exists(self.sjoin("undo")):
       
   686                 try:
       
   687                     args = self.opener("undo.desc", "r").read().splitlines()
       
   688                     if len(args) >= 3 and self.ui.verbose:
       
   689                         desc = _("rolling back to revision %s"
       
   690                                  " (undo %s: %s)\n") % (
       
   691                                  int(args[0]) - 1, args[1], args[2])
       
   692                     elif len(args) >= 2:
       
   693                         desc = _("rolling back to revision %s (undo %s)\n") % (
       
   694                                  int(args[0]) - 1, args[1])
       
   695                 except IOError:
       
   696                     desc = _("rolling back unknown transaction\n")
       
   697                 self.ui.status(desc)
       
   698                 if dryrun:
       
   699                     return
       
   700                 transaction.rollback(self.sopener, self.sjoin("undo"),
       
   701                                      self.ui.warn)
       
   702                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
       
   703                 try:
       
   704                     branch = self.opener("undo.branch").read()
       
   705                     self.dirstate.setbranch(branch)
       
   706                 except IOError:
       
   707                     self.ui.warn(_("Named branch could not be reset, "
       
   708                                    "current branch still is: %s\n")
       
   709                                  % encoding.tolocal(self.dirstate.branch()))
       
   710                 self.invalidate()
       
   711                 self.dirstate.invalidate()
       
   712                 self.destroyed()
       
   713             else:
       
   714                 self.ui.warn(_("no rollback information available\n"))
       
   715                 return 1
       
   716         finally:
       
   717             release(lock, wlock)
       
   718 
       
   719     def invalidatecaches(self):
       
   720         self._tags = None
       
   721         self._tagtypes = None
       
   722         self.nodetagscache = None
       
   723         self._branchcache = None # in UTF-8
       
   724         self._branchcachetip = None
       
   725 
       
   726     def invalidate(self):
       
   727         for a in "changelog manifest".split():
       
   728             if a in self.__dict__:
       
   729                 delattr(self, a)
       
   730         self.invalidatecaches()
       
   731 
       
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname' and return the lock object.

        If the lock is already held and wait is false, LockHeld
        propagates; otherwise warn and retry with the configured
        ui.timeout.  acquirefn, when given, runs after acquisition."""
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
       
   746 
       
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the existing lock (via the weakref) if it is still held
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # invalidate on acquisition: on-disk state may have changed while
        # we were unlocked
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
       
   760 
       
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse the existing working-dir lock if it is still held
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # dirstate is written out on release and invalidated on
        # acquisition so in-memory state tracks the lock
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
       
   775 
       
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to store; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision this file
        revision will link to.  Appends the path to changelist when a
        new filelog revision is created, and returns the resulting
        filelog node (or the reusable parent node if nothing changed).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                # record the copy source in the filelog metadata
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        # content unchanged: reuse the parent's filelog node
        return fparent1
       
   855 
       
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changelog node, or None when there is nothing
        to commit.  Raises util.Abort on user errors (partial merge
        commit, unresolved conflicts, unmatched patterns, ...).
        """
        # NOTE(review): 'extra' has a mutable default; it is only read
        # here (extra.get, passed to workingctx), not mutated.

        def fail(f, msg):
            # abort on any bad/unmatched file from the matcher
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so unmatched dir patterns can
            # be reported precisely below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                # .hgsubstate is rewritten below, make sure it's committed
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and no branch change / branch close: bail out
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                                    "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberately broad: on any failure (including
                # KeyboardInterrupt) point the user at the saved
                # message, then re-raise
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # the commit hook runs outside the wlock, after everything is done
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
       
   983 
       
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=True, an IOError while committing an individual file
        aborts the commit; otherwise a missing file (ENOENT) is treated
        as removed.  Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: record as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog writes
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
       
  1051 
       
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # currently the only concrete action here is dropping the
        # in-memory caches so they are rebuilt from the new history
        self.invalidatecaches()
       
  1070 
       
  1071     def walk(self, match, node=None):
       
  1072         '''
       
  1073         walk recursively through the directory tree or a given
       
  1074         changeset, finding all files matched by the match
       
  1075         function
       
  1076         '''
       
  1077         return self[node].walk(match)
       
  1078 
       
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists: (modified, added, removed,
        deleted, unknown, ignored, clean); the last three are only
        populated when the corresponding flag is set.
        """

        def mfmatches(ctx):
            # ctx's manifest restricted to files accepted by 'match'
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # callers may pass either a node/rev or a ready-made context
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # warn about bad files only if they are not in the base
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # recompute modified/added/clean by walking the manifests;
            # a None entry in mf2 means "working dir file, content unknown"
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # anything left in mf1 exists only on the first side
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # fold each subrepo's status into the result, prefixing paths
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
       
  1207 
       
  1208     def heads(self, start=None):
       
  1209         heads = self.changelog.heads(start)
       
  1210         # sort the output in rev descending order
       
  1211         heads = [(-self.changelog.rev(h), h) for h in heads]
       
  1212         return [n for (r, n) in sorted(heads)]
       
  1213 
       
  1214     def branchheads(self, branch=None, start=None, closed=False):
       
  1215         '''return a (possibly filtered) list of heads for the given branch
       
  1216 
       
  1217         Heads are returned in topological order, from newest to oldest.
       
  1218         If branch is None, use the dirstate branch.
       
  1219         If start is not None, return only heads reachable from start.
       
  1220         If closed is True, return heads that are marked as closed as well.
       
  1221         '''
       
  1222         if branch is None:
       
  1223             branch = self[None].branch()
       
  1224         branches = self.branchmap()
       
  1225         if branch not in branches:
       
  1226             return []
       
  1227         # the cache returns heads ordered lowest to highest
       
  1228         bheads = list(reversed(branches[branch]))
       
  1229         if start is not None:
       
  1230             # filter out the heads that cannot be reached from startrev
       
  1231             fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
       
  1232             bheads = [h for h in bheads if h in fbheads]
       
  1233         if not closed:
       
  1234             bheads = [h for h in bheads if
       
  1235                       ('close' not in self.changelog.read(h)[5])]
       
  1236         return bheads
       
  1237 
       
  1238     def branches(self, nodes):
       
  1239         if not nodes:
       
  1240             nodes = [self.changelog.tip()]
       
  1241         b = []
       
  1242         for n in nodes:
       
  1243             t = n
       
  1244             while 1:
       
  1245                 p = self.changelog.parents(n)
       
  1246                 if p[1] != nullid or p[0] == nullid:
       
  1247                     b.append((t, n, p[0], p[1]))
       
  1248                     break
       
  1249                 n = p[0]
       
  1250         return b
       
  1251 
       
  1252     def between(self, pairs):
       
  1253         r = []
       
  1254 
       
  1255         for top, bottom in pairs:
       
  1256             n, l, i = top, [], 0
       
  1257             f = 1
       
  1258 
       
  1259             while n != bottom and n != nullid:
       
  1260                 p = self.changelog.parents(n)[0]
       
  1261                 if i == f:
       
  1262                     l.append(n)
       
  1263                     f = f * 2
       
  1264                 n = p
       
  1265                 i += 1
       
  1266 
       
  1267             r.append(l)
       
  1268 
       
  1269         return r
       
  1270 
       
    def pull(self, remote, heads=None, force=False):
        """Pull changes from remote into this repository.

        remote: the repository object to pull from.
        heads: optional list of remote heads to limit the pull to;
        None means pull everything new.
        force: forwarded to the discovery phase (pull unrelated repos).

        Return the integer result of addchangegroup(), or 0 when there
        is nothing to fetch.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a heads-limited (partial) pull needs server-side support
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            # the lock is handed off so addchangegroup releases it for us
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()
       
  1298 
       
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # the addchangegroup path needs the remote locked for the whole
            # transfer
            lock = remote.lock()
        try:
            # prepush returns (None, status-int) when there is nothing to
            # send (or we refuse), else (changegroup, remote_heads)
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push.  once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    # 'force' tells the remote to skip its head check
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()
       
  1343 
       
  1344     def changegroupinfo(self, nodes, source):
       
  1345         if self.ui.verbose or source == 'bundle':
       
  1346             self.ui.status(_("%d changesets found\n") % len(nodes))
       
  1347         if self.ui.debugflag:
       
  1348             self.ui.debug("list of changesets:\n")
       
  1349             for node in nodes:
       
  1350                 self.ui.debug("%s\n" % hex(node))
       
  1351 
       
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            # (if the requested heads are exactly the repository heads, we
            # are sending everything and _changegroup can do a simpler job)
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            # manifest extranodes are keyed by the integer 1 (see docstring)
            add_extra_nodes(1, msng_mnfst_set)
            # NOTE: relies on Python 2 dict.keys() returning a list (it is
            # sorted in place just below)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                # make sure explicitly requested files are visited below even
                # if no changeset touched them
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
       
  1563 
       
  1564     def changegroup(self, basenodes, source):
       
  1565         # to avoid a race we use changegroupsubset() (issue1320)
       
  1566         return self.changegroupsubset(basenodes, self.heads(), source)
       
  1567 
       
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # revision numbers of the outgoing changesets
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset is its own linkrev lookup
        def identity(x):
            return x

        # yield the nodes of 'log' whose linkrev falls inside the outgoing
        # revset, in revision order
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a lookup that maps a node of 'revlog' to the changelog node
        # it is linked to
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            # changelog chunks first
            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            # then manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            # finally, one group per changed file, sorted by name
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            # signal that no more groups are left
            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
       
  1641 
       
  1642     def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
       
  1643         """Add the changegroup returned by source.read() to this repo.
       
  1644         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
       
  1645         the URL of the repo where this changegroup is coming from.
       
  1646 
       
  1647         Return an integer summarizing the change to this repo:
       
  1648         - nothing changed or no source: 0
       
  1649         - more heads than before: 1+added heads (2..n)
       
  1650         - fewer heads than before: -1-removed heads (-2..-n)
       
  1651         - number of heads stays the same: 1
       
  1652         """
       
  1653         def csmap(x):
       
  1654             self.ui.debug("add changeset %s\n" % short(x))
       
  1655             return len(cl)
       
  1656 
       
  1657         def revmap(x):
       
  1658             return cl.rev(x)
       
  1659 
       
  1660         if not source:
       
  1661             return 0
       
  1662 
       
  1663         self.hook('prechangegroup', throw=True, source=srctype, url=url)
       
  1664 
       
  1665         changesets = files = revisions = 0
       
  1666         efiles = set()
       
  1667 
       
  1668         # write changelog data to temp files so concurrent readers will not see
       
  1669         # inconsistent view
       
  1670         cl = self.changelog
       
  1671         cl.delayupdate()
       
  1672         oldheads = len(cl.heads())
       
  1673 
       
  1674         tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
       
  1675         try:
       
  1676             trp = weakref.proxy(tr)
       
  1677             # pull off the changeset group
       
  1678             self.ui.status(_("adding changesets\n"))
       
  1679             clstart = len(cl)
       
  1680             class prog(object):
       
  1681                 step = _('changesets')
       
  1682                 count = 1
       
  1683                 ui = self.ui
       
  1684                 total = None
       
  1685                 def __call__(self):
       
  1686                     self.ui.progress(self.step, self.count, unit=_('chunks'),
       
  1687                                      total=self.total)
       
  1688                     self.count += 1
       
  1689             pr = prog()
       
  1690             source.callback = pr
       
  1691 
       
  1692             if (cl.addgroup(source, csmap, trp) is None
       
  1693                 and not emptyok):
       
  1694                 raise util.Abort(_("received changelog group is empty"))
       
  1695             clend = len(cl)
       
  1696             changesets = clend - clstart
       
  1697             for c in xrange(clstart, clend):
       
  1698                 efiles.update(self[c].files())
       
  1699             efiles = len(efiles)
       
  1700             self.ui.progress(_('changesets'), None)
       
  1701 
       
  1702             # pull off the manifest group
       
  1703             self.ui.status(_("adding manifests\n"))
       
  1704             pr.step = _('manifests')
       
  1705             pr.count = 1
       
  1706             pr.total = changesets # manifests <= changesets
       
  1707             # no need to check for empty manifest group here:
       
  1708             # if the result of the merge of 1 and 2 is the same in 3 and 4,
       
  1709             # no new manifest will be created and the manifest group will
       
  1710             # be empty during the pull
       
  1711             self.manifest.addgroup(source, revmap, trp)
       
  1712             self.ui.progress(_('manifests'), None)
       
  1713 
       
  1714             needfiles = {}
       
  1715             if self.ui.configbool('server', 'validate', default=False):
       
  1716                 # validate incoming csets have their manifests
       
  1717                 for cset in xrange(clstart, clend):
       
  1718                     mfest = self.changelog.read(self.changelog.node(cset))[0]
       
  1719                     mfest = self.manifest.readdelta(mfest)
       
  1720                     # store file nodes we must see
       
  1721                     for f, n in mfest.iteritems():
       
  1722                         needfiles.setdefault(f, set()).add(n)
       
  1723 
       
  1724             # process the files
       
  1725             self.ui.status(_("adding file changes\n"))
       
  1726             pr.step = 'files'
       
  1727             pr.count = 1
       
  1728             pr.total = efiles
       
  1729             source.callback = None
       
  1730 
       
  1731             while 1:
       
  1732                 f = source.chunk()
       
  1733                 if not f:
       
  1734                     break
       
  1735                 self.ui.debug("adding %s revisions\n" % f)
       
  1736                 pr()
       
  1737                 fl = self.file(f)
       
  1738                 o = len(fl)
       
  1739                 if fl.addgroup(source, revmap, trp) is None:
       
  1740                     raise util.Abort(_("received file revlog group is empty"))
       
  1741                 revisions += len(fl) - o
       
  1742                 files += 1
       
  1743                 if f in needfiles:
       
  1744                     needs = needfiles[f]
       
  1745                     for new in xrange(o, len(fl)):
       
  1746                         n = fl.node(new)
       
  1747                         if n in needs:
       
  1748                             needs.remove(n)
       
  1749                     if not needs:
       
  1750                         del needfiles[f]
       
  1751             self.ui.progress(_('files'), None)
       
  1752 
       
  1753             for f, needs in needfiles.iteritems():
       
  1754                 fl = self.file(f)
       
  1755                 for n in needs:
       
  1756                     try:
       
  1757                         fl.rev(n)
       
  1758                     except error.LookupError:
       
  1759                         raise util.Abort(
       
  1760                             _('missing file data for %s:%s - run hg verify') %
       
  1761                             (f, hex(n)))
       
  1762 
       
  1763             newheads = len(cl.heads())
       
  1764             heads = ""
       
  1765             if oldheads and newheads != oldheads:
       
  1766                 heads = _(" (%+d heads)") % (newheads - oldheads)
       
  1767 
       
  1768             self.ui.status(_("added %d changesets"
       
  1769                              " with %d changes to %d files%s\n")
       
  1770                              % (changesets, revisions, files, heads))
       
  1771 
       
  1772             if changesets > 0:
       
  1773                 p = lambda: cl.writepending() and self.root or ""
       
  1774                 self.hook('pretxnchangegroup', throw=True,
       
  1775                           node=hex(cl.node(clstart)), source=srctype,
       
  1776                           url=url, pending=p)
       
  1777 
       
  1778             # make changelog see real files again
       
  1779             cl.finalize(trp)
       
  1780 
       
  1781             tr.close()
       
  1782         finally:
       
  1783             tr.release()
       
  1784             if lock:
       
  1785                 lock.release()
       
  1786 
       
  1787         if changesets > 0:
       
  1788             # forcefully update the on-disk branch cache
       
  1789             self.ui.debug("updating the branch cache\n")
       
  1790             self.updatebranchcache()
       
  1791             self.hook("changegroup", node=hex(cl.node(clstart)),
       
  1792                       source=srctype, url=url)
       
  1793 
       
  1794             for i in xrange(clstart, clend):
       
  1795                 self.hook("incoming", node=hex(cl.node(i)),
       
  1796                           source=srctype, url=url)
       
  1797 
       
  1798         # never return 0 here:
       
  1799         if newheads < oldheads:
       
  1800             return newheads - oldheads - 1
       
  1801         else:
       
  1802             return newheads - oldheads + 1
       
  1803 
       
  1804 
       
  1805     def stream_in(self, remote, requirements):
       
  1806         fp = remote.stream_out()
       
  1807         l = fp.readline()
       
  1808         try:
       
  1809             resp = int(l)
       
  1810         except ValueError:
       
  1811             raise error.ResponseError(
       
  1812                 _('Unexpected response from remote server:'), l)
       
  1813         if resp == 1:
       
  1814             raise util.Abort(_('operation forbidden by server'))
       
  1815         elif resp == 2:
       
  1816             raise util.Abort(_('locking the remote repository failed'))
       
  1817         elif resp != 0:
       
  1818             raise util.Abort(_('the server sent an unknown error code'))
       
  1819         self.ui.status(_('streaming all changes\n'))
       
  1820         l = fp.readline()
       
  1821         try:
       
  1822             total_files, total_bytes = map(int, l.split(' ', 1))
       
  1823         except (ValueError, TypeError):
       
  1824             raise error.ResponseError(
       
  1825                 _('Unexpected response from remote server:'), l)
       
  1826         self.ui.status(_('%d files to transfer, %s of data\n') %
       
  1827                        (total_files, util.bytecount(total_bytes)))
       
  1828         start = time.time()
       
  1829         for i in xrange(total_files):
       
  1830             # XXX doesn't support '\n' or '\r' in filenames
       
  1831             l = fp.readline()
       
  1832             try:
       
  1833                 name, size = l.split('\0', 1)
       
  1834                 size = int(size)
       
  1835             except (ValueError, TypeError):
       
  1836                 raise error.ResponseError(
       
  1837                     _('Unexpected response from remote server:'), l)
       
  1838             self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
       
  1839             # for backwards compat, name was partially encoded
       
  1840             ofp = self.sopener(store.decodedir(name), 'w')
       
  1841             for chunk in util.filechunkiter(fp, limit=size):
       
  1842                 ofp.write(chunk)
       
  1843             ofp.close()
       
  1844         elapsed = time.time() - start
       
  1845         if elapsed <= 0:
       
  1846             elapsed = 0.001
       
  1847         self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
       
  1848                        (util.bytecount(total_bytes), elapsed,
       
  1849                         util.bytecount(total_bytes / elapsed)))
       
  1850 
       
  1851         # new requirements = old non-format requirements + new format-related
       
  1852         # requirements from the streamed-in repository
       
  1853         requirements.update(set(self.requirements) - self.supportedformats)
       
  1854         self._applyrequirements(requirements)
       
  1855         self._writerequirements()
       
  1856 
       
  1857         self.invalidate()
       
  1858         return len(self.heads()) + 1
       
  1859 
       
  1860     def clone(self, remote, heads=[], stream=False):
       
  1861         '''clone remote repository.
       
  1862 
       
  1863         keyword arguments:
       
  1864         heads: list of revs to clone (forces use of pull)
       
  1865         stream: use streaming clone if possible'''
       
  1866 
       
  1867         # now, all clients that can request uncompressed clones can
       
  1868         # read repo formats supported by all servers that can serve
       
  1869         # them.
       
  1870 
       
  1871         # if revlog format changes, client will have to check version
       
  1872         # and format flags on "stream" capability, and use
       
  1873         # uncompressed only if compatible.
       
  1874 
       
  1875         if stream and not heads:
       
  1876             # 'stream' means remote revlog format is revlogv1 only
       
  1877             if remote.capable('stream'):
       
  1878                 return self.stream_in(remote, set(('revlogv1',)))
       
  1879             # otherwise, 'streamreqs' contains the remote revlog format
       
  1880             streamreqs = remote.capable('streamreqs')
       
  1881             if streamreqs:
       
  1882                 streamreqs = set(streamreqs.split(','))
       
  1883                 # if we support it, stream in and adjust our requirements
       
  1884                 if not streamreqs - self.supportedformats:
       
  1885                     return self.stream_in(remote, streamreqs)
       
  1886         return self.pull(remote, heads)
       
  1887 
       
  1888     def pushkey(self, namespace, key, old, new):
       
  1889         return pushkey.push(self, namespace, key, old, new)
       
  1890 
       
  1891     def listkeys(self, namespace):
       
  1892         return pushkey.list(self, namespace)
       
  1893 
       
  1894 # used to avoid circular references so destructors work
       
  1895 def aftertrans(files):
       
  1896     renamefiles = [tuple(t) for t in files]
       
  1897     def a():
       
  1898         for src, dest in renamefiles:
       
  1899             util.rename(src, dest)
       
  1900     return a
       
  1901 
       
  1902 def instance(ui, path, create):
       
  1903     return localrepository(ui, util.drop_scheme('file', path), create)
       
  1904 
       
  1905 def islocal(path):
       
  1906     return True