
/mercurial/localrepo.py

https://bitbucket.org/mirror/mercurial/
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil

propertycache = util.propertycache
filecache = scmutil.filecache
class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""
    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""
    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version of the repository"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
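# Usage sketch (illustrative, not part of the original module): how an
# extension might combine the helpers above. The names 'myrepo' and
# '_expensive' are hypothetical.
#
#     class myrepo(localrepository):
#         @unfilteredpropertycache
#         def _expensive(self):
#             # computed once on the unfiltered repo and shared by all
#             # filtered views via unfilteredpropertycache.__get__
#             return len(self.changelog)
#
#         @unfilteredmethod
#         def domaintenance(self):
#             # inside this method, 'self' is always the unfiltered repo
#             return self._expensive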
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle to a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
                   'b2x:pushkey': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]
    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
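    # Usage sketch (illustrative, not part of the original module): the
    # filtered views this method produces. Filter names follow repoview's
    # standard set.
    #
    #     unfi = repo.unfiltered()            # everything, even hidden csets
    #     served = repo.filtered('served')    # what a server would expose
    #     visible = repo.filtered('visible')  # hides obsolete changesets
    #
    # Because proxycls mixes repoview.repoview into the *current* class, a
    # repo class extended by extensions keeps its extra methods in the
    # filtered view.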
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)
    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
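    # Usage sketch (illustrative, not part of the original module): revs()
    # and set() are the usual entry points for revset queries; placeholders
    # such as %s and %d are quoted safely by revset.formatspec.
    #
    #     for rev in repo.revs('branch(%s) and not obsolete()', 'default'):
    #         print rev
    #     for ctx in repo.set('heads(%d::)', 0):
    #         print ctx.hex()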
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
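    # Usage sketch (illustrative, not part of the original module): firing a
    # hook from code that replaces a built-in operation. With throw=True a
    # failing hook raises an abort; otherwise hook() just returns the hook's
    # status.
    #
    #     repo.hook('preupdate', throw=True, parent1=hex(p1), parent2='')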
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
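    # Usage sketch (illustrative, not part of the original module): a minimal
    # caller of tag(), assuming 'repo' and a binary node 'n' are in scope.
    #
    #     repo.tag(['v1.0'], n, 'Added tag v1.0', local=False,
    #              user=None, date=None)
    #
    # With local=False this commits an updated .hgtags; with local=True it
    # only appends to .hg/localtags.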
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()

        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
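    # Usage sketch (illustrative, not part of the original module): typical
    # branch queries built on the branchmap cache.
    #
    #     bm = repo.branchmap()
    #     if 'stable' in bm:
    #         node = repo.branchtip('stable')   # tip-most head of the branch
    #         heads = bm.branchheads('stable')  # all heads, lowest rev first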
    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
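    # Usage sketch (illustrative, not part of the original module): wiring a
    # custom data filter into the encode/decode machinery above. 'upper:' is
    # a hypothetical filter name a user would then reference in [encode] or
    # [decode] hgrc sections.
    #
    #     def upperfilter(s, cmd, **kwargs):
    #         return s.upper()
    #     repo.adddatafilter('upper:', upperfilter)
    #
    # _loadfilter() matches configured commands against registered names by
    # prefix, so a pattern like '**.txt = upper:' would route through
    # upperfilter.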
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr
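    # Usage sketch (illustrative, not part of the original module): the
    # standard transaction idiom used throughout this file; the store lock
    # must be taken before opening the transaction.
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('mychange')
    #         try:
    #             # ... write to revlogs via the transaction ...
    #             tr.close()
    #         finally:
    #             tr.release()  # rolls back if close() was never reached
    #     finally:
    #         lock.release()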
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
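    # Usage sketch (illustrative, not part of the original module): lock
    # ordering. When both locks are needed, take wlock (working directory)
    # before lock (store), as rollback() above does, to avoid lock-order
    # inversions with other writers.
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             # ... modify dirstate and store ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()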
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
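    # Note (illustrative summary, not part of the original module): the
    # commit flow above is precommit hook -> commitctx() (which fires
    # pretxncommit inside the transaction) -> bookmark/dirstate/mergestate
    # update -> 'commit' hook, the last one deferred via _afterlock() so it
    # only runs once the lock is actually released.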
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
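    # Note (illustrative, not part of the original module): between() walks
    # first parents from 'top' toward 'bottom' and samples nodes at
    # exponentially growing distances (1, 2, 4, 8, ...). For a linear history
    # 10..0 with top=10 and bottom=0 it returns the nodes at distances 1, 2,
    # 4 and 8, i.e. revs 9, 8, 6 and 2; the legacy discovery protocol uses
    # this spacing to bisect a range in few round trips.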
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
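# Note (illustrative, not part of the original module): undoname() maps each
# journal file to its post-transaction counterpart, e.g. 'journal.dirstate'
# becomes 'undo.dirstate'; aftertrans() performs those renames when a
# transaction closes successfully, which is what makes 'hg rollback' possible.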
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True