
/mercurial/localrepo.py

https://bitbucket.org/mirror/mercurial/
Python | 1781 lines | 1523 code | 113 blank | 145 comment | 112 complexity | 6f96be86fe98bfa5cd2abe2fc22a8ecf MD5
Possible License(s): GPL-2.0

Note: large files are truncated; this listing ends partway through localrepository.commit().

# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil

propertycache = util.propertycache
filecache = scmutil.filecache
class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
class unfilteredpropertycache(propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)
class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())
def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
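
# Illustrative sketch of how the caching helpers above combine. Everything
# named here (`myrepo`, `buildindex`) is hypothetical and only shows the
# intended usage pattern, not code from this module:
#
#   class myrepo(localrepository):
#       @unfilteredpropertycache
#       def _expensiveindex(self):
#           # computed once, always against the unfiltered repo
#           return buildindex(self)
#
#       @unfilteredmethod
#       def rebuildindex(self):
#           if hasunfilteredcache(self, '_expensiveindex'):
#               del self.unfiltered().__dict__['_expensiveindex']
#           return self._expensiveindex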
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
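
# Illustrative use of a local peer, assuming an already-opened repo object;
# callers obtain one through localrepository.peer() (defined below):
#
#   peer = repo.peer()                 # wraps the 'served' filtered view
#   if peer.canpush():
#       tipnode = peer.lookup('tip')
#   print peer.debugwireargs('a', 'b', three='c')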
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
                   'b2x:pushkey': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]
    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
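
    # Illustrative construction sketch: in-process callers normally go
    # through mercurial.hg.repository rather than instantiating this class
    # directly (paths here are placeholders):
    #
    #   from mercurial import ui as uimod, hg
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   fresh = hg.repository(uimod.ui(), '/path/to/new', create=True)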
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
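
    # Illustrative sketch of filtered views (filter names are defined in
    # repoview; 'served' additionally hides secret changesets):
    #
    #   visible = repo.filtered('visible')   # hides hidden/obsolete csets
    #   assert visible.unfiltered() is repo.unfiltered()
    #   assert len(visible) <= len(repo.unfiltered())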
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)
    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)
    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
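
    # Illustrative sketch of the container protocol defined above (changeids
    # may be revision numbers, hex nodes, tags, bookmarks, or None for the
    # working directory):
    #
    #   ctx = repo['tip']              # changectx via __getitem__
    #   wctx = repo[None]              # workingctx
    #   if 'default' in repo:          # __contains__ via lookup()
    #       for rev in repo:           # __iter__ yields revision numbers
    #           pass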
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
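
    # Illustrative revset usage; formatspec interpolates arguments safely
    # (%s for a string, %d for an int):
    #
    #   for rev in repo.revs('heads(branch(%s))', 'default'):
    #       pass
    #   for ctx in repo.set('%d::%d', 0, 10):
    #       print ctx.hex()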
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
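
    # Illustrative hook invocation, mirroring the calls made elsewhere in
    # this class (node and name are placeholders; with throw=True a failing
    # hook aborts the operation):
    #
    #   repo.hook('pretag', throw=True, node=hex(node), tag=name,
    #             local=False)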
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
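
    # Illustrative tagging sketch (user and message strings are
    # placeholders; date=None lets Mercurial pick the current time):
    #
    #   node = repo.lookup('tip')
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
    #            user='alice <alice@example.com>', date=None)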
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()

        cache.tags, cache.tagtypes = self._findtags()

        return cache
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
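
    # Illustrative branchmap usage, assuming the returned cache behaves like
    # a dict of branch name -> list of head nodes:
    #
    #   for branch, heads in repo.branchmap().iteritems():
    #       print branch, [short(h) for h in heads]
    #   tipnode = repo.branchtip('default')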
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
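
    # Illustrative filter configuration: _loadfilter('encode') reads the
    # [encode] section of hgrc. A sketch along the lines of the hgrc
    # documentation's gzip example:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip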
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr
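
    # Illustrative transaction discipline, the pattern used by callers
    # throughout Mercurial:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ...append to revlogs and other store files...
    #       tr.close()      # makes the changes permanent
    #   finally:
    #       tr.release()    # aborts and rolls back unless close() ran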
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
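
    # Illustrative locking discipline: wlock is acquired before lock and
    # released after it, exactly as rollback() does above (release comes
    # from mercurial.lock):
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ...modify working copy and store...
    #   finally:
    #       release(lock, wlock)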
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s
