
/mercurial/obsolete.py

https://bitbucket.org/mirror/mercurial/
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (A',)) and (A, (A'',))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bits integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""

import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
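
# A quick size check follows from the struct formats above (illustrative
# note, not in the original file): the fixed part packs 1 + 4 + 1 + 20 =
# 26 bytes, so a marker with N successors and M bytes of metadata occupies
# 26 + 20*N + M bytes on disk, plus the single version byte at the start
# of the file.
#
#   >>> _fmfsize
#   26
#   >>> _fnodesize
#   20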

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o    A' (bumped)
#     |`:
#     | o  A
#     |/
#     o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
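
# Illustrative sketch (not in the original file): a bumpedfix marker would
# be created through the store roughly as
#
#   obsstore.create(tr, aprime, (ad,), flag=bumpedfix)
#
# where `aprime` and `ad` are the 20-byte node identifiers of A' and Ad,
# and `tr` is an open transaction.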

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so only split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d
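
# Illustrative round trip (not in the original file): metadata encodes to
# the nul-separated 'key:value' form described in the module docstring and
# decodes back to an equal dictionary.
#
#   >>> encodemeta({'date': '0 0', 'user': 'alice'})
#   'date:0 0\x00user:alice'
#   >>> decodemeta('date:0 0\x00user:alice') == {'date': '0 0', 'user': 'alice'}
#   True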

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))
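
# A minimal sketch of the wrapper in use (not in the original file); the
# repo argument is only stored by __init__, so None suffices here:
#
#   >>> m = marker(None, ('\x11' * 20, (), 0, 'date:0 0'))
#   >>> m.precnode() == '\x11' * 20
#   True
#   >>> m.date()
#   (0.0, 0)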

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)

def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
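
# Illustrative round trip through the binary format (not in the original
# file): a marker encoded by _encodeonemarker, prefixed with the version
# byte, parses back to the same tuple via _readmarkers.
#
#   >>> m = ('\x11' * 20, ('\x22' * 20,), 0, 'user:test')
#   >>> raw = _encodeonemarker(m)
#   >>> len(raw) == _fmfsize + _fnodesize + len('user:test')
#   True
#   >>> list(_readmarkers(_pack('>B', _fmversion) + raw)) == [m]
#   True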

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
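
# Sketch of the pushkey payload shape (not in the original file): a single
# small marker ends up in one 'dump0' chunk, and each chunk is a complete,
# versioned obsstore payload that _readmarkers can parse on its own.
#
#   >>> markers = [('\x11' * 20, (), 0, '')]
#   >>> keys = _pushkeyescape(markers)
#   >>> keys.keys()
#   ['dump0']
#   >>> list(_readmarkers(base85.b85decode(keys['dump0']))) == markers
#   True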

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
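
# Walk-through with a stub store (not in the original file; short strings
# stand in for 20-byte nodes): one marker rewriting 'A' into 'B' and 'C'
# makes all three reachable from 'A'.
#
#   >>> class fakestore(object):
#   ...     successors = {'A': [('A', ('B', 'C'), 0, '')]}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B', 'C']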

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

def successorssets(repo, initialnode, cache=None):
    """Return all successors sets of initialnode

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """
    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # every element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set, first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
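
# Worked example of the outcomes above (not in the original file; letters
# stand in for real nodes, in the docstring's notation). With markers
# (A, (B,)) and (B, (C, D)) in the store, and C and D known and
# non-obsolete:
#
#   successorssets(repo, A)  ->  [(C, D)]       # replacement then split
#
# With divergent markers (A, (B,)) and (A, (C,)) instead:
#
#   successorssets(repo, A)  ->  [(B,), (C,)]   # two competing rewrites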

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
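
# Usage sketch (not in the original file): the accepted names are the ones
# registered with @cachefor below, e.g.
#
#   obs = getrevs(repo, 'obsolete')       # set of obsolete revision numbers
#   unstable = getrevs(repo, 'unstable')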

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if split is very
            # common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent

def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
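
# Typical entry point usage (illustrative sketch, not in the original file;
# `oldctx` and `newctx` are hypothetical changectx objects): obsolete
# `oldctx` in favour of `newctx` while holding the repo lock, since this
# function does not take one itself.
#
#   lock = repo.lock()
#   try:
#       createmarkers(repo, [(oldctx, (newctx,))],
#                     metadata={'note': 'rewritten by my extension'})
#   finally:
#       lock.release()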