/lib-python/2.7/difflib.py
- #! /usr/bin/env python
- """
- Module difflib -- helpers for computing deltas between objects.
- Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
- Use SequenceMatcher to return list of the best "good enough" matches.
- Function context_diff(a, b):
- For two lists of strings, return a delta in context diff format.
- Function ndiff(a, b):
- Return a delta: the difference between `a` and `b` (lists of strings).
- Function restore(delta, which):
- Return one of the two sequences that generated an ndiff delta.
- Function unified_diff(a, b):
- For two lists of strings, return a delta in unified diff format.
- Class SequenceMatcher:
- A flexible class for comparing pairs of sequences of any type.
- Class Differ:
- For producing human-readable deltas from sequences of lines of text.
- Class HtmlDiff:
- For producing HTML side by side comparison with change highlights.
- """
- __all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
- 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
- 'unified_diff', 'HtmlDiff', 'Match']
- import heapq
- from collections import namedtuple as _namedtuple
- from functools import reduce
- Match = _namedtuple('Match', 'a b size')
- def _calculate_ratio(matches, length):
- if length:
- return 2.0 * matches / length
- return 1.0
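- # For illustration, the 2.0*M/T formula worked through by hand for the pair
- # "abcd" / "bcde": the matching material is "bcd", so M == 3 matched elements,
- # T == len(a) + len(b) == 8, and the ratio is 2.0 * 3 / 8 == 0.75 -- the same
- # value the SequenceMatcher doctests below report:
- #
- #     >>> _calculate_ratio(3, 8)
- #     0.75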
- class SequenceMatcher:
- """
- SequenceMatcher is a flexible class for comparing pairs of sequences of
- any type, so long as the sequence elements are hashable. The basic
- algorithm predates, and is a little fancier than, an algorithm
- published in the late 1980's by Ratcliff and Obershelp under the
- hyperbolic name "gestalt pattern matching". The basic idea is to find
- the longest contiguous matching subsequence that contains no "junk"
- elements (R-O doesn't address junk). The same idea is then applied
- recursively to the pieces of the sequences to the left and to the right
- of the matching subsequence. This does not yield minimal edit
- sequences, but does tend to yield matches that "look right" to people.
- SequenceMatcher tries to compute a "human-friendly diff" between two
- sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
- longest *contiguous* & junk-free matching subsequence. That's what
- catches people's eyes. The Windows(tm) windiff has another interesting
- notion, pairing up elements that appear uniquely in each sequence.
- That, and the method here, appear to yield more intuitive difference
- reports than does diff. This method appears to be the least vulnerable
- to synching up on blocks of "junk lines", though (like blank lines in
- ordinary text files, or maybe "<P>" lines in HTML files). That may be
- because this is the only method of the 3 that has a *concept* of
- "junk" <wink>.
- Example, comparing two strings, and considering blanks to be "junk":
- >>> s = SequenceMatcher(lambda x: x == " ",
- ... "private Thread currentThread;",
- ... "private volatile Thread currentThread;")
- >>>
- .ratio() returns a float in [0, 1], measuring the "similarity" of the
- sequences. As a rule of thumb, a .ratio() value over 0.6 means the
- sequences are close matches:
- >>> print round(s.ratio(), 3)
- 0.866
- >>>
- If you're only interested in where the sequences match,
- .get_matching_blocks() is handy:
- >>> for block in s.get_matching_blocks():
- ... print "a[%d] and b[%d] match for %d elements" % block
- a[0] and b[0] match for 8 elements
- a[8] and b[17] match for 21 elements
- a[29] and b[38] match for 0 elements
- Note that the last tuple returned by .get_matching_blocks() is always a
- dummy, (len(a), len(b), 0), and this is the only case in which the last
- tuple element (number of elements matched) is 0.
- If you want to know how to change the first sequence into the second,
- use .get_opcodes():
- >>> for opcode in s.get_opcodes():
- ... print "%6s a[%d:%d] b[%d:%d]" % opcode
- equal a[0:8] b[0:8]
- insert a[8:8] b[8:17]
- equal a[8:29] b[17:38]
- See the Differ class for a fancy human-friendly file differencer, which
- uses SequenceMatcher both to compare sequences of lines, and to compare
- sequences of characters within similar (near-matching) lines.
- See also function get_close_matches() in this module, which shows how
- simple code building on SequenceMatcher can be used to do useful work.
- Timing: Basic R-O is cubic time worst case and quadratic time expected
- case. SequenceMatcher is quadratic time for the worst case and has
- expected-case behavior dependent in a complicated way on how many
- elements the sequences have in common; best case time is linear.
- Methods:
- __init__(isjunk=None, a='', b='')
- Construct a SequenceMatcher.
- set_seqs(a, b)
- Set the two sequences to be compared.
- set_seq1(a)
- Set the first sequence to be compared.
- set_seq2(b)
- Set the second sequence to be compared.
- find_longest_match(alo, ahi, blo, bhi)
- Find longest matching block in a[alo:ahi] and b[blo:bhi].
- get_matching_blocks()
- Return list of triples describing matching subsequences.
- get_opcodes()
- Return list of 5-tuples describing how to turn a into b.
- ratio()
- Return a measure of the sequences' similarity (float in [0,1]).
- quick_ratio()
- Return an upper bound on .ratio() relatively quickly.
- real_quick_ratio()
- Return an upper bound on ratio() very quickly.
- """
- def __init__(self, isjunk=None, a='', b='', autojunk=True):
- """Construct a SequenceMatcher.
- Optional arg isjunk is None (the default), or a one-argument
- function that takes a sequence element and returns true iff the
- element is junk. None is equivalent to passing "lambda x: 0", i.e.
- no elements are considered to be junk. For example, pass
- lambda x: x in " \\t"
- if you're comparing lines as sequences of characters, and don't
- want to synch up on blanks or hard tabs.
- Optional arg a is the first of two sequences to be compared. By
- default, an empty string. The elements of a must be hashable. See
- also .set_seqs() and .set_seq1().
- Optional arg b is the second of two sequences to be compared. By
- default, an empty string. The elements of b must be hashable. See
- also .set_seqs() and .set_seq2().
- Optional arg autojunk should be set to False to disable the
- "automatic junk heuristic" that treats popular elements as junk
- (see module documentation for more information).
- """
- # Members:
- # a
- # first sequence
- # b
- # second sequence; differences are computed as "what do
- # we need to do to 'a' to change it into 'b'?"
- # b2j
- # for x in b, b2j[x] is a list of the indices (into b)
- # at which x appears; junk elements do not appear
- # fullbcount
- # for x in b, fullbcount[x] == the number of times x
- # appears in b; only materialized if really needed (used
- # only for computing quick_ratio())
- # matching_blocks
- # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
- # ascending & non-overlapping in i and in j; terminated by
- # a dummy (len(a), len(b), 0) sentinel
- # opcodes
- # a list of (tag, i1, i2, j1, j2) tuples, where tag is
- # one of
- # 'replace' a[i1:i2] should be replaced by b[j1:j2]
- # 'delete' a[i1:i2] should be deleted
- # 'insert' b[j1:j2] should be inserted
- # 'equal' a[i1:i2] == b[j1:j2]
- # isjunk
- # a user-supplied function taking a sequence element and
- # returning true iff the element is "junk" -- this has
- # subtle but helpful effects on the algorithm, which I'll
- # get around to writing up someday <0.9 wink>.
- # DON'T USE! Only __chain_b uses this. Use isbjunk.
- # isbjunk
- # for x in b, isbjunk(x) == isjunk(x) but much faster;
- # it's really the __contains__ method of a hidden dict.
- # DOES NOT WORK for x in a!
- # isbpopular
- # for x in b, isbpopular(x) is true iff b is reasonably long
- # (at least 200 elements) and x accounts for more than 1 + 1% of
- # its elements (when autojunk is enabled).
- # DOES NOT WORK for x in a!
- self.isjunk = isjunk
- self.a = self.b = None
- self.autojunk = autojunk
- self.set_seqs(a, b)
- def set_seqs(self, a, b):
- """Set the two sequences to be compared.
- >>> s = SequenceMatcher()
- >>> s.set_seqs("abcd", "bcde")
- >>> s.ratio()
- 0.75
- """
- self.set_seq1(a)
- self.set_seq2(b)
- def set_seq1(self, a):
- """Set the first sequence to be compared.
- The second sequence to be compared is not changed.
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.set_seq1("bcde")
- >>> s.ratio()
- 1.0
- >>>
- SequenceMatcher computes and caches detailed information about the
- second sequence, so if you want to compare one sequence S against
- many sequences, use .set_seq2(S) once and call .set_seq1(x)
- repeatedly for each of the other sequences.
- See also set_seqs() and set_seq2().
- """
- if a is self.a:
- return
- self.a = a
- self.matching_blocks = self.opcodes = None
- def set_seq2(self, b):
- """Set the second sequence to be compared.
- The first sequence to be compared is not changed.
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.set_seq2("abcd")
- >>> s.ratio()
- 1.0
- >>>
- SequenceMatcher computes and caches detailed information about the
- second sequence, so if you want to compare one sequence S against
- many sequences, use .set_seq2(S) once and call .set_seq1(x)
- repeatedly for each of the other sequences.
- See also set_seqs() and set_seq1().
- """
- if b is self.b:
- return
- self.b = b
- self.matching_blocks = self.opcodes = None
- self.fullbcount = None
- self.__chain_b()
- # For each element x in b, set b2j[x] to a list of the indices in
- # b where x appears; the indices are in increasing order; note that
- # the number of times x appears in b is len(b2j[x]) ...
- # when self.isjunk is defined, junk elements don't show up in this
- # map at all, which stops the central find_longest_match method
- # from starting any matching block at a junk element ...
- # also creates the fast isbjunk function ...
- # b2j also does not contain entries for "popular" elements, meaning
- # elements that account for more than 1 + 1% of the total elements, and
- # when the sequence is reasonably large (>= 200 elements); this can
- # be viewed as an adaptive notion of semi-junk, and yields an enormous
- # speedup when, e.g., comparing program files with hundreds of
- # instances of "return NULL;" ...
- # note that this is only called when b changes; so for cross-product
- # kinds of matches, it's best to call set_seq2 once, then set_seq1
- # repeatedly
- def __chain_b(self):
- # Because isjunk is a user-defined (not C) function, and we test
- # for junk a LOT, it's important to minimize the number of calls.
- # Before the tricks described here, __chain_b was by far the most
- # time-consuming routine in the whole module! If anyone sees
- # Jim Roskind, thank him again for profile.py -- I never would
- # have guessed that.
- # The first trick is to build b2j ignoring the possibility
- # of junk. I.e., we don't call isjunk at all yet. Throwing
- # out the junk later is much cheaper than building b2j "right"
- # from the start.
- b = self.b
- self.b2j = b2j = {}
- for i, elt in enumerate(b):
- indices = b2j.setdefault(elt, [])
- indices.append(i)
- # Purge junk elements
- junk = set()
- isjunk = self.isjunk
- if isjunk:
- for elt in list(b2j.keys()): # using list() since b2j is modified
- if isjunk(elt):
- junk.add(elt)
- del b2j[elt]
- # Purge popular elements that are not junk
- popular = set()
- n = len(b)
- if self.autojunk and n >= 200:
- ntest = n // 100 + 1
- for elt, idxs in list(b2j.items()):
- if len(idxs) > ntest:
- popular.add(elt)
- del b2j[elt]
- # Now for x in b, isjunk(x) == x in junk, but the latter is much faster.
- # Since the number of *unique* junk elements is probably small, the
- # memory burden of keeping this set alive is likely trivial compared to
- # the size of b2j.
- self.isbjunk = junk.__contains__
- self.isbpopular = popular.__contains__
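- # For illustration, the structures __chain_b builds for a small input (a
- # sketch assuming no junk, and autojunk not triggered because b is far
- # shorter than 200 elements):
- #
- #     >>> s = SequenceMatcher(None, "xy", "xaxbx")
- #     >>> sorted(s.b2j.items())
- #     [('a', [1]), ('b', [3]), ('x', [0, 2, 4])]
- #     >>> s.isbjunk('x'), s.isbpopular('x')
- #     (False, False)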
- def find_longest_match(self, alo, ahi, blo, bhi):
- """Find longest matching block in a[alo:ahi] and b[blo:bhi].
- If isjunk is not defined:
- Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
- alo <= i <= i+k <= ahi
- blo <= j <= j+k <= bhi
- and for all (i',j',k') meeting those conditions,
- k >= k'
- i <= i'
- and if i == i', j <= j'
- In other words, of all maximal matching blocks, return one that
- starts earliest in a, and of all those maximal matching blocks that
- start earliest in a, return the one that starts earliest in b.
- >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
- >>> s.find_longest_match(0, 5, 0, 9)
- Match(a=0, b=4, size=5)
- If isjunk is defined, first the longest matching block is
- determined as above, but with the additional restriction that no
- junk element appears in the block. Then that block is extended as
- far as possible by matching (only) junk elements on both sides. So
- the resulting block never matches on junk except as identical junk
- happens to be adjacent to an "interesting" match.
- Here's the same example as before, but considering blanks to be
- junk. That prevents " abcd" from matching the " abcd" at the tail
- end of the second sequence directly. Instead only the "abcd" can
- match, and matches the leftmost "abcd" in the second sequence:
- >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
- >>> s.find_longest_match(0, 5, 0, 9)
- Match(a=1, b=0, size=4)
- If no blocks match, return (alo, blo, 0).
- >>> s = SequenceMatcher(None, "ab", "c")
- >>> s.find_longest_match(0, 2, 0, 1)
- Match(a=0, b=0, size=0)
- """
- # CAUTION: stripping common prefix or suffix would be incorrect.
- # E.g.,
- # ab
- # acab
- # Longest matching block is "ab", but if common prefix is
- # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- # strip, so ends up claiming that ab is changed to acab by
- # inserting "ca" in the middle. That's minimal but unintuitive:
- # "it's obvious" that someone inserted "ac" at the front.
- # Windiff ends up at the same place as diff, but by pairing up
- # the unique 'b's and then matching the first two 'a's.
- a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
- besti, bestj, bestsize = alo, blo, 0
- # find longest junk-free match
- # during an iteration of the loop, j2len[j] = length of longest
- # junk-free match ending with a[i-1] and b[j]
- j2len = {}
- nothing = []
- for i in xrange(alo, ahi):
- # look at all instances of a[i] in b; note that because
- # b2j has no junk keys, the loop is skipped if a[i] is junk
- j2lenget = j2len.get
- newj2len = {}
- for j in b2j.get(a[i], nothing):
- # a[i] matches b[j]
- if j < blo:
- continue
- if j >= bhi:
- break
- k = newj2len[j] = j2lenget(j-1, 0) + 1
- if k > bestsize:
- besti, bestj, bestsize = i-k+1, j-k+1, k
- j2len = newj2len
- # Extend the best by non-junk elements on each end. In particular,
- # "popular" non-junk elements aren't in b2j, which greatly speeds
- # the inner loop above, but also means "the best" match so far
- # doesn't contain any junk *or* popular non-junk elements.
- while besti > alo and bestj > blo and \
- not isbjunk(b[bestj-1]) and \
- a[besti-1] == b[bestj-1]:
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- while besti+bestsize < ahi and bestj+bestsize < bhi and \
- not isbjunk(b[bestj+bestsize]) and \
- a[besti+bestsize] == b[bestj+bestsize]:
- bestsize += 1
- # Now that we have a wholly interesting match (albeit possibly
- # empty!), we may as well suck up the matching junk on each
- # side of it too. Can't think of a good reason not to, and it
- # saves post-processing the (possibly considerable) expense of
- # figuring out what to do with it. In the case of an empty
- # interesting match, this is clearly the right thing to do,
- # because no other kind of match is possible in the regions.
- while besti > alo and bestj > blo and \
- isbjunk(b[bestj-1]) and \
- a[besti-1] == b[bestj-1]:
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- while besti+bestsize < ahi and bestj+bestsize < bhi and \
- isbjunk(b[bestj+bestsize]) and \
- a[besti+bestsize] == b[bestj+bestsize]:
- bestsize = bestsize + 1
- return Match(besti, bestj, bestsize)
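- # A small trace of the j2len dynamic programming above, for illustration:
- # with a = "ab" and b = "cab", the first pass sees a[0] == 'a' at b index 1
- # and records newj2len[1] = 1; the second pass sees a[1] == 'b' at b index 2
- # and, because j2len[1] == 1 from the previous pass, records newj2len[2] = 2,
- # so the best match grows to cover "ab" == b[1:3]:
- #
- #     >>> SequenceMatcher(None, "ab", "cab").find_longest_match(0, 2, 0, 3)
- #     Match(a=0, b=1, size=2)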
- def get_matching_blocks(self):
- """Return list of triples describing matching subsequences.
- Each triple is of the form (i, j, n), and means that
- a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
- i and in j. New in Python 2.5, it's also guaranteed that if
- (i, j, n) and (i', j', n') are adjacent triples in the list, and
- the second is not the last triple in the list, then i+n != i' or
- j+n != j'. IOW, adjacent triples never describe adjacent equal
- blocks.
- The last triple is a dummy, (len(a), len(b), 0), and is the only
- triple with n==0.
- >>> s = SequenceMatcher(None, "abxcd", "abcd")
- >>> s.get_matching_blocks()
- [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
- """
- if self.matching_blocks is not None:
- return self.matching_blocks
- la, lb = len(self.a), len(self.b)
- # This is most naturally expressed as a recursive algorithm, but
- # at least one user bumped into extreme use cases that exceeded
- # the recursion limit on their box. So, now we maintain a list
- # (`queue`) of blocks we still need to look at, and append partial
- # results to `matching_blocks` in a loop; the matches are sorted
- # at the end.
- queue = [(0, la, 0, lb)]
- matching_blocks = []
- while queue:
- alo, ahi, blo, bhi = queue.pop()
- i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
- # a[alo:i] vs b[blo:j] unknown
- # a[i:i+k] same as b[j:j+k]
- # a[i+k:ahi] vs b[j+k:bhi] unknown
- if k: # if k is 0, there was no matching block
- matching_blocks.append(x)
- if alo < i and blo < j:
- queue.append((alo, i, blo, j))
- if i+k < ahi and j+k < bhi:
- queue.append((i+k, ahi, j+k, bhi))
- matching_blocks.sort()
- # It's possible that we have adjacent equal blocks in the
- # matching_blocks list now. Starting with 2.5, this code was added
- # to collapse them.
- i1 = j1 = k1 = 0
- non_adjacent = []
- for i2, j2, k2 in matching_blocks:
- # Is this block adjacent to i1, j1, k1?
- if i1 + k1 == i2 and j1 + k1 == j2:
- # Yes, so collapse them -- this just increases the length of
- # the first block by the length of the second, and the first
- # block so lengthened remains the block to compare against.
- k1 += k2
- else:
- # Not adjacent. Remember the first block (k1==0 means it's
- # the dummy we started with), and make the second block the
- # new block to compare against.
- if k1:
- non_adjacent.append((i1, j1, k1))
- i1, j1, k1 = i2, j2, k2
- if k1:
- non_adjacent.append((i1, j1, k1))
- non_adjacent.append( (la, lb, 0) )
- self.matching_blocks = non_adjacent
- return map(Match._make, self.matching_blocks)
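- # For illustration, summing the block sizes gives the match count M that
- # ratio() (below) plugs into 2.0*M/T; using the doctest pair above:
- #
- #     >>> s = SequenceMatcher(None, "abxcd", "abcd")
- #     >>> sum(size for _, _, size in s.get_matching_blocks())
- #     4
- #     >>> print round(s.ratio(), 3)   # 2.0 * 4 / (5 + 4)
- #     0.889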
- def get_opcodes(self):
- """Return list of 5-tuples describing how to turn a into b.
- Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
- has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
- tuple preceding it, and likewise for j1 == the previous j2.
- The tags are strings, with these meanings:
- 'replace': a[i1:i2] should be replaced by b[j1:j2]
- 'delete': a[i1:i2] should be deleted.
- Note that j1==j2 in this case.
- 'insert': b[j1:j2] should be inserted at a[i1:i1].
- Note that i1==i2 in this case.
- 'equal': a[i1:i2] == b[j1:j2]
- >>> a = "qabxcd"
- >>> b = "abycdf"
- >>> s = SequenceMatcher(None, a, b)
- >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
- ... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
- ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
- delete a[0:1] (q) b[0:0] ()
- equal a[1:3] (ab) b[0:2] (ab)
- replace a[3:4] (x) b[2:3] (y)
- equal a[4:6] (cd) b[3:5] (cd)
- insert a[6:6] () b[5:6] (f)
- """
- if self.opcodes is not None:
- return self.opcodes
- i = j = 0
- self.opcodes = answer = []
- for ai, bj, size in self.get_matching_blocks():
- # invariant: we've pumped out correct diffs to change
- # a[:i] into b[:j], and the next matching block is
- # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- # out a diff to change a[i:ai] into b[j:bj], pump out
- # the matching block, and move (i,j) beyond the match
- tag = ''
- if i < ai and j < bj:
- tag = 'replace'
- elif i < ai:
- tag = 'delete'
- elif j < bj:
- tag = 'insert'
- if tag:
- answer.append( (tag, i, ai, j, bj) )
- i, j = ai+size, bj+size
- # the list of matching blocks is terminated by a
- # sentinel with size 0
- if size:
- answer.append( ('equal', ai, i, bj, j) )
- return answer
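- # For illustration, a hypothetical helper (not defined in this module) that
- # shows how the opcodes are meant to be consumed -- replaying them rebuilds
- # b from a:
- #
- #     def _replay_opcodes(a, b, opcodes):   # hypothetical, illustration only
- #         out = []
- #         for tag, i1, i2, j1, j2 in opcodes:
- #             if tag == 'equal':
- #                 out.extend(a[i1:i2])             # unchanged run, copy from a
- #             elif tag in ('replace', 'insert'):
- #                 out.extend(b[j1:j2])             # take the new material from b
- #             # 'delete' contributes nothing
- #         return out
- #
- #     s = SequenceMatcher(None, "qabxcd", "abycdf")
- #     assert ''.join(_replay_opcodes("qabxcd", "abycdf", s.get_opcodes())) == "abycdf"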
- def get_grouped_opcodes(self, n=3):
- """ Isolate change clusters by eliminating ranges with no changes.
- Return a generator of groups with up to n lines of context.
- Each group is in the same format as returned by get_opcodes().
- >>> from pprint import pprint
- >>> a = map(str, range(1,40))
- >>> b = a[:]
- >>> b[8:8] = ['i'] # Make an insertion
- >>> b[20] += 'x' # Make a replacement
- >>> b[23:28] = [] # Make a deletion
- >>> b[30] += 'y' # Make another replacement
- >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
- [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
- [('equal', 16, 19, 17, 20),
- ('replace', 19, 20, 20, 21),
- ('equal', 20, 22, 21, 23),
- ('delete', 22, 27, 23, 23),
- ('equal', 27, 30, 23, 26)],
- [('equal', 31, 34, 27, 30),
- ('replace', 34, 35, 30, 31),
- ('equal', 35, 38, 31, 34)]]
- """
- codes = self.get_opcodes()
- if not codes:
- codes = [("equal", 0, 1, 0, 1)]
- # Fixup leading and trailing groups if they show no changes.
- if codes[0][0] == 'equal':
- tag, i1, i2, j1, j2 = codes[0]
- codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
- if codes[-1][0] == 'equal':
- tag, i1, i2, j1, j2 = codes[-1]
- codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
- nn = n + n
- group = []
- for tag, i1, i2, j1, j2 in codes:
- # End the current group and start a new one whenever
- # there is a large range with no changes.
- if tag == 'equal' and i2-i1 > nn:
- group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
- yield group
- group = []
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
- group.append((tag, i1, i2, j1, j2))
- if group and not (len(group)==1 and group[0][0] == 'equal'):
- yield group
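- # Note, for illustration: when the two sequences are identical there is
- # nothing to report, so the generator yields no groups at all (the lone
- # 'equal' opcode is dropped by the final "len(group)==1" test above):
- #
- #     >>> list(SequenceMatcher(None, "abc", "abc").get_grouped_opcodes())
- #     []
- #
- # unified_diff() and context_diff() below are essentially formatters for
- # these groups.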
- def ratio(self):
- """Return a measure of the sequences' similarity (float in [0,1]).
- Where T is the total number of elements in both sequences, and
- M is the number of matches, this is 2.0*M / T.
- Note that this is 1 if the sequences are identical, and 0 if
- they have nothing in common.
- .ratio() is expensive to compute if you haven't already computed
- .get_matching_blocks() or .get_opcodes(), in which case you may
- want to try .quick_ratio() or .real_quick_ratio() first to get an
- upper bound.
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.quick_ratio()
- 0.75
- >>> s.real_quick_ratio()
- 1.0
- """
- matches = reduce(lambda sum, triple: sum + triple[-1],
- self.get_matching_blocks(), 0)
- return _calculate_ratio(matches, len(self.a) + len(self.b))
- def quick_ratio(self):
- """Return an upper bound on ratio() relatively quickly.
- This isn't defined beyond that it is an upper bound on .ratio(), and
- is faster to compute.
- """
- # viewing a and b as multisets, set matches to the cardinality
- # of their intersection; this counts the number of matches
- # without regard to order, so is clearly an upper bound
- if self.fullbcount is None:
- self.fullbcount = fullbcount = {}
- for elt in self.b:
- fullbcount[elt] = fullbcount.get(elt, 0) + 1
- fullbcount = self.fullbcount
- # avail[x] is the number of times x appears in 'b' less the
- # number of times we've seen it in 'a' so far ... kinda
- avail = {}
- availhas, matches = avail.__contains__, 0
- for elt in self.a:
- if availhas(elt):
- numb = avail[elt]
- else:
- numb = fullbcount.get(elt, 0)
- avail[elt] = numb - 1
- if numb > 0:
- matches = matches + 1
- return _calculate_ratio(matches, len(self.a) + len(self.b))
- def real_quick_ratio(self):
- """Return an upper bound on ratio() very quickly.
- This isn't defined beyond that it is an upper bound on .ratio(), and
- is faster to compute than either .ratio() or .quick_ratio().
- """
- la, lb = len(self.a), len(self.b)
- # can't have more matches than the number of elements in the
- # shorter sequence
- return _calculate_ratio(min(la, lb), la + lb)
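- # For illustration, quick_ratio() and real_quick_ratio() are progressively
- # cheaper (and looser) upper bounds on ratio(); get_close_matches() below
- # tests the cheap bounds first and only pays for ratio() when they pass:
- #
- #     >>> s = SequenceMatcher(None, "abcd", "bcde")
- #     >>> s.real_quick_ratio(), s.quick_ratio(), s.ratio()
- #     (1.0, 0.75, 0.75)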
- def get_close_matches(word, possibilities, n=3, cutoff=0.6):
- """Use SequenceMatcher to return list of the best "good enough" matches.
- word is a sequence for which close matches are desired (typically a
- string).
- possibilities is a list of sequences against which to match word
- (typically a list of strings).
- Optional arg n (default 3) is the maximum number of close matches to
- return. n must be > 0.
- Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
- that don't score at least that similar to word are ignored.
- The best (no more than n) matches among the possibilities are returned
- in a list, sorted by similarity score, most similar first.
- >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
- ['apple', 'ape']
- >>> import keyword as _keyword
- >>> get_close_matches("wheel", _keyword.kwlist)
- ['while']
- >>> get_close_matches("apple", _keyword.kwlist)
- []
- >>> get_close_matches("accept", _keyword.kwlist)
- ['except']
- """
- if not n > 0:
- raise ValueError("n must be > 0: %r" % (n,))
- if not 0.0 <= cutoff <= 1.0:
- raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
- result = []
- s = SequenceMatcher()
- s.set_seq2(word)
- for x in possibilities:
- s.set_seq1(x)
- if s.real_quick_ratio() >= cutoff and \
- s.quick_ratio() >= cutoff and \
- s.ratio() >= cutoff:
- result.append((s.ratio(), x))
- # Move the best scorers to head of list
- result = heapq.nlargest(n, result)
- # Strip scores for the best n matches
- return [x for score, x in result]
- def _count_leading(line, ch):
- """
- Return number of `ch` characters at the start of `line`.
- Example:
- >>> _count_leading(' abc', ' ')
- 3
- """
- i, n = 0, len(line)
- while i < n and line[i] == ch:
- i += 1
- return i
- class Differ:
- r"""
- Differ is a class for comparing sequences of lines of text, and
- producing human-readable differences or deltas. Differ uses
- SequenceMatcher both to compare sequences of lines, and to compare
- sequences of characters within similar (near-matching) lines.
- Each line of a Differ delta begins with a two-letter code:
- '- ' line unique to sequence 1
- '+ ' line unique to sequence 2
- ' ' line common to both sequences
- '? ' line not present in either input sequence
- Lines beginning with '? ' attempt to guide the eye to intraline
- differences, and were not present in either input sequence. These lines
- can be confusing if the sequences contain tab characters.
- Note that Differ makes no claim to produce a *minimal* diff. To the
- contrary, minimal diffs are often counter-intuitive, because they synch
- up anywhere possible, sometimes accidental matches 100 pages apart.
- Restricting synch points to contiguous matches preserves some notion of
- locality, at the occasional cost of producing a longer diff.
- Example: Comparing two texts.
- First we set up the texts, sequences of individual single-line strings
- ending with newlines (such sequences can also be obtained from the
- `readlines()` method of file-like objects):
- >>> text1 = ''' 1. Beautiful is better than ugly.
- ... 2. Explicit is better than implicit.
- ... 3. Simple is better than complex.
- ... 4. Complex is better than complicated.
- ... '''.splitlines(1)
- >>> len(text1)
- 4
- >>> text1[0][-1]
- '\n'
- >>> text2 = ''' 1. Beautiful is better than ugly.
- ... 3. Simple is better than complex.
- ... 4. Complicated is better than complex.
- ... 5. Flat is better than nested.
- ... '''.splitlines(1)
- Next we instantiate a Differ object:
- >>> d = Differ()
- Note that when instantiating a Differ object we may pass functions to
- filter out line and character 'junk'. See Differ.__init__ for details.
- Finally, we compare the two:
- >>> result = list(d.compare(text1, text2))
- 'result' is a list of strings, so let's pretty-print it:
- >>> from pprint import pprint as _pprint
- >>> _pprint(result)
- [' 1. Beautiful is better than ugly.\n',
- '- 2. Explicit is better than implicit.\n',
- '- 3. Simple is better than complex.\n',
- '+ 3. Simple is better than complex.\n',
- '? ++\n',
- '- 4. Complex is better than complicated.\n',
- '? ^ ---- ^\n',
- '+ 4. Complicated is better than complex.\n',
- '? ++++ ^ ^\n',
- '+ 5. Flat is better than nested.\n']
- As a single multi-line string it looks like this:
- >>> print ''.join(result),
- 1. Beautiful is better than ugly.
- - 2. Explicit is better than implicit.
- - 3. Simple is better than complex.
- + 3. Simple is better than complex.
- ? ++
- - 4. Complex is better than complicated.
- ? ^ ---- ^
- + 4. Complicated is better than complex.
- ? ++++ ^ ^
- + 5. Flat is better than nested.
- Methods:
- __init__(linejunk=None, charjunk=None)
- Construct a text differencer, with optional filters.
- compare(a, b)
- Compare two sequences of lines; generate the resulting delta.
- """
- def __init__(self, linejunk=None, charjunk=None):
- """
- Construct a text differencer, with optional filters.
- The two optional keyword parameters are for filter functions:
- - `linejunk`: A function that should accept a single string argument,
- and return true iff the string is junk. The module-level function
- `IS_LINE_JUNK` may be used to filter out lines without visible
- characters, except for at most one splat ('#'). It is recommended
- to leave linejunk None; as of Python 2.3, the underlying
- SequenceMatcher class has grown an adaptive notion of "noise" lines
- that's better than any static definition the author has ever been
- able to craft.
- - `charjunk`: A function that should accept a string of length 1. The
- module-level function `IS_CHARACTER_JUNK` may be used to filter out
- whitespace characters (a blank or tab; **note**: bad idea to include
- newline in this!). Use of IS_CHARACTER_JUNK is recommended.
- """
- self.linejunk = linejunk
- self.charjunk = charjunk
- def compare(self, a, b):
- r"""
- Compare two sequences of lines; generate the resulting delta.
- Each sequence must contain individual single-line strings ending with
- newlines. Such sequences can be obtained from the `readlines()` method
- of file-like objects. The delta generated also consists of newline-
- terminated strings, ready to be printed as-is via the writelines()
- method of a file-like object.
- Example:
- >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
- ... 'ore\ntree\nemu\n'.splitlines(1))),
- - one
- ? ^
- + ore
- ? ^
- - two
- - three
- ? -
- + tree
- + emu
- """
- cruncher = SequenceMatcher(self.linejunk, a, b)
- for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
- if tag == 'replace':
- g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
- elif tag == 'delete':
- g = self._dump('-', a, alo, ahi)
- elif tag == 'insert':
- g = self._dump('+', b, blo, bhi)
- elif tag == 'equal':
- g = self._dump(' ', a, alo, ahi)
- else:
- raise ValueError, 'unknown tag %r' % (tag,)
- for line in g:
- yield line
- def _dump(self, tag, x, lo, hi):
- """Generate comparison results for a same-tagged range."""
- for i in xrange(lo, hi):
- yield '%s %s' % (tag, x[i])
- def _plain_replace(self, a, alo, ahi, b, blo, bhi):
- assert alo < ahi and blo < bhi
- # dump the shorter block first -- reduces the burden on short-term
- # memory if the blocks are of very different sizes
- if bhi - blo < ahi - alo:
- first = self._dump('+', b, blo, bhi)
- second = self._dump('-', a, alo, ahi)
- else:
- first = self._dump('-', a, alo, ahi)
- second = self._dump('+', b, blo, bhi)
- for g in first, second:
- for line in g:
- yield line
- def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
- r"""
- When replacing one block of lines with another, search the blocks
- for *similar* lines; the best-matching pair (if any) is used as a
- synch point, and intraline difference marking is done on the
- similar pair. Lots of work, but often worth it.
- Example:
- >>> d = Differ()
- >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
- ... ['abcdefGhijkl\n'], 0, 1)
- >>> print ''.join(results),
- - abcDefghiJkl
- ? ^ ^ ^
- + abcdefGhijkl
- ? ^ ^ ^
- """
- # don't synch up unless the lines have a similarity score of at
- # least cutoff; best_ratio tracks the best score seen so far
- best_ratio, cutoff = 0.74, 0.75
- cruncher = SequenceMatcher(self.charjunk)
- eqi, eqj = None, None # 1st indices of equal lines (if any)
- # search for the pair that matches best without being identical
- # (identical lines must be junk lines, & we don't want to synch up
- # on junk -- unless we have to)
- for j in xrange(blo, bhi):
- bj = b[j]
- cruncher.set_seq2(bj)
- for i in xrange(alo, ahi):
- ai = a[i]
- if ai == bj:
- if eqi is None:
- eqi, eqj = i, j
- continue
- cruncher.set_seq1(ai)
- # computing similarity is expensive, so use the quick
- # upper bounds first -- have seen this speed up messy
- # compares by a factor of 3.
- # note that ratio() is only expensive to compute the first
- # time it's called on a sequence pair; the expensive part
- # of the computation is cached by cruncher
- if cruncher.real_quick_ratio() > best_ratio and \
- cruncher.quick_ratio() > best_ratio and \
- cruncher.ratio() > best_ratio:
- best_ratio, best_i, best_j = cruncher.ratio(), i, j
- if best_ratio < cutoff:
- # no non-identical "pretty close" pair
- if eqi is None:
- # no identical pair either -- treat it as a straight replace
- for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
- yield line
- return
- # no close pair, but an identical pair -- synch up on that
- best_i, best_j, best_ratio = eqi, eqj, 1.0
- else:
- # there's a close pair, so forget the identical pair (if any)
- eqi = None
- # a[best_i] very similar to b[best_j]; eqi is None iff they're not
- # identical
- # pump out diffs from before the synch point
- for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
- yield line
- # do intraline marking on the synch pair
- aelt, belt = a[best_i], b[best_j]
- if eqi is None:
- # pump out a '-', '?', '+', '?' quad for the synched lines
- atags = btags = ""
- cruncher.set_seqs(aelt, belt)
- for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
- la, lb = ai2 - ai1, bj2 - bj1
- if tag == 'replace':
- atags += '^' * la
- btags += '^' * lb
- elif tag == 'delete':
- atags += '-' * la
- elif tag == 'insert':
- btags += '+' * lb
- elif tag == 'equal':
- atags += ' ' * la
- btags += ' ' * lb
- else:
- raise ValueError, 'unknown tag %r' % (tag,)
- for line in self._qformat(aelt, belt, atags, btags):
- yield line
- else:
- # the synch pair is identical
- yield ' ' + aelt
- # pump out diffs from after the synch point
- for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
- yield line
- def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
- g = []
- if alo < ahi:
- if blo < bhi:
- g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
- else:
- g = self._dump('-', a, alo, ahi)
- elif blo < bhi:
- g = self._dump('+', b, blo, bhi)
- for line in g:
- yield line
- def _qformat(self, aline, bline, atags, btags):
- r"""
- Format "?" output and deal with leading tabs.
- Example:
- >>> d = Differ()
- >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
- ... ' ^ ^ ^ ', ' ^ ^ ^ ')
- >>> for line in results: print repr(line)
- ...
- '- \tabcDefghiJkl\n'
- '? \t ^ ^ ^\n'
- '+ \tabcdefGhijkl\n'
- '? \t ^ ^ ^\n'
- """
- # Can hurt, but will probably help most of the time.
- common = min(_count_leading(aline, "\t"),
- _count_leading(bline, "\t"))
- common = min(common, _count_leading(atags[:common], " "))
- common = min(common, _count_leading(btags[:common], " "))
- atags = atags[common:].rstrip()
- btags = btags[common:].rstrip()
- yield "- " + aline
- if atags:
- yield "? %s%s\n" % ("\t" * common, atags)
- yield "+ " + bline
- if btags:
- yield "? %s%s\n" % ("\t" * common, btags)
- # With respect to junk, an earlier version of ndiff simply refused to
- # *start* a match with a junk element. The result was cases like this:
- # before: private Thread currentThread;
- # after: private volatile Thread currentThread;
- # If you consider whitespace to be junk, the longest contiguous match
- # not starting with junk is "e Thread currentThread". So ndiff reported
- # that "e volatil" was inserted between the 't' and the 'e' in "private".
- # While an accurate view, to people that's absurd. The current version
- # looks for matching blocks that are entirely junk-free, then extends the
- # longest one of those as far as possible but only with matching junk.
- # So now "currentThread" is matched, then extended to suck up the
- # preceding blank; then "private" is matched, and extended to suck up the
- # following blank; then "Thread" is matched; and finally ndiff reports
- # that "volatile " was inserted before "Thread". The only quibble
- # remaining is that perhaps it was really the case that " volatile"
- # was inserted after "private". I can live with that <wink>.
- import re
- def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
- r"""
- Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
- Examples:
- >>> IS_LINE_JUNK('\n')
- True
- >>> IS_LINE_JUNK(' # \n')
- True
- >>> IS_LINE_JUNK('hello\n')
- False
- """
- return pat(line) is not None
- def IS_CHARACTER_JUNK(ch, ws=" \t"):
- r"""
- Return 1 for ignorable character: iff `ch` is a space or tab.
- Examples:
- >>> IS_CHARACTER_JUNK(' ')
- True
- >>> IS_CHARACTER_JUNK('\t')
- True
- >>> IS_CHARACTER_JUNK('\n')
- False
- >>> IS_CHARACTER_JUNK('x')
- False
- """
- return ch in ws
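- # For illustration, these predicates are meant to be handed to Differ as its
- # filter arguments (the ndiff() function further down in this file uses
- # IS_CHARACTER_JUNK as its default charjunk):
- #
- #     >>> d = Differ(charjunk=IS_CHARACTER_JUNK)
- #     >>> diff = list(d.compare('one\ntwo\n'.splitlines(1),
- #     ...                       'ore\ntwo\n'.splitlines(1)))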
- ########################################################################
- ### Unified Diff
- ########################################################################
- def _format_range_unified(start, stop):
- 'Convert range to the "ed" format'
- # Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning = start + 1 # lines start numbering with one
- length = stop - start
- if length == 1:
- return '{}'.format(beginning)
- if not length:
- beginning -= 1 # empty ranges begin at line just before the range
- return '{},{}'.format(beginning, length)
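- # For illustration, the three cases handled above:
- #
- #     >>> _format_range_unified(3, 6)   # three lines starting at line 4
- #     '4,3'
- #     >>> _format_range_unified(3, 4)   # a single line carries no count
- #     '4'
- #     >>> _format_range_unified(3, 3)   # empty range, reported at line 3
- #     '3,0'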
- def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
- tofiledate='', n=3, lineterm='\n'):
- r"""
- Compare two sequences of lines; generate the delta as a unified diff.
- Unified diffs are a compact way of showing line changes and a few
- lines of context. The number of context lines is set by 'n' which
- defaults to three.
- By default, the diff control lines (those with ---, +++, or @@) are
- created with a trailing newline. This is helpful so that inputs
- created from file.readlines() result in diffs that are suitable for
- file.writelines() since both the inputs and outputs have trailing
- newlines.
- For inputs that do not have trailing newlines, set the lineterm
- argument to "" so that the output will be uniformly newline free.
- The unidiff format normally has a header for filenames and modification
- times. Any or all of these may be specified using strings for
- 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
- The modification times are normally expressed in the ISO 8601 format.
- Example:
- >>> for line in unified_diff('one two three four'.split(),
- ... 'zero one tree four'.split(), 'Original', 'Current',
- ... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
- ... lineterm=''):
- ... print line # doctest: +NORMALIZE_WHITESPACE
- --- Original 2005-01-26 23:30:50
- +++ Current 2010-04-02 10:20:52
- @@ -1,4 +1,4 @@
- +zero
- one
- -two
- -three
- +tree
- four
- """
- started = False
- for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
- if not started:
- started = True
- fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
- todate = '\t{}'.format(tofiledate) if tofiledate else ''
- yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
- yield '+++ {}{}{}'.format(tofile, todate, lineterm)
- first, last = group[0], group[-1]
- file1_range = _format_range_unified(first[1], last[2])
- file2_range = _format_range_unified(first[3], last[4])
- yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
- for tag, i1, i2, j1, j2 in group:
- if tag == 'equal':
- for line in a[i1:i2]:
- yield ' ' + line
- continue
- if tag in ('replace', 'delete'):
- for line in a[i1:i2]:
- yield '-' + line
- if tag in ('replace', 'insert'):
- for line in b[j1:j2]:
- yield '+' + line
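- # A typical usage sketch (hypothetical file names, for illustration only):
- # read both files with readlines() so every line keeps its trailing newline,
- # then write the generator's output straight to a stream:
- #
- #     import sys
- #     a = open('old.txt').readlines()
- #     b = open('new.txt').readlines()
- #     sys.stdout.writelines(unified_diff(a, b, fromfile='old.txt',
- #                                        tofile='new.txt'))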
- ########################################################################
- ### Context Diff
- ########################################################################
- def _format_range_context(start, stop):
- 'Convert range to the "ed" format'
- # Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning = start + 1 # lines start numbering with one
- length = stop - start
- if not length:
- beginning -= 1 # empty ranges begin at line just before the range
- if length <= 1:
- return '{}'.format(beginning)
- return '{},{}'.format(beginning, beginning + length - 1)
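- # For illustration, the same ranges as in the unified example above, but in
- # context-diff notation (first,last instead of start,count):
- #
- #     >>> _format_range_context(3, 6)
- #     '4,6'
- #     >>> _format_range_context(3, 4)
- #     '4'
- #     >>> _format_range_context(3, 3)
- #     '3'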
- # See http://www.unix.org/single_unix_specification/
- def context_diff(a, b, fromfile='', tofile='',
- fromfiledate='', tofiledate='', n=3, lineterm='\n'):
- r"""
- Compare two sequences of lines; generate the delta as a context diff.
- Context diffs are a compact way of showing line changes and a few
- lines of context. The number of context lines is set by 'n' which
- defaults to three.
- By default, the diff control lines (those with *** or ---) are
- created with a trailing newline. This is helpful so that inputs
- created from file.readlines() result in diffs that are suitable for
- file.writelines() since both the inputs and outputs have trailing
- newlines.
- For inputs that do not have trailing newlines, set the lineterm
- argument to "" so that the output will be uniformly newline free.
- The context diff format normally has a header for filenames and
- modification times. Any or all of these may be specified using
- strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
- The modification times are normally expressed in the ISO 8601 format.
- If not specified, the strings default to blanks.
- Example:
- >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
- ... 'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current')),
- *** Original
- --- Current
- ***************
- *** 1,4 ****
- one
- ! two
- ! three
- four
- --- 1,4 ----
- + zero
- one
- ! tree
- four
- """
- prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
- started = False
- for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
- if not started:
- started = True
- fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
- todate = '\t{}'.format(tofiledate) if tofiledate else ''
- yie…
(The remainder of the file is truncated.)