PageRenderTime 52ms CodeModel.GetById 14ms RepoModel.GetById 1ms app.codeStats 0ms

/mercurial/patch.py

https://bitbucket.org/mirror/mercurial/
Python | 1929 lines | 1834 code | 48 blank | 47 comment | 109 complexity | 9e5a1f98ae306cb52ea5dce4f36e8bf2 MD5 | raw file
Possible License(s): GPL-2.0

Large files are truncated, but you can click here to view the full file

  1. # patch.py - patch file parsing routines
  2. #
  3. # Copyright 2006 Brendan Cully <brendan@kublai.com>
  4. # Copyright 2007 Chris Mason <chris.mason@oracle.com>
  5. #
  6. # This software may be used and distributed according to the terms of the
  7. # GNU General Public License version 2 or any later version.
  8. import cStringIO, email, os, errno, re, posixpath
  9. import tempfile, zlib, shutil
  10. # On python2.4 you have to import these by name or they fail to
  11. # load. This was not a problem on Python 2.7.
  12. import email.Generator
  13. import email.Parser
  14. from i18n import _
  15. from node import hex, short
  16. import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
  17. gitre = re.compile('diff --git a/(.*) b/(.*)')
  18. class PatchError(Exception):
  19. pass
  20. # public functions
# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # Heuristic: does 'line' look like an RFC-2822-style header line?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # Wrap a list of lines back into a file-like object.
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # Split a stream of concatenated "# HG changeset patch" patches.
        inheader = True
        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                # a new patch starts here; flush the one accumulated so far
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox into messages ("From " separators) and recurse
        # into split() for each message body.
        for line in stream:
            if line.startswith('From '):
                # cur[1:] drops the mbox "From " envelope line
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Let the email package split a (possibly multipart) message and
        # yield each plausibly patch-bearing part as a file-like object.
        def msgfp(m):
            # Flatten a message part back into a seekable buffer.
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # No MIME headers were seen: split by hand on runs of header-like
        # lines, treating each run as the start of a new patch.
        inheader = False
        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the whole input is one plain patch.
        yield chunk(cur)

    class fiter(object):
        # Adapt an object that only has readline() (e.g. an http
        # response) to the (Python 2) iterator protocol.
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Sniff the beginning of the stream to pick the right splitter.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    # The patch body is copied into a temp file the caller must unlink.
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip the "[PATCH n/m]" prefix commonly added by
                # patchbomb-style tools
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # unfold wrapped subject lines
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # This part contains a patch: everything before the diff
                # start is commit-message material.
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        # parse "hg export" metadata lines
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---':
                        # conventional patch/commit-message separator
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff: append to the message
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file on any failure before re-raising
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        # no patch found: discard the temp file, return message metadata only
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None

    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
  223. class patchmeta(object):
  224. """Patched file metadata
  225. 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
  226. or COPY. 'path' is patched file path. 'oldpath' is set to the
  227. origin file when 'op' is either COPY or RENAME, None otherwise. If
  228. file mode is changed, 'mode' is a tuple (islink, isexec) where
  229. 'islink' is True if the file is a symlink and 'isexec' is True if
  230. the file is executable. Otherwise, 'mode' is None.
  231. """
  232. def __init__(self, path):
  233. self.path = path
  234. self.oldpath = None
  235. self.mode = None
  236. self.op = 'MODIFY'
  237. self.binary = False
  238. def setmode(self, mode):
  239. islink = mode & 020000
  240. isexec = mode & 0100
  241. self.mode = (islink, isexec)
  242. def copy(self):
  243. other = patchmeta(self.path)
  244. other.oldpath = self.oldpath
  245. other.mode = self.mode
  246. other.op = self.op
  247. other.binary = self.binary
  248. return other
  249. def _ispatchinga(self, afile):
  250. if afile == '/dev/null':
  251. return self.op == 'ADD'
  252. return afile == 'a/' + (self.oldpath or self.path)
  253. def _ispatchingb(self, bfile):
  254. if bfile == '/dev/null':
  255. return self.op == 'DELETE'
  256. return bfile == 'b/' + self.path
  257. def ispatching(self, afile, bfile):
  258. return self._ispatchinga(afile) and self._ispatchingb(bfile)
  259. def __repr__(self):
  260. return "<patchmeta %s %r>" % (self.op, self.path)
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None  # patchmeta of the file currently being parsed
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # new per-file header: flush the previous one, if any
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # '--- ' ends the extended header block for this file
                gitpatches.append(gp)
                gp = None
                continue
            # extended header lines refine the pending patchmeta
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # the last 6 characters hold the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
  302. class linereader(object):
  303. # simple class to allow pushing lines back into the input stream
  304. def __init__(self, fp):
  305. self.fp = fp
  306. self.buf = []
  307. def push(self, line):
  308. if line is not None:
  309. self.buf.append(line)
  310. def readline(self):
  311. if self.buf:
  312. l = self.buf[0]
  313. del self.buf[0]
  314. return l
  315. return self.fp.readline()
  316. def __iter__(self):
  317. while True:
  318. l = self.readline()
  319. if not l:
  320. break
  321. yield l
class abstractbackend(object):
    """Interface for the targets patches are applied to.

    Concrete subclasses provide the actual file access; this class only
    stores the ui object used for user messages.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file. Doing nothing is a valid implementation.
        """
        pass

    def exists(self, fname):
        """Return True if the target file exists."""
        raise NotImplementedError
class fsbackend(abstractbackend):
    """Patch backend operating directly on filesystem files under basedir."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of f under the backend root
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        # symlinks store their target as the file "data"
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            # 0100 is the owner executable bit
            isexec = self.opener.lstat(fname).st_mode & 0100 != 0
        except OSError, e:
            # a missing file just means "not executable"; anything else
            # is a real error
            if e.errno != errno.ENOENT:
                raise
        return (self.opener.read(fname), (False, isexec))

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects go next to the target file, with a .rej suffix
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
class workingbackend(fsbackend):
    """fsbackend applying patches to the working directory of a repo,
    recording adds/removes/copies so the dirstate can be updated on close.
    """
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist but are not tracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush recorded changes into the dirstate; returns the sorted
        # list of files touched by the patch.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
  426. class filestore(object):
  427. def __init__(self, maxsize=None):
  428. self.opener = None
  429. self.files = {}
  430. self.created = 0
  431. self.maxsize = maxsize
  432. if self.maxsize is None:
  433. self.maxsize = 4*(2**20)
  434. self.size = 0
  435. self.data = {}
  436. def setfile(self, fname, data, mode, copied=None):
  437. if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
  438. self.data[fname] = (data, mode, copied)
  439. self.size += len(data)
  440. else:
  441. if self.opener is None:
  442. root = tempfile.mkdtemp(prefix='hg-patch-')
  443. self.opener = scmutil.opener(root)
  444. # Avoid filename issues with these simple names
  445. fn = str(self.created)
  446. self.opener.write(fn, data)
  447. self.created += 1
  448. self.files[fname] = (fn, mode, copied)
  449. def getfile(self, fname):
  450. if fname in self.data:
  451. return self.data[fname]
  452. if not self.opener or fname not in self.files:
  453. raise IOError
  454. fn, mode, copied = self.files[fname]
  455. return self.opener.read(fn), mode, copied
  456. def close(self):
  457. if self.opener:
  458. shutil.rmtree(self.opener.base)
class repobackend(abstractbackend):
    """Backend reading from a changectx and writing results to a filestore."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx      # context providing the original file contents
        self.store = store  # filestore collecting patched results
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # only files present in the context can be patched
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # translated to IOError: callers treat that as "missing file"
            raise IOError
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep the context's content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # every file the patch touched, changed or removed
        return self.changed | self.removed
  494. # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
  495. unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
  496. contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
  497. eolmodes = ['strict', 'crlf', 'lf', 'auto']
class patchfile(object):
    """One target file being patched: its lines, mode and reject list."""
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None          # EOL style detected from the first line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames read from the store, not the target
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            # missing target is fine when the patch creates the file
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}           # line content -> list of line numbers
        self.dirty = 0
        self.offset = 0          # cumulative line drift from earlier hunks
        self.skew = 0            # drift observed versus hunk-declared starts
        self.rej = []            # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        # Translate EOLs back out according to eolmode before writing.
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Print "patching file X" at most once, as warning or note.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h; return 0 on clean apply, the fuzz amount on a
        fuzzy apply, -1 when the hunk is rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content (or delete the file)
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    # candidate positions where the hunk's first old line
                    # occurs, nearest to oldstart first
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # Flush patched lines and rejects; return the reject count.
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
class hunk(object):
    """One text hunk from a unified or context diff.

    'a' holds the old-side lines (with their '-'/' ' markers), 'b' the
    new-side lines (markers stripped); 'hunk' keeps the raw hunk text
    for reject output.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # Parse the "@@ -s,l +s,l @@" header, then let diffhelpers pull
        # the hunk body lines into self.a/self.b.
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            # omitted length means 1
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Parse a context-diff hunk: old block first, then new block,
        # rebuilding a unified-style representation in self.hunk.
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": trim the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the '+' line into self.hunk at the right spot,
            # skipping over '-' lines already placed there
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing "\ No newline..." marker, if present.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when the parsed line counts match the declared lengths.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Return (old, oldstart, new, newstart) with up to 'fuzz' context
        # lines trimmed and starts converted to 0-based line numbers.
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None          # decoded payload; None until _read succeeds
        self.delta = False        # True for 'delta' (vs 'literal') patches
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # True once the binary payload was decoded successfully.
        return self.text is not None

    def new(self, lines):
        # Return the new file content; a delta is applied against the
        # current content in 'lines'.
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        # Decode a git binary patch: a 'literal N'/'delta N' header, then
        # base85 lines whose first character encodes the chunk length
        # (A-Z -> 1..26, a-z -> 27..52), zlib-compressed overall.
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # corrupt patch: decoded size must match the declared size
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
  948. def parsefilename(str):
  949. # --- filename \t|space stuff
  950. s = str[4:].rstrip('\r\n')
  951. i = s.find('\t')
  952. if i < 0:
  953. i = s.find(' ')
  954. if i < 0:
  955. return s
  956. return s[:i]
  957. def pathstrip(path, strip):
  958. pathlen = len(path)
  959. i = 0
  960. if strip == 0:
  961. return '', path.rstrip()
  962. count = strip
  963. while count > 0:
  964. i = path.find('/', i)
  965. if i == -1:
  966. raise PatchError(_("unable to strip away %d of %d dirs from %s") %
  967. (count, strip, path))
  968. i += 1
  969. # consume '//' in the path
  970. while i < pathlen - 1 and path[i] == '/':
  971. i += 1
  972. count -= 1
  973. return path[:i].lstrip(), path[i:].rstrip()
  974. def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
  975. nulla = afile_orig == "/dev/null"
  976. nullb = bfile_orig == "/dev/null"
  977. create = nulla and hunk.starta == 0 and hunk.lena == 0
  978. remove = nullb and hunk.startb == 0 and hunk.lenb == 0
  979. abase, afile = pathstrip(afile_orig, strip)
  980. gooda = not nulla and backend.exists(afile)
  981. bbase, bfile = pathstrip(bfile_orig, strip)
  982. if afile == bfile:
  983. goodb = gooda
  984. else:
  985. goodb = not nullb and backend.exists(bfile)
  986. missing = not goodb and not gooda and not create
  987. # some diff programs apparently produce patches where the afile is
  988. # not /dev/null, but afile starts with bfile
  989. abasedir = afile[:afile.rfind('/') + 1]
  990. bbasedir = bfile[:bfile.rfind('/') + 1]
  991. if (missing and abasedir == bbasedir and afile.startswith(bfile)
  992. and hunk.starta == 0 and hunk.lena == 0):
  993. create = True
  994. missing = False
  995. # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
  996. # diff is between a file and its backup. In this case, the original
  997. # file should be patched (see original mpatch code).
  998. isbackup = (abase == bbase and bfile.startswith(afile))
  999. fname = None
  1000. if not missing:
  1001. if gooda and goodb:
  1002. fname = isbackup and afile or bfile
  1003. elif gooda:
  1004. fname = afile
  1005. if not fname:
  1006. if not nullb:
  1007. fname = isbackup and afile or bfile
  1008. elif not nulla:
  1009. fname = afile
  1010. else:
  1011. raise PatchError(_("undefined source and destination files"))
  1012. gp = patchmeta(fname)
  1013. if create:
  1014. gp.op = 'ADD'
  1015. elif remove:
  1016. gp.op = 'DELETE'
  1017. return gp
  1018. def scangitpatch(lr, firstline):
  1019. """
  1020. Git patches can emit:
  1021. - rename a to b
  1022. - change b
  1023. - copy a to c
  1024. - change c
  1025. We cannot apply this sequence as-is, the renamed 'a' could not be
  1026. found for it would have been renamed already. And we cannot copy
  1027. from 'b' instead because 'b' would have been changed already. So
  1028. we scan the git patch for copy and rename commands so we can
  1029. perform the copies ahead of time.
  1030. """
  1031. pos = 0
  1032. try:
  1033. pos = lr.fp.tell()
  1034. fp = lr.fp
  1035. except IOError:
  1036. fp = cStringIO.StringIO(lr.fp.read())
  1037. gitlr = linereader(fp)
  1038. gitlr.push(firstline)
  1039. gitpatches = readgitpatch(gitlr)
  1040. fp.seek(pos)
  1041. return gitpatches
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # pending git patchmeta records, in reverse order (popped from the
    # end as the matching diff headers are encountered)
    gitpatches = None
    # our states
    BFILE = 1
    # None: diff flavor not yet known; True: context diff; False:
    # unified diff
    context = None
    lr = linereader(fp)
    while True:
        x = lr.readline()
        if not x:
            break
        # inside a file: '@' opens a unified hunk, a row of stars opens
        # a context hunk, and 'GIT binary patch' opens a binary hunk
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before it
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush queued metadata-only entries (no hunks) until we
            # reach the record matching this diff header
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0
    # emit any remaining metadata-only git entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
  1136. def applybindelta(binchunk, data):
  1137. """Apply a binary delta hunk
  1138. The algorithm used is the algorithm from git's patch-delta.c
  1139. """
  1140. def deltahead(binchunk):
  1141. i = 0
  1142. for c in binchunk:
  1143. i += 1
  1144. if not (ord(c) & 0x80):
  1145. return i
  1146. return i
  1147. out = ""
  1148. s = deltahead(binchunk)
  1149. binchunk = binchunk[s:]
  1150. s = deltahead(binchunk)
  1151. binchunk = binchunk[s:]
  1152. i = 0
  1153. while i < len(binchunk):
  1154. cmd = ord(binchunk[i])
  1155. i += 1
  1156. if (cmd & 0x80):
  1157. offset = 0
  1158. size = 0
  1159. if (cmd & 0x01):
  1160. offset = ord(binchunk[i])
  1161. i += 1
  1162. if (cmd & 0x02):
  1163. offset |= ord(binchunk[i]) << 8
  1164. i += 1
  1165. if (cmd & 0x04):
  1166. offset |= ord(binchunk[i]) << 16
  1167. i += 1
  1168. if (cmd & 0x08):
  1169. offset |= ord(binchunk[i]) << 24
  1170. i += 1
  1171. if (cmd & 0x10):
  1172. size = ord(binchunk[i])
  1173. i += 1
  1174. if (cmd & 0x20):
  1175. size |= ord(binchunk[i]) << 8
  1176. i += 1
  1177. if (cmd & 0x40):
  1178. size |= ord(binchunk[i]) << 16
  1179. i += 1
  1180. if size == 0:
  1181. size = 0x10000
  1182. offset_end = offset + size
  1183. out += data[offset:offset_end]
  1184. elif cmd != 0:
  1185. offset_end = i + cmd
  1186. out += binchunk[i:offset_end]
  1187. i += cmd
  1188. else:
  1189. raise PatchError(_('unexpected delta opcode 0'))
  1190. return out
  1191. def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
  1192. """Reads a patch from fp and tries to apply it.
  1193. Returns 0 for a clean patch, -1 if any rejects were found and 1 if
  1194. there was any fuzz.
  1195. If 'eolmode' is 'strict', the patch content and patched file are
  1196. read in binary mode. Otherwise, line endings are ignored when
  1197. patching then normalized according to 'eolmode'.
  1198. """
  1199. return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
  1200. eolmode=eolmode)
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    """Worker for applydiff(): drive `patcher` over iterhunks() events.

    Returns 0 on a clean apply, 1 when any hunk applied with fuzz, and
    -1 when any hunk was rejected.
    """
    def pstrip(p):
        # git metadata paths carry no leading a/ or b/ component (see
        # iterhunks, which re-adds them), so strip one fewer level than
        # for regular diff file names
        return pathstrip(p, strip - 1)[1]
    rejects = 0
    err = 0
    current_file = None
    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the target file could not be opened; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # hunk applied, but with fuzz
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: derive metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                   strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (mode flip, copy, rename, empty
                # add/delete): no hunk to apply
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                # record the failure but keep patching the other files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash pristine copies of copy/rename sources before they
            # are modified by later hunks
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)
    if current_file:
        rejects += current_file.close()
    if rejects:
        return -1
    return err
  1277. def _externalpatch(ui, repo, patcher, patchname, strip, files,
  1278. similarity):
  1279. """use <patcher> to apply <patchname> to the working directory.
  1280. returns whether patch was applied with fuzz factor."""
  1281. fuzz = False
  1282. args = []
  1283. cwd = repo.root
  1284. if cwd:
  1285. args.append('-d %s' % util.shellquote(cwd))
  1286. fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
  1287. util.shellquote(patchname)))
  1288. try:
  1289. for line in fp:
  1290. line = line.rstrip()
  1291. ui.note(line + '\n')
  1292. if line.startswith('patching file '):
  1293. pf = util.parsepatchoutput(line)
  1294. printed_file = False
  1295. files.add(pf)
  1296. elif line.find('with fuzz') >= 0:
  1297. fuzz = True
  1298. if not printed_file:
  1299. ui.warn(pf + '\n')
  1300. printed_file = True
  1301. ui.warn(line + '\n')
  1302. elif line.find('saving rejects to file') >= 0:
  1303. ui.warn(line + '\n')
  1304. elif line.find('FAILED') >= 0:
  1305. if not printed_file:
  1306. ui.warn(pf + '\n')
  1307. printed_file = True
  1308. ui.warn(line + '\n')
  1309. finally:
  1310. if files:
  1311. scmutil.marktouched(repo, files, similarity)
  1312. code = fp.close()
  1313. if code:
  1314. raise PatchError(_("patch command failed: %s") %
  1315. util.explainexit(code)[0])
  1316. return fuzz
  1317. def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
  1318. if files is None:
  1319. files = set()
  1320. if eolmode is None:
  1321. eolmode = ui.config('patch', 'eol', 'strict')
  1322. if eolmode.lower() not in eolmodes:
  1323. raise util.Abort(_('unsupported line endings type: %s') % eolmode)
  1324. eolmode = eolmode.lower()
  1325. store = filestore()
  1326. try:
  1327. fp = open(patchobj, 'rb'

Large files are truncated, but you can click here to view the full file