
/IPython/utils/_tokenize_py3.py

http://github.com/ipython/ipython
"""Patched version of standard library tokenize, to deal with various bugs.

Based on Python 3.2 code.

Patches:

- Gareth Rees' patch for Python issue #12691 (untokenizing)
  - Except we don't encode the output of untokenize
  - Python 2 compatible syntax, so that it can be byte-compiled at installation
- Newlines in comments and blank lines should be either NL or NEWLINE, depending
  on whether they are in a multi-line statement. Filed as Python issue #17061.
- Export generate_tokens & TokenError
- u and rb literals are allowed under Python 3.3 and above.

------------------------------------------------------------------------------
Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
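
# Illustrative sketch of the token stream described above (not part of the
# original module; exact type numbers and positions vary with the Python
# version in use):
#
#     >>> list(tokenize(iter([b"x = 1\n"]).__next__))
#     [TokenInfo(type=.. (ENCODING), string='utf-8', start=(0, 0), end=(0, 0), line=''),
#      TokenInfo(type=.. (NAME), string='x', start=(1, 0), end=(1, 1), line='x = 1\n'),
#      TokenInfo(type=.. (OP), string='=', ...),
#      TokenInfo(type=.. (NUMBER), string='1', ...),
#      TokenInfo(type=.. (NEWLINE), string='\n', ...),
#      TokenInfo(type=.. (ENDMARKER), string='', ...)]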

from __future__ import absolute_import

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper

cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

__all__ += ["generate_tokens", "TokenError"]

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3


class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

if sys.version_info.minor >= 3:
    StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
else:
    StringPrefix = r'(?:[bB]?[rR]?)?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)

tokenprog, pseudoprog, single3prog, double3prog = map(
    _compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": _compile(Single), '"': _compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

if sys.version_info.minor >= 3:
    # Python 3.3
    for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
        _t2 = _prefix + '"""'
        endprogs[_t2] = double3prog
        triple_quoted[_t2] = _t2
        _t1 = _prefix + "'''"
        endprogs[_t1] = single3prog
        triple_quoted[_t1] = _t1
        single_quoted[_prefix + '"'] = _prefix + '"'
        single_quoted[_prefix + "'"] = _prefix + "'"
    del _prefix, _t2, _t1
    endprogs['u'] = None
    endprogs['U'] = None

del _compile

tabsize = 8


class TokenError(Exception): pass


class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = 'utf-8'

    def add_whitespace(self, tok_type, start):
        row, col = start
        assert row >= self.prev_row
        col_offset = col - self.prev_col
        if col_offset > 0:
            self.tokens.append(" " * col_offset)
        elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
            # Line was backslash-continued.
            self.tokens.append(" ")

    def untokenize(self, tokens):
        iterable = iter(tokens)
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end = t[:4]
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(tok_type, start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        # This import is here to avoid problems when the itertools
        # module is not built yet and tokenize is imported.
        from itertools import chain
        startline = False
        prevstring = False
        indents = []
        toks_append = self.tokens.append

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

def untokenize(tokens):
    """
    Convert ``tokens`` (an iterable) back into Python source code.  Return
    a string.  Unlike the standard library version, the result is not
    encoded to bytes (see the patch notes at the top of this module); any
    ENCODING token in ``tokens`` is skipped rather than applied.

    The result is guaranteed to tokenize back to match the input so that
    the conversion is lossless and round-trips are assured.  The
    guarantee applies only to the token type and token string, as the
    spacing between tokens (column positions) may change.

    :func:`untokenize` has two modes.  If the input tokens are sequences
    of length 2 (``type``, ``string``) then spaces are added as necessary to
    preserve the round-trip property.

    If the input tokens are sequences of length 4 or more (``type``,
    ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
    spaces are added so that each token appears in the result at the
    position indicated by ``start`` and ``end``, if possible.
    """
    return Untokenizer().untokenize(tokens)
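
# A round-trip sketch (illustrative, not part of the original module).  The
# compat (2-tuple) result keeps the same token types and strings, but its
# spacing may differ from the input:
#
#     >>> toks = list(generate_tokens(iter(["a = b + 1\n"]).__next__))
#     >>> untokenize(toks)                                 # full 5-tuples
#     'a = b + 1\n'
#     >>> untokenize((t.type, t.string) for t in toks)     # 2-tuple compat mode
#     'a =b +1 \n'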

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            raise SyntaxError("invalid or missing encoding declaration")

        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
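
# A minimal usage sketch (assumes a file "example.py" exists on disk; purely
# illustrative, not part of the original module):
#
#     with builtins.open("example.py", "rb") as fp:
#         encoding, header_lines = detect_encoding(fp.readline)
#     # encoding     -> e.g. 'utf-8', 'utf-8-sig', or the cookie's declared codec
#     # header_lines -> the raw byte lines (at most two) consumed while looking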

def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
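
# Illustrative use of the helper above (hypothetical filename).  Files opened
# this way are decoded with whatever detect_encoding() reports, so a PEP 263
# cookie in the file is honoured automatically:
#
#     f = open("example_with_cookie.py")   # this module's open(), not builtins.open
#     source = f.read()
#     f.close()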

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
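
# A usage sketch (hypothetical filename; not part of the original module).
# Note that tokenize() wants a readline over *bytes*, unlike generate_tokens()
# further below:
#
#     with builtins.open("example.py", "rb") as fp:
#         for tok in tokenize(fp.readline):
#             print(tok.type, tok_name[tok.type], repr(tok.string))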

def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NEWLINE, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo(NEWLINE, line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')

# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
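
# generate_tokens() is the str-based counterpart: its readline must return
# text lines, and no ENCODING token is emitted.  A small sketch (illustrative
# only):
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)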

if __name__ == "__main__":
    # Quick sanity check
    s = b'''def parseline(self, line):
            """Parse the line into a command name and a string containing
            the arguments.  Returns a tuple containing (command, args, line).
            'command' and 'args' may be None if the line couldn't be parsed.
            """
            line = line.strip()
            if not line:
                return None, None, line
            elif line[0] == '?':
                line = 'help ' + line[1:]
            elif line[0] == '!':
                if hasattr(self, 'do_shell'):
                    line = 'shell ' + line[1:]
                else:
                    return None, None, line
            i, n = 0, len(line)
            while i < n and line[i] in self.identchars: i = i+1
            cmd, arg = line[:i], line[i:].strip()
            return cmd, arg, line
    '''
    for tok in tokenize(iter(s.splitlines()).__next__):
        print(tok)