
/lib-python/2.7/lib2to3/pgen2/tokenize.py

https://bitbucket.org/jerith/pypy
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
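# ---------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): what the
# 5-tuples described above look like in practice.  The in-memory stream
# and the printed output are illustrative assumptions.
#
#     from StringIO import StringIO           # io.StringIO on Python 3
#     for type, tok, start, end, line in generate_tokens(
#             StringIO("x = 1\n").readline):
#         print tok_name[type], repr(tok), start, end
#
# prints, roughly:
#
#     NAME 'x' (1, 0) (1, 1)
#     OP '=' (1, 2) (1, 3)
#     NUMBER '1' (1, 4) (1, 5)
#     NEWLINE '\n' (1, 5) (1, 6)
#     ENDMARKER '' (2, 0) (2, 0)
# ---------------------------------------------------------------------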
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
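# Editor's note (illustrative, not part of the original): the three
# helpers just assemble alternation patterns, e.g.
#     group('a', 'b') -> '(a|b)'
#     any('a', 'b')   -> '(a|b)*'     (zero or more)
#     maybe('a', 'b') -> '(a|b)?'     (optional)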
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
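# Editor's note (illustrative, not part of the original): Number accepts
# Python 2 literals such as '0b101', '0xFFL', '017', '3.14e-2' and '1j';
# Imagnumber is tried first so a trailing 'j' is consumed together with
# the numeric part.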
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
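# Editor's note (illustrative, not part of the original): PseudoToken is
# the workhorse pattern of generate_tokens() below; it skips leading
# whitespace and captures a single token as group 1, e.g.
#     pseudoprog.match('  x = 1').span(1)  ->  (2, 3)     # the name 'x'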
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None,
            'u': None, 'U': None,
            'b': None, 'B': None}
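# Editor's note (not part of the original): endprogs maps an opening
# quote, including any string prefix, to the compiled pattern for the
# rest of the literal; the bare-prefix keys ('r', 'u', 'b', ...) map to
# None so that the endprogs[initial] or endprogs[token[1]] or ... chain
# in generate_tokens() falls through to the quote character itself.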
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"', ):
    single_quoted[t] = t
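# Editor's note (not part of the original): both dicts are used purely
# as sets; generate_tokens() tests membership to decide whether a match
# opens a triple-quoted or single-quoted string literal.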
tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
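# Editor's usage sketch (not part of the original): driving tokenize()
# with a custom callback.  The collector below and the input file name
# are illustrative assumptions; raising StopTokenizing from the callback
# would end the loop early.
#
#     names = []
#     def collect_names(type, token, start, end, line):
#         if type == NAME:
#             names.append(token)
#     tokenize(open('example.py').readline, collect_names)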
class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER):
                tokval += ' '

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
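# Editor's note (illustrative, not part of the original):
#     _get_normal_name("UTF_8")   -> "utf-8"
#     _get_normal_name("Latin-1") -> "iso-8859-1"
#     _get_normal_name("cp1252")  -> "cp1252"     (passed through)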
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return bytes()

    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
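# Editor's usage sketch (not part of the original): detecting a PEP 263
# cookie from an in-memory byte stream; the sample bytes are an
# illustrative assumption.
#
#     from io import BytesIO
#     src = BytesIO("# -*- coding: iso-8859-1 -*-\nx = 1\n")
#     encoding, lines = detect_encoding(src.readline)
#     # encoding == 'iso-8859-1'; lines holds the line already consumed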
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
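# Editor's usage sketch (not part of the original): the docstring's
# limited round trip made concrete, with an in-memory source string as
# an illustrative assumption.
#
#     from StringIO import StringIO           # io.StringIO on Python 3
#     source = "def f(a):\n    return a + 1\n"
#     t1 = [tok[:2] for tok in generate_tokens(StringIO(source).readline)]
#     newcode = untokenize(t1)                # spacing is poor, tokens match
#     readline = iter(newcode.splitlines(1)).next
#     t2 = [tok[:2] for tok in generate_tokens(readline)]
#     assert t1 == t2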
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                     token[:2] in single_quoted or \
                     token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
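# Editor's usage sketch (not part of the original): line breaks inside
# brackets come out as NL rather than NEWLINE, and indentation is
# reported as INDENT/DEDENT pairs.  The sample input is illustrative.
#
#     from StringIO import StringIO           # io.StringIO on Python 3
#     src = "if x:\n    y = (1,\n         2)\n"
#     for tok in generate_tokens(StringIO(src).readline):
#         print tok_name[tok[0]], repr(tok[1])
#     # ... NEWLINE after 'if x:', INDENT before 'y', NL after '1,',
#     # then a DEDENT and ENDMARKER at end of input.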
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)