/Lib/lib2to3/pgen2/tokenize.py


# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
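
# Illustrative note (not part of the original module): these helpers just
# assemble regex alternations, e.g.
#     group('a', 'b')  ->  '(a|b)'
#     any(r'\d')       ->  '(\d)*'
#     maybe('-')       ->  '(-)?'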

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
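
# Illustrative check (not in the original source): the assembled Number
# pattern accepts the usual Python 2 numeric literals, e.g.
#     re.match(Number, '0x7fL').group()     ->  '0x7fL'
#     re.match(Number, '3.14e-10').group()  ->  '3.14e-10'
#     re.match(Number, '10j').group()       ->  '10j'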

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
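
# Illustrative note (not part of the original source): pseudoprog is the
# scanner's workhorse; group 1 spans the token proper, past any leading
# whitespace.  For example:
#     m = pseudoprog.match("    print 1", 0)
#     m.span(1)  ->  (4, 9)    # the NAME token 'print'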

endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None,
            'u': None, 'U': None,
            'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"', ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
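
# A minimal sketch (not part of the original module) of driving the older
# callback interface with a custom tokeneater; the sample source and the
# helper names are hypothetical.
def _tokeneater_demo():
    from StringIO import StringIO
    def eat(type, token, start, end, line):
        # Print just the token kind and its text.
        print tok_name[type], repr(token)
    tokenize(StringIO("x = 1\n").readline, eat)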

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER):
                tokval += ' '

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
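
# A self-contained, runnable version of the docstring's limited-input
# round-trip invariant, offered as a sketch (not part of the original
# module); the sample source is hypothetical.  Note that a list (not a
# bare iterator) must be passed: the 2-tuple path re-iterates the input
# from the start inside compat().
def _roundtrip_demo():
    from StringIO import StringIO
    source = "if x:\n    y = 2 * x\n"
    t1 = [tok[:2] for tok in generate_tokens(StringIO(source).readline)]
    newcode = untokenize(t1)
    readline = iter(newcode.splitlines(1)).next
    t2 = [tok[:2] for tok in generate_tokens(readline)]
    assert t1 == t2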

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                     token[:2] in single_quoted or \
                     token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
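
# Illustrative sketch (not part of the original module): driving
# generate_tokens with a StopIteration-style readline, per the docstring
# above; the sample source is hypothetical.
def _generate_tokens_demo():
    from StringIO import StringIO
    readline = StringIO("pi = 3.14  # approx\n").next
    for type, token, start, end, line in generate_tokens(readline):
        print tok_name[type], repr(token), start, end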

if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)