/Lib/tokenize.py

Source: http://unladen-swallow.googlecode.com/

  1. """Tokenization help for Python programs.
  2. generate_tokens(readline) is a generator that breaks a stream of
  3. text into Python tokens. It accepts a readline-like method which is called
  4. repeatedly to get the next line of input (or "" for EOF). It generates
  5. 5-tuples with these members:
  6. the token type (see token.py)
  7. the token (a string)
  8. the starting (row, column) indices of the token (a 2-tuple of ints)
  9. the ending (row, column) indices of the token (a 2-tuple of ints)
  10. the original line (string)
  11. It is designed to match the working of the Python tokenizer exactly, except
  12. that it produces COMMENT tokens for comments and gives type OP for all
  13. operators
  14. Older entry points
  15. tokenize_loop(readline, tokeneater)
  16. tokenize(readline, tokeneater=printtoken)
  17. are the same, except instead of generating tokens, tokeneater is a callback
  18. function to which the 5 fields described above are passed as 5 arguments,
  19. each time a new token is found."""
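
# Illustrative usage (a sketch, not part of this module; "example.py" is a
# hypothetical input file):
#
#   import tokenize
#   f = open("example.py")
#   for tok_type, tok_str, start, end, line in tokenize.generate_tokens(f.readline):
#       print tokenize.tok_name[tok_type], repr(tok_str), start, end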

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'

import string, re
from token import *

import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
           "generate_tokens", "NL", "untokenize"]
del x
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
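
# These helpers only build alternation groups:
#   group('a', 'b')  -> '(a|b)'
#   any('x')         -> '(x)*'
#   maybe('y')       -> '(y)?'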

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
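# By construction Number matches e.g. '0x1fL', '0b101', '3.14e-2' and '10j'
# each as a single token, long/imaginary suffixes included.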

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None,
            'b': None, 'B': None}
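
# endprogs maps each opening quote (with any u/b/r prefix) to the compiled
# "tail" pattern that consumes up to the matching closer; bare one-letter
# prefixes map to None as placeholders.  For instance,
# endprogs["'''"].match("body''' rest") stops just past the closing quotes.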

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))
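
# For instance, printtoken(NAME, 'x', (1, 0), (1, 1), "x = 1\n") prints
# "1,0-1,1:\tNAME\t'x'" (fields separated by tabs).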

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object.  It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
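
# Illustrative sketch of the callback interface (not part of the module;
# "example.py" is a hypothetical file): collect every comment with its row.
#
#   comments = []
#   def grab_comments(type, token, start, end, line):
#       if type == COMMENT:
#           comments.append((start[0], token))
#   tokenize(open("example.py").readline, grab_comments)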

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly.

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
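
# Illustrative round trip (a sketch): with full 5-tuples the positions are
# preserved, so the exact source text comes back.
#
#   from StringIO import StringIO
#   toks = list(generate_tokens(StringIO("x = 1\n").readline))
#   assert untokenize(toks) == "x = 1\n"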

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as a string.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield (NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
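
# For a one-line input such as "x = 1\n" the stream above is roughly:
#   NAME 'x', OP '=', NUMBER '1', NEWLINE '\n', ENDMARKER ''
# with each tuple also carrying its (row, col) span and the source line.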

if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)