
/Python/system/tokenize.py

https://bitbucket.org/cwalther/moulscript-dlanor
Python | 295 lines
Possible License(s): AGPL-1.0, GPL-3.0
  1. """Tokenization help for Python programs.
  2. generate_tokens(readline) is a generator that breaks a stream of
  3. text into Python tokens. It accepts a readline-like method which is called
  4. repeatedly to get the next line of input (or "" for EOF). It generates
  5. 5-tuples with these members:
  6. the token type (see token.py)
  7. the token (a string)
  8. the starting (row, column) indices of the token (a 2-tuple of ints)
  9. the ending (row, column) indices of the token (a 2-tuple of ints)
  10. the original line (string)
  11. It is designed to match the working of the Python tokenizer exactly, except
  12. that it produces COMMENT tokens for comments and gives type OP for all
  13. operators
  14. Older entry points
  15. tokenize_loop(readline, tokeneater)
  16. tokenize(readline, tokeneater=printtoken)
  17. are the same, except instead of generating tokens, tokeneater is a callback
  18. function to which the 5 fields described above are passed as 5 arguments,
  19. each time a new token is found."""
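
# A minimal usage sketch (added for illustration; assumes Python 2's
# StringIO module, which this file does not import):
#
#     from StringIO import StringIO
#     for tok in generate_tokens(StringIO('x = 1\n').readline):
#         print tok
#
# prints 5-tuples such as (1, 'x', (1, 0), (1, 1), 'x = 1\n'),
# where 1 is token.NAME.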

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from token import *

import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
           "generate_tokens", "NL"]
del x
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
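# Examples of the helpers above (comments added for clarity):
#   group('a', 'bc') == '(a|bc)'
#   any(r'\d')       == r'(\d)*'
#   maybe('x')       == '(x)?'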

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
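# Examples: Number matches '0x1fL' (hex long), '3.14e-2' (float), and '1j'
# (imaginary). Decnumber excludes a leading zero, which Octnumber claims.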

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
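# pseudoprog (compiled just below) is what generate_tokens scans with:
# group 1 is the token text that follows any leading whitespace.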

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None}
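# The prefix-only keys ('r', 'u', ...) map to None so that the lookup chain
# endprogs[initial] or endprogs[token[1]] or endprogs[token[2]] used in
# generate_tokens falls through to the entry for the quote character itself.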

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"'):
    single_quoted[t] = t
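# Both dictionaries are used as sets: only key membership is ever tested.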

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
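
# For example, a hypothetical tokeneater that collects NAME tokens
# (illustrative only; name_eater and 'somefile.py' are not part of this
# module):
#
#     names = []
#     def name_eater(type, token, start, end, line):
#         if type == NAME: names.append(token)
#     tokenize(open('somefile.py').readline, name_eater)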

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]
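    # State set up above:
    #   contstr, contline: text and source lines of a string literal that
    #                      spans multiple physical lines
    #   needcont:          set when such a string may only continue via a
    #                      trailing backslash
    #   parenlev:          bracket nesting depth; inside brackets newlines
    #                      yield NL rather than NEWLINE
    #   indents:           stack of indentation columns for INDENT/DEDENT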
    while 1:                                   # loop over lines in stream
        line = readline()
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                       (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
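            # e.g. moving from column 0 to column 4 pushes 4 and yields one
            # INDENT; a later return to column 0 pops it with one DEDENT.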
        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0
  201. while pos < max:
  202. pseudomatch = pseudoprog.match(line, pos)
  203. if pseudomatch: # scan for tokens
  204. start, end = pseudomatch.span(1)
  205. spos, epos, pos = (lnum, start), (lnum, end), end
  206. token, initial = line[start:end], line[start]
  207. if initial in numchars or \
  208. (initial == '.' and token != '.'): # ordinary number
  209. yield (NUMBER, token, spos, epos, line)
  210. elif initial in '\r\n':
  211. yield (parenlev > 0 and NL or NEWLINE,
  212. token, spos, epos, line)
  213. elif initial == '#':
  214. yield (COMMENT, token, spos, epos, line)
  215. elif token in triple_quoted:
  216. endprog = endprogs[token]
  217. endmatch = endprog.match(line, pos)
  218. if endmatch: # all on one line
  219. pos = endmatch.end(0)
  220. token = line[start:pos]
  221. yield (STRING, token, spos, (lnum, pos), line)
  222. else:
  223. strstart = (lnum, start) # multiple lines
  224. contstr = line[start:]
  225. contline = line
  226. break
  227. elif initial in single_quoted or \
  228. token[:2] in single_quoted or \
  229. token[:3] in single_quoted:
  230. if token[-1] == '\n': # continued string
  231. strstart = (lnum, start)
  232. endprog = (endprogs[initial] or endprogs[token[1]] or
  233. endprogs[token[2]])
  234. contstr, needcont = line[start:], 1
  235. contline = line
  236. break
  237. else: # ordinary string
  238. yield (STRING, token, spos, epos, line)
  239. elif initial in namechars: # ordinary name
  240. yield (NAME, token, spos, epos, line)
  241. elif initial == '\\': # continued stmt
  242. continued = 1
  243. else:
  244. if initial in '([{': parenlev = parenlev + 1
  245. elif initial in ')]}': parenlev = parenlev - 1
  246. yield (OP, token, spos, epos, line)
  247. else:
  248. yield (ERRORTOKEN, line[pos],
  249. (lnum, pos), (lnum, pos+1), line)
  250. pos = pos + 1
  251. for indent in indents[1:]: # pop remaining indent levels
  252. yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
  253. yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
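
# Running this file directly tokenizes the named file (or standard input)
# and prints each token via printtoken.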
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)