
/mypy/lex.py

https://github.com/SRiikonen/mypy-py
Possible License(s): MIT
  1. """Lexical analyzer for mypy.
  2. Translate a string that represents a file or a compilation unit to a list of
  3. tokens.
  4. This module can be run as a script (lex.py FILE).
  5. """
  6. import re
  7. from mypy.util import short_type
  8. class Token:
  9. """Base class for all tokens"""
  10. pre = '' # Space, comments etc. before token
  11. string = None # Token string
  12. line = None # Token line number
  13. def __init__(self, string, pre=''):
  14. self.string = string
  15. self.pre = pre
  16. def __repr__(self):
  17. """The representation is of form Keyword(' if')."""
  18. t = short_type(self)
  19. return t + '(' + self.fix(self.pre) + self.fix(self.string) + ')'
  20. def rep(self):
  21. return self.pre + self.string
  22. def fix(self, s):
  23. """Replace common non-printable chars with escape sequences.
  24. Do not use repr() since we don't want do duplicate backslashes.
  25. """
  26. return s.replace('\n', '\\n').replace('\t', '\\t').replace('\r', '\\r')


# Token classes


class Break(Token):
    """Statement break (line break or semicolon)"""


class Indent(Token):
    """Increase block indent level."""


class Dedent(Token):
    """Decrease block indent level."""


class Eof(Token):
    """End of file"""


class Keyword(Token):
    """Reserved word (other than keyword operators; they use Op).

    Examples: if, class, while, def.
    """


class Name(Token):
    """An alphanumeric identifier"""


class IntLit(Token):
    """Integer literal"""


class StrLit(Token):
    """String literal"""

    def parsed(self):
        """Return the parsed contents of the literal."""
        return _parse_str_literal(self.string)


class BytesLit(Token):
    """Bytes literal"""

    def parsed(self):
        """Return the parsed contents of the literal."""
        return _parse_str_literal(self.string)


class FloatLit(Token):
    """Float literal"""


class Punct(Token):
    """Punctuator (e.g. comma, '(' or '=')"""


class Colon(Token):
    pass


class Op(Token):
    """Operator (e.g. '+' or 'in')"""


class Bom(Token):
    """Byte order mark (at the start of a file)"""


class LexError(Token):
    """Lexer error token"""

    type = None  # One of the error types below

    def __init__(self, string, type):
        super().__init__(string)
        self.type = type


# Lexer error types
NUMERIC_LITERAL_ERROR = 0
UNTERMINATED_STRING_LITERAL = 1
INVALID_CHARACTER = 2
NON_ASCII_CHARACTER_IN_COMMENT = 3
NON_ASCII_CHARACTER_IN_STRING = 4
INVALID_UTF8_SEQUENCE = 5
INVALID_BACKSLASH = 6
INVALID_DEDENT = 7

# Encoding contexts
STR_CONTEXT = 1
COMMENT_CONTEXT = 2


def lex(string):
    """Analyze string and return an array of token objects.

    The last token is always Eof.
    """
    l = Lexer()
    l.lex(string)
    return l.tok
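
# Example: lex('x = 1\n') returns the tokens Name('x'), Punct('='),
# IntLit('1'), Break('\n') and Eof(''), with each run of spaces stored in the
# .pre field of the token that follows it.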


# Reserved words (not including operators)
keywords = set([
    'any', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
    'else', 'except', 'finally', 'from', 'for', 'global', 'if', 'import',
    'interface', 'lambda', 'pass', 'raise', 'return', 'try', 'while', 'with',
    'yield'])

# Alphabetical operators (reserved words)
alpha_operators = set(['in', 'is', 'not', 'and', 'or'])

# String literal prefixes
str_prefixes = set(['r', 'b', 'br'])

# List of regular expressions that match non-alphabetical operators
operators = [re.compile('[-+*/<>.%&|^~]'),
             re.compile('==|!=|<=|>=|\\*\\*|//|<<|>>')]

# List of regular expressions that match punctuator tokens
punctuators = [re.compile('[=,()@]'),
               re.compile('\\['),
               re.compile(']'),
               re.compile('([-+*/%&|^]|\\*\\*|//|<<|>>)=')]
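
# Lexer.lex_misc() tries every pattern in both lists and keeps the longest
# match, so e.g. '**=' is lexed as a single Punct token rather than Op('**')
# followed by '='.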

# Source file encodings
DEFAULT_ENCODING = 0
ASCII_ENCODING = 1
LATIN1_ENCODING = 2
UTF8_ENCODING = 3

# Map single-character string escape sequences to corresponding characters.
escape_map = {'a': '\x07',
              'b': '\x08',
              'f': '\x0c',
              'n': '\x0a',
              'r': '\x0d',
              't': '\x09',
              'v': '\x0b',
              '"': '"',
              "'": "'"}

# Matches the optional prefix of a string literal, e.g. the 'r' in r"foo".
str_prefix_re = re.compile('[rRbB]*')

# Matches an escape sequence in a string, e.g. \n or \x4F.
escape_re = re.compile(
    "\\\\([abfnrtv'\"]|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|[0-7]{1,3})")


def _parse_str_literal(string):
    """Translate escape sequences in str literal to the corresponding chars.

    For example, \t is translated to the tab character (ascii 9).

    Return the translated contents of the literal. Also handle raw and
    triple-quoted string literals.
    """
    prefix = str_prefix_re.match(string).group(0).lower()
    s = string[len(prefix):]
    if s.startswith("'''") or s.startswith('"""'):
        return s[3:-3]
    elif 'r' in prefix:
        return s[1:-1].replace('\\' + s[0], s[0])
    else:
        return escape_re.sub(lambda m: escape_repl(m, prefix), s[1:-1])
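
# Example: for the source text 'a\nb' (quotes included) _parse_str_literal
# returns 'a', a newline and 'b'; for the raw literal r'a\nb' it returns the
# four characters a, backslash, n, b unchanged.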


def escape_repl(m, prefix):
    """Translate a string escape sequence, e.g. \t -> the tab character.

    Assume that the Match object is from escape_re.
    """
    seq = m.group(1)
    if len(seq) == 1 and seq in escape_map:
        # Single-character escape sequence, e.g. \n.
        return escape_map[seq]
    elif seq.startswith('x'):
        # Hexadecimal sequence \xNN.
        return chr(int(seq[1:], 16))
    elif seq.startswith('u'):
        # Unicode sequence \uNNNN.
        if 'b' not in prefix:
            return chr(int(seq[1:], 16))
        else:
            return '\\' + seq
    else:
        # Octal sequence.
        ord = int(seq, 8)
        if 'b' in prefix:
            # Make sure code is no larger than 255 for bytes literals.
            ord = ord % 256
        return chr(ord)


class Lexer:
    """Lexical analyzer."""

    i = None     # Current string index (into s)
    s = None     # The string being analyzed
    line = None  # Current line number
    pre_whitespace = ''     # Whitespace and comments before the next token
    enc = DEFAULT_ENCODING  # Encoding TODO implement properly

    # Generated tokens
    tok = None

    # Table from byte character value to lexer method. E.g. entry at ord('0')
    # contains the method lex_number().
    map = None

    # Indent levels of currently open blocks, in spaces.
    indents = None

    # Open ('s, ['s and {'s without matching closing bracket; used for ignoring
    # newlines within parentheses/brackets.
    open_brackets = None

    def __init__(self):
        self.map = [self.unknown_character] * 256
        self.tok = []
        self.indents = [0]
        self.open_brackets = []
        # Fill in the map from valid character codes to relevant lexer methods.
        for seq, method in [('ABCDEFGHIJKLMNOPQRSTUVWXYZ', self.lex_name),
                            ('abcdefghijklmnopqrstuvwxyz_', self.lex_name),
                            ('0123456789', self.lex_number),
                            ('.', self.lex_number_or_dot),
                            (' ' + '\t' + '\x0c', self.lex_space),
                            ('"', self.lex_str_double),
                            ("'", self.lex_str_single),
                            ('\r' + '\n', self.lex_break),
                            (';', self.lex_semicolon),
                            (':', self.lex_colon),
                            ('#', self.lex_comment),
                            ('\\', self.lex_backslash),
                            ('([{', self.lex_open_bracket),
                            (')]}', self.lex_close_bracket),
                            ('-+*/<>%&|^~=!,@', self.lex_misc)]:
            for c in seq:
                self.map[ord(c)] = method

    def lex(self, s):
        """Lexically analyze a string, storing the tokens at the tok array."""
        self.s = s
        self.i = 0
        self.line = 1

        if s.startswith('\xef\xbb\xbf'):
            self.add_token(Bom(s[0:3]))

        # Parse initial indent; otherwise first-line indent would not generate
        # an error.
        self.lex_indent()

        # Make a local copy of map as a simple optimization.
        map = self.map

        # Lex the file. Repeatedly call the lexer method for the current char.
        while self.i < len(s):
            # Get the character code of the next character to lex.
            c = ord(s[self.i])
            # Dispatch to the relevant lexer method. This will consume some
            # characters in the text, add a token to self.tok and increment
            # self.i.
            map[c]()

        # Append a break if there is no statement/block terminator at the end
        # of input.
        if len(self.tok) > 0 and (not isinstance(self.tok[-1], Break) and
                                  not isinstance(self.tok[-1], Dedent)):
            self.add_token(Break(''))

        # Close remaining open blocks with Dedent tokens.
        self.lex_indent()

        self.add_token(Eof(''))

    def lex_number_or_dot(self):
        """Analyse a token starting with a dot.

        It can be the member access operator or a float literal such as '.123'.
        """
        if self.is_at_number():
            self.lex_number()
        else:
            self.lex_misc()

    number_exp = re.compile(r'[0-9]|\.[0-9]')

    def is_at_number(self):
        """Is the current location at a numeric literal?"""
        return self.match(self.number_exp) != ''
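
    # For example, at '.5' this matches, so the dot starts a float literal;
    # at '.foo' it does not, and lex_number_or_dot() falls back to lex_misc(),
    # which emits the dot as an Op token.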

    # Regexps used by lex_number

    # Decimal/hex/octal literal
    number_exp1 = re.compile('0[xXoO][0-9a-fA-F]+|[0-9]+')
    # Float literal, e.g. '1.23' or '12e+34'
    number_exp2 = re.compile(
        r'[0-9]*\.[0-9]*([eE][-+]?[0-9]+)?|[0-9]+[eE][-+]?[0-9]+')
    # These characters must not appear after a number literal.
    name_char_exp = re.compile('[a-zA-Z0-9_]')

    def lex_number(self):
        """Analyse an int or float literal.

        Assume that the current location points to one of them.
        """
        s1 = self.match(self.number_exp1)
        s2 = self.match(self.number_exp2)

        maxlen = max(len(s1), len(s2))
        if self.name_char_exp.match(
                self.s[self.i + maxlen:self.i + maxlen + 1]) is not None:
            # Error: alphanumeric character after number literal.
            s3 = self.match(re.compile('[0-9][0-9a-zA-Z_]*'))
            maxlen = max(maxlen, len(s3))
            self.add_token(LexError(' ' * maxlen, NUMERIC_LITERAL_ERROR))
        elif len(s1) > len(s2):
            # Integer literal.
            self.add_token(IntLit(s1))
        else:
            # Float literal.
            self.add_token(FloatLit(s2))
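
    # Examples: '0x1f' and '123' yield IntLit tokens, '1.5e3' yields a
    # FloatLit, and '123abc' yields a NUMERIC_LITERAL_ERROR LexError spanning
    # all six characters.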

    name_exp = re.compile('[a-zA-Z_][a-zA-Z0-9_]*')

    def lex_name(self):
        """Analyse a name (an identifier, a keyword or an alphabetical
        operator). This also deals with prefixed string literals such
        as r'...'.
        """
        s = self.match(self.name_exp)
        if s in keywords:
            self.add_token(Keyword(s))
        elif s in alpha_operators:
            self.add_token(Op(s))
        elif s in str_prefixes and self.match(re.compile('[a-z]+[\'"]')) != '':
            self.lex_prefixed_str(s)
        else:
            self.add_token(Name(s))

    # Regexps representing components of string literals

    # Initial part of a single-quoted literal, e.g. b'foo' or b'foo\\\n
    str_exp_single = re.compile(
        r"[a-z]*'([^'\\\r\n]|\\[^\r\n])*('|\\(\n|\r\n?))")
    # Non-initial part of a multiline single-quoted literal, e.g. foo'
    str_exp_single_multi = re.compile(
        r"([^'\\\r\n]|\\[^\r\n])*('|\\(\n|\r\n?))")
    # Initial part of a single-quoted raw literal, e.g. r'foo' or r'foo\\\n
    str_exp_raw_single = re.compile(
        r"[a-z]*'([^'\r\n\\]|\\'|\\[^\n\r])*('|\\(\n|\r\n?))")
    # Non-initial part of a raw multiline single-quoted literal, e.g. foo'
    str_exp_raw_single_multi = re.compile(
        r"([^'\r\n]|'')*('|\\(\n|\r\n?))")
    # Start of a ''' literal, e.g. b'''
    str_exp_single3 = re.compile("[a-z]*'''")
    # End of a ''' literal, e.g. foo'''
    str_exp_single3end = re.compile(r"[^\n\r]*?'''")

    # The following are similar to above (but use double quotes).

    str_exp_double = re.compile(
        r'[a-z]*"([^"\\\r\n]|\\[^\r\n])*("|\\(\n|\r\n?))')
    str_exp_double_multi = re.compile(
        r'([^"\\\r\n]|\\[^\r\n])*("|\\(\n|\r\n?))')
    str_exp_raw_double = re.compile(
        r'[a-z]*"([^"\r\n\\]|\\"|\\[^\n\r])*("|\\(\n|\r\n?))')
    str_exp_raw_double_multi = re.compile(
        r'([^"\r\n]|"")*("|\\(\n|\r\n?))')
    str_exp_double3 = re.compile('[a-z]*"""')
    str_exp_double3end = re.compile(r'[^\n\r]*?"""')

    def lex_str_single(self):
        """Analyse single-quoted string literal"""
        self.lex_str(self.str_exp_single, self.str_exp_single_multi,
                     self.str_exp_single3, self.str_exp_single3end)

    def lex_str_double(self):
        """Analyse double-quoted string literal"""
        self.lex_str(self.str_exp_double, self.str_exp_double_multi,
                     self.str_exp_double3, self.str_exp_double3end)

    def lex_prefixed_str(self, prefix):
        """Analyse a string literal with a prefix, such as r'...'."""
        s = self.match(re.compile('[a-z]+[\'"]'))
        if s.endswith("'"):
            re1 = self.str_exp_single
            re2 = self.str_exp_single_multi
            if 'r' in prefix:
                re1 = self.str_exp_raw_single
                re2 = self.str_exp_raw_single_multi
            self.lex_str(re1, re2, self.str_exp_single3,
                         self.str_exp_single3end, prefix)
        else:
            re1 = self.str_exp_double
            re2 = self.str_exp_double_multi
            if 'r' in prefix:
                re1 = self.str_exp_raw_double
                re2 = self.str_exp_raw_double_multi
            self.lex_str(re1, re2, self.str_exp_double3,
                         self.str_exp_double3end, prefix)

    def lex_str(self, regex, re2, re3, re3end, prefix=''):
        """Analyse a string literal described by regexps. Assume that
        the current location is at the beginning of the literal. The
        arguments re3 and re3end describe the corresponding
        triple-quoted literals.
        """
        s3 = self.match(re3)
        if s3 != '':
            # Triple-quoted string literal.
            self.lex_triple_quoted_str(re3end, prefix)
        else:
            # Single or double quoted string literal.
            s = self.match(regex)
            if s != '':
                if s.endswith('\n') or s.endswith('\r'):
                    self.lex_multiline_string_literal(re2, s)
                else:
                    self.verify_encoding(s, STR_CONTEXT)
                    if 'b' in prefix:
                        self.add_token(BytesLit(s))
                    else:
                        self.add_token(StrLit(s))
            else:
                # Unterminated string literal.
                s = self.match(re.compile('[^\\n\\r]*'))
                self.add_token(LexError(s, UNTERMINATED_STRING_LITERAL))

    def lex_triple_quoted_str(self, re3end, prefix):
        line = self.line
        ss = self.s[self.i:self.i + len(prefix) + 3]
        self.i += len(prefix) + 3
        while True:
            m = re3end.match(self.s, self.i)
            if m is not None:
                break
            m = re.match('[^\\n\\r]*(\\n|\\r\\n?)', self.s[self.i:])
            if m is None:
                self.add_special_token(
                    LexError(ss, UNTERMINATED_STRING_LITERAL), line, 0)
                return
            s = m.group(0)
            ss += s
            self.line += 1
            self.i += len(s)
        lit = None
        if 'b' in prefix:
            lit = BytesLit(ss + m.group(0))
        else:
            lit = StrLit(ss + m.group(0))
        self.add_special_token(lit, line, len(m.group(0)))

    def lex_multiline_string_literal(self, re_end, prefix):
        """Analyze multiline single/double-quoted string literal.

        Use explicit \ for line continuation.
        """
        line = self.line
        self.i += len(prefix)
        ss = prefix
        while True:
            m = self.match(re_end)
            if m == '':
                self.add_special_token(
                    LexError(ss, UNTERMINATED_STRING_LITERAL), line, 0)
                return
            ss += m
            self.line += 1
            self.i += len(m)
            if not m.endswith('\n') and not m.endswith('\r'):
                break
        self.add_special_token(StrLit(ss), line, 0)  # TODO bytes

    comment_exp = re.compile(r'#[^\n\r]*')

    def lex_comment(self):
        """Analyse a comment."""
        s = self.match(self.comment_exp)
        self.verify_encoding(s, COMMENT_CONTEXT)
        self.add_pre_whitespace(s)

    backslash_exp = re.compile(r'\\(\n|\r\n?)')

    def lex_backslash(self):
        s = self.match(self.backslash_exp)
        if s != '':
            self.add_pre_whitespace(s)
            self.line += 1
        else:
            self.add_token(LexError('\\', INVALID_BACKSLASH))

    space_exp = re.compile(r'[ \t\x0c]*')
    indent_exp = re.compile(r'[ \t]*[#\n\r]?')

    def lex_space(self):
        """Analyze a run of whitespace characters (within a line, not indents).

        Only store them in self.pre_whitespace.
        """
        s = self.match(self.space_exp)
        self.add_pre_whitespace(s)

    comment_or_newline = '#' + '\n' + '\r'

    def lex_indent(self):
        """Analyze whitespace chars at the beginning of a line (indents)."""
        s = self.match(self.indent_exp)
        if s != '' and s[-1] in self.comment_or_newline:
            # Empty line (whitespace only or comment only).
            self.add_pre_whitespace(s[:-1])
            if s[-1] == '#':
                self.lex_comment()
            else:
                self.lex_break()
            self.lex_indent()
            return
        indent = self.calc_indent(s)
        if indent == self.indents[-1]:
            # No change in indent: just whitespace.
            self.add_pre_whitespace(s)
        elif indent > self.indents[-1]:
            # An increased indent (new block).
            self.indents.append(indent)
            self.add_token(Indent(s))
        else:
            # Decreased indent (end of one or more blocks).
            pre = self.pre_whitespace
            self.pre_whitespace = ''
            while indent < self.indents[-1]:
                self.add_token(Dedent(''))
                self.indents.pop()
            self.pre_whitespace = pre
            self.add_pre_whitespace(s)
            if indent != self.indents[-1]:
                # Error: indent level does not match a previous indent level.
                self.add_token(LexError('', INVALID_DEDENT))

    def calc_indent(self, s):
        indent = 0
        for ch in s:
            if ch == ' ':
                indent += 1
            else:
                # Tab: 8 spaces (rounded to a multiple of 8).
                indent += 8 - indent % 8
        return indent
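
    # A tab advances the indent to the next multiple of 8, so calc_indent('\t')
    # and calc_indent(' \t') both return 8 and calc_indent('\t\t') returns 16.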

    break_exp = re.compile(r'\r\n|\r|\n|;')

    def lex_break(self):
        """Analyse a line break."""
        s = self.match(self.break_exp)
        if self.ignore_break():
            self.add_pre_whitespace(s)
            self.line += 1
        else:
            self.add_token(Break(s))
            self.line += 1
            self.lex_indent()

    def lex_semicolon(self):
        self.add_token(Break(';'))

    def lex_colon(self):
        self.add_token(Colon(':'))

    open_bracket_exp = re.compile('[[({]')

    def lex_open_bracket(self):
        s = self.match(self.open_bracket_exp)
        self.open_brackets.append(s)
        self.add_token(Punct(s))

    close_bracket_exp = re.compile('[])}]')

    open_bracket = {')': '(', ']': '[', '}': '{'}

    def lex_close_bracket(self):
        s = self.match(self.close_bracket_exp)
        if (self.open_brackets != []
                and self.open_bracket[s] == self.open_brackets[-1]):
            self.open_brackets.pop()
        self.add_token(Punct(s))

    def lex_misc(self):
        """Analyse a non-alphabetical operator or a punctuator."""
        s = ''
        t = None
        for re_list, type in [(operators, Op), (punctuators, Punct)]:
            for re in re_list:
                s2 = self.match(re)
                if len(s2) > len(s):
                    t = type
                    s = s2
        if s == '':
            # Could not match any token; report an invalid character. This is
            # reached at least if the current character is '!' not followed by
            # '='.
            self.add_token(LexError(self.s[self.i], INVALID_CHARACTER))
        else:
            self.add_token(t(s))

    def unknown_character(self):
        """Report an unknown character as a lexical analysis error."""
        self.add_token(LexError(self.s[self.i], INVALID_CHARACTER))

    # Utility methods

    def match(self, pattern):
        """If the argument regexp is matched at the current location,
        return the matched string; otherwise return the empty string.
        """
        m = pattern.match(self.s, self.i)
        if m is not None:
            return m.group(0)
        else:
            return ''

    def add_pre_whitespace(self, s):
        """Record whitespace and comments before the next token.

        The accumulated whitespace/comments will be stored in the next token
        and then it will be cleared.

        This is needed for pretty-printing the original source code while
        preserving comments, indentation, whitespace etc.
        """
        self.pre_whitespace += s
        self.i += len(s)

    def add_token(self, tok):
        """Store a token. Update its line number and record preceding
        whitespace characters and comments.
        """
        if (tok.string == '' and not isinstance(tok, Eof)
                and not isinstance(tok, Break)
                and not isinstance(tok, LexError)
                and not isinstance(tok, Dedent)):
            raise ValueError('Empty token')
        tok.pre = self.pre_whitespace
        tok.line = self.line
        self.tok.append(tok)
        self.i += len(tok.string)
        self.pre_whitespace = ''

    def add_special_token(self, tok, line, skip):
        """Like add_token, but caller sets the number of chars to skip."""
        if (tok.string == '' and not isinstance(tok, Eof)
                and not isinstance(tok, Break)
                and not isinstance(tok, LexError)
                and not isinstance(tok, Dedent)):
            raise ValueError('Empty token')
        tok.pre = self.pre_whitespace
        tok.line = line
        self.tok.append(tok)
        self.i += skip
        self.pre_whitespace = ''

    def ignore_break(self):
        """If the next token is a break, can we ignore it?"""
        if len(self.open_brackets) > 0 or len(self.tok) == 0:
            # Ignore break after open ( [ or { or at the beginning of file.
            return True
        else:
            # Ignore break after another break or dedent.
            t = self.tok[-1]
            return isinstance(t, Break) or isinstance(t, Dedent)
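
    # When a break is ignored (e.g. a newline inside open brackets), lex_break()
    # records it as pre-whitespace only: no Break token is emitted and no
    # indent processing happens for the continuation line.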

    def verify_encoding(self, string, context):
        """Verify that a token (represented by a string) is encoded correctly
        according to the file encoding.
        """
        codec = None
        if self.enc == ASCII_ENCODING:
            codec = 'ascii'
        elif self.enc in [UTF8_ENCODING, DEFAULT_ENCODING]:
            codec = 'utf8'
        if codec is not None:
            try:
                pass  # FIX string.decode(codec)
            except UnicodeDecodeError:
                type = INVALID_UTF8_SEQUENCE
                if self.enc == ASCII_ENCODING:
                    if context == STR_CONTEXT:
                        type = NON_ASCII_CHARACTER_IN_STRING
                    else:
                        type = NON_ASCII_CHARACTER_IN_COMMENT
                self.add_token(LexError('', type))


import sys


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: lex.py FILE')
        sys.exit(2)
    fnam = sys.argv[1]
    s = open(fnam).read()
    for t in lex(s):
        print(t)
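
# Running 'python lex.py FILE' prints one token repr per line, showing each
# token's preceding whitespace and text, e.g. IntLit( 1) for an integer
# literal preceded by a space (assuming short_type() returns the bare class
# name); the final line is always Eof().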