
/lib/python/yapps/runtime.py

https://github.com/araisrobo/linuxcnc
# Yapps 2 Runtime, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
#
# This version of the Yapps 2 Runtime can be distributed under the
# terms of the MIT open source license, either found in the LICENSE file
# included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
"""Run time libraries needed to run parsers generated by Yapps.

This module defines parse-time exception classes, a scanner class, a
base class for parsers produced by Yapps, and a context class that
keeps track of the parse stack.
"""

import sys, re

MIN_WINDOW = 4096
# File lookup window

class SyntaxError(Exception):
    """When we run into an unexpected token, this is the exception to use"""
    def __init__(self, pos=None, msg="Bad Token", context=None):
        Exception.__init__(self)
        self.pos = pos
        self.msg = msg
        self.context = context

    def __str__(self):
        if not self.pos: return 'SyntaxError'
        else: return 'SyntaxError@%s(%s)' % (repr(self.pos), self.msg)

class NoMoreTokens(Exception):
    """Another exception object, for when we run out of tokens"""
    pass

class Token(object):
    """Yapps token.

    This is a container for a scanned token.
    """

    def __init__(self, type, value, pos=None):
        """Initialize a token."""
        self.type = type
        self.value = value
        self.pos = pos

    def __repr__(self):
        output = '<%s: %s' % (self.type, repr(self.value))
        if self.pos:
            output += " @ "
            if self.pos[0]:
                output += "%s:" % self.pos[0]
            if self.pos[1]:
                output += "%d" % self.pos[1]
            if self.pos[2] is not None:
                output += ".%d" % self.pos[2]
        output += ">"
        return output
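
# Illustrative example (not in the original file): a token built as
#     Token('NUM', '42', ('<f.0>', 3, 7))
# reprs as <NUM: '42' @ <f.0>:3.7> -- file, then line, then .column.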

in_name = 0

class Scanner(object):
    """Yapps scanner.

    The Yapps scanner can work in context sensitive or context
    insensitive modes.  The token(i) method is used to retrieve the
    i-th token.  It takes a restrict set that limits the set of tokens
    it is allowed to return.  In context sensitive mode, this restrict
    set guides the scanner.  In context insensitive mode, there is no
    restriction (the set is always the full set of tokens).
    """

    def __init__(self, patterns, ignore, input="",
                 file=None, filename=None, stacked=False):
        """Initialize the scanner.

        Parameters:
          patterns : [(terminal, uncompiled regex), ...] or None
          ignore : {terminal:None, ...}
          input : string

        If patterns is None, we assume that the subclass has
        defined self.patterns : [(terminal, compiled regex), ...].
        Note that the patterns parameter expects uncompiled regexes,
        whereas the self.patterns field expects compiled regexes.

        The 'ignore' value is either None or a callable, which is called
        with the scanner and the to-be-ignored match object; this can
        be used for include file or comment handling.
        """

        if not filename:
            global in_name
            filename = "<f.%d>" % in_name
            in_name += 1

        self.input = input
        self.ignore = ignore
        self.file = file
        self.filename = filename
        self.pos = 0
        self.del_pos = 0 # skipped
        self.line = 1
        self.del_line = 0 # skipped
        self.col = 0
        self.tokens = []
        self.stack = None
        self.stacked = stacked

        self.last_read_token = None
        self.last_token = None
        self.last_types = None

        if patterns is not None:
            # Compile the regex strings into regex objects
            self.patterns = []
            for terminal, regex in patterns:
                self.patterns.append( (terminal, re.compile(regex)) )
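
    # A hypothetical patterns/ignore pair, to make the shapes above concrete
    # (terminal names invented, not from any generated parser):
    #     patterns = [('SPACE', r'\s+'), ('NUM', r'[0-9]+'), ('PLUS', r'\+')]
    #     ignore   = {'SPACE': None}
    # Scanner(patterns, ignore, input="1 + 2") would then skip the whitespace
    # and hand back NUM and PLUS tokens.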

    def stack_input(self, input="", file=None, filename=None):
        """Temporarily parse from a second file."""

        # Already reading from somewhere else: Go on top of that, please.
        if self.stack:
            # autogenerate a recursion-level-identifying filename
            if not filename:
                filename = 1
            else:
                try:
                    filename += 1
                except TypeError:
                    pass
            # now pass off to the include file
            self.stack.stack_input(input, file, filename)
        else:
            try:
                filename += 0
            except TypeError:
                pass
            else:
                filename = "<str_%d>" % filename

            # self.stack = object.__new__(self.__class__)
            # Scanner.__init__(self.stack, self.patterns, self.ignore, input, file, filename, stacked=True)

            # Note that the pattern+ignore are added by the generated
            # scanner code
            self.stack = self.__class__(input, file, filename, stacked=True)
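
    # Hypothetical include-file sketch (not from the original): an ignore
    # callback bound to, say, an INCLUDE terminal receives (scanner, match)
    # and could redirect the token stream with
    #     scanner.stack_input(file=open(fname), filename=fname)
    # token() then drains the stacked scanner before resuming this one.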

    def get_pos(self):
        """Return a file/line/char tuple."""
        if self.stack: return self.stack.get_pos()

        return (self.filename, self.line + self.del_line, self.col)

#    def __repr__(self):
#        """Print the last few tokens that have been scanned in"""
#        output = ''
#        for t in self.tokens:
#            output += '%s\n' % (repr(t),)
#        return output

    def print_line_with_pointer(self, pos, length=0, out=sys.stderr):
        """Print the line of 'text' that includes position 'p',
        along with a second line with a single caret (^) at position p"""

        file, line, p = pos
        if file != self.filename:
            if self.stack: return self.stack.print_line_with_pointer(pos, length=length, out=out)
            print >>out, "(%s: not in input buffer)" % file
            return

        text = self.input
        p += length - 1 # starts at pos 1

        origline = line
        line -= self.del_line
        spos = 0
        if line > 0:
            while 1:
                line = line - 1
                try:
                    cr = text.index("\n", spos)
                except ValueError:
                    if line:
                        text = ""
                    break
                if line == 0:
                    text = text[spos:cr]
                    break
                spos = cr + 1
        else:
            print >>out, "(%s:%d not in input buffer)" % (file, origline)
            return

        # Now try printing part of the line
        text = text[max(p - 80, 0):p + 80]
        p = p - max(p - 80, 0)

        # Strip to the left
        i = text[:p].rfind('\n')
        j = text[:p].rfind('\r')
        if i < 0 or (0 <= j < i): i = j
        if 0 <= i < p:
            p = p - i - 1
            text = text[i + 1:]

        # Strip to the right
        i = text.find('\n', p)
        j = text.find('\r', p)
        if i < 0 or (0 <= j < i): i = j
        if i >= 0:
            text = text[:i]

        # Now shorten the text
        while len(text) > 70 and p > 60:
            # Cut off 10 chars
            text = "..." + text[10:]
            p = p - 7

        # Now print the string, along with an indicator
        print >>out, '> ', text
        print >>out, '> ', ' ' * p + '^'
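
    # The two print statements above produce output of roughly this shape
    # (input line invented for illustration):
    #     >  x := y + 1
    #     >          ^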

    def grab_input(self):
        """Get more input if possible."""
        if not self.file: return
        if len(self.input) - self.pos >= MIN_WINDOW: return

        data = self.file.read(MIN_WINDOW)
        if data is None or data == "":
            self.file = None

        # Drop bytes from the start, if necessary.
        if self.pos > 2*MIN_WINDOW:
            self.del_pos += MIN_WINDOW
            self.del_line += self.input[:MIN_WINDOW].count("\n")

            self.pos -= MIN_WINDOW
            self.input = self.input[MIN_WINDOW:] + data
        else:
            self.input = self.input + data

    def getchar(self):
        """Return the next character."""
        self.grab_input()

        c = self.input[self.pos]
        self.pos += 1
        return c

    def token(self, restrict, context=None):
        """Scan for another token."""

        while 1:
            if self.stack:
                try:
                    return self.stack.token(restrict, context)
                except StopIteration:
                    self.stack = None

            # Keep looking for a token, ignoring any in self.ignore
            self.grab_input()

            # special handling for end-of-file
            if self.stacked and self.pos == len(self.input):
                raise StopIteration

            # Search the patterns for the longest match, with earlier
            # tokens in the list having preference
            best_match = -1
            best_pat = '(error)'
            best_m = None
            for p, regexp in self.patterns:
                # First check to see if we're ignoring this token
                if restrict and p not in restrict and p not in self.ignore:
                    continue
                m = regexp.match(self.input, self.pos)
                if m and m.end() - m.start() > best_match:
                    # We got a match that's better than the previous one
                    best_pat = p
                    best_match = m.end() - m.start()
                    best_m = m

            # If we didn't find anything, raise an error
            if best_pat == '(error)' and best_match < 0:
                msg = 'Bad Token'
                if restrict:
                    msg = 'Trying to find one of ' + ', '.join(restrict)
                raise SyntaxError(self.get_pos(), msg, context=context)

            ignore = best_pat in self.ignore
            value = self.input[self.pos:self.pos + best_match]
            if not ignore:
                tok = Token(type=best_pat, value=value, pos=self.get_pos())

            self.pos += best_match

            npos = value.rfind("\n")
            if npos > -1:
                self.col = best_match - npos
                self.line += value.count("\n")
            else:
                self.col += best_match

            # If we found something that isn't to be ignored, return it
            if not ignore:
                if len(self.tokens) >= 10:
                    del self.tokens[0]
                self.tokens.append(tok)
                self.last_read_token = tok
                # print repr(tok)
                return tok
            else:
                ignore = self.ignore[best_pat]
                if ignore:
                    ignore(self, best_m)
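
    # Note the matching policy above: every pattern allowed by 'restrict' is
    # tried at self.pos, the longest match wins, and earlier entries in
    # self.patterns win ties, since a later match must be strictly longer
    # ('>' rather than '>=') to displace the current best.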

    def peek(self, *types, **kw):
        """Returns the token type for lookahead; if there are any args
        then the list of args is the set of token types to allow"""
        context = kw.get("context", None)
        if self.last_token is None:
            self.last_types = types
            self.last_token = self.token(types, context)
        elif self.last_types:
            for t in types:
                if t not in self.last_types:
                    raise NotImplementedError("Unimplemented: restriction set changed")
        return self.last_token.type

    def scan(self, type, **kw):
        """Returns the matched text, and moves to the next token"""
        context = kw.get("context", None)
        if self.last_token is None:
            tok = self.token([type], context)
        else:
            if self.last_types and type not in self.last_types:
                raise NotImplementedError("Unimplemented: restriction set changed")

            tok = self.last_token
            self.last_token = None
        if tok.type != type:
            if not self.last_types: self.last_types = []
            raise SyntaxError(tok.pos, 'Trying to find ' + type + ': ' + ', '.join(self.last_types) + ", got " + tok.type, context=context)
        return tok.value
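
# How a generated rule typically drives peek()/scan() (terminal names here
# are illustrative; real ones come from the grammar):
#     if scanner.peek('NUM', 'PLUS') == 'NUM':
#         text = scanner.scan('NUM')
# peek() reads one token under the given restriction set and caches it;
# the next scan() consumes the cached token.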

class Parser(object):
    """Base class for Yapps-generated parsers."""

    def __init__(self, scanner):
        self._scanner = scanner

    def _stack(self, input="", file=None, filename=None):
        """Temporarily read from someplace else"""
        self._scanner.stack_input(input, file, filename)
        self._tok = None

    def _peek(self, *types, **kw):
        """Returns the token type for lookahead; if there are any args
        then the list of args is the set of token types to allow"""
        return self._scanner.peek(*types, **kw)

    def _scan(self, type, **kw):
        """Returns the matched text, and moves to the next token"""
        return self._scanner.scan(type, **kw)

class Context(object):
    """Class to represent the parser's call stack.

    Every rule creates a Context that links to its parent rule.  The
    contexts can be used for debugging.
    """

    def __init__(self, parent, scanner, rule, args=()):
        """Create a new context.

        Args:
          parent: Context object or None
          scanner: Scanner object
          rule: string (name of the rule)
          args: tuple listing parameters to the rule
        """
        self.parent = parent
        self.scanner = scanner
        self.rule = rule
        self.args = args
        while scanner.stack: scanner = scanner.stack
        self.token = scanner.last_read_token

    def __str__(self):
        output = ''
        if self.parent: output = str(self.parent) + ' > '
        output += self.rule
        return output

def print_error(err, scanner, max_ctx=None):
    """Print error messages, the parser stack, and the input text -- for human-readable error messages."""
    # NOTE: this function assumes 80 columns :-(

    # Figure out the line number
    pos = err.pos
    if not pos:
        pos = scanner.get_pos()

    file_name, line_number, column_number = pos
    print >>sys.stderr, '%s:%d:%d: %s' % (file_name, line_number, column_number, err.msg)

    scanner.print_line_with_pointer(pos)

    context = err.context
    token = None
    while context:
        print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args))
        if context.token:
            token = context.token
        if token:
            scanner.print_line_with_pointer(token.pos, length=len(token.value))
        context = context.parent
        if max_ctx:
            max_ctx = max_ctx - 1
            if not max_ctx:
                break
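
# A sketch of what print_error() emits (values invented for illustration):
#     <f.0>:3:7: Trying to find one of NUM, PLUS
# followed by the offending line with a caret from print_line_with_pointer(),
# then one 'while parsing rule(...):' line per enclosing Context.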

def wrap_error_reporter(parser, rule, *args, **kw):
    try:
        return getattr(parser, rule)(*args, **kw)
    except SyntaxError, e:
        print_error(e, parser._scanner)
    except NoMoreTokens:
        print >>sys.stderr, 'Could not complete parsing; stopped around here:'
        print >>sys.stderr, parser._scanner
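
# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original runtime: the terminal names,
# patterns, and input below are invented for demonstration; real scanners are
# normally generated by Yapps from a grammar.
if __name__ == '__main__':
    demo_patterns = [
        ('SPACE', r'\s+'),   # skipped via the ignore dict below
        ('NUM',   r'[0-9]+'),
        ('PLUS',  r'\+'),
        ('END',   r'$'),     # zero-width match at end of input
    ]
    s = Scanner(demo_patterns, ignore={'SPACE': None}, input="1 + 23 + 456")
    while 1:
        tok = s.token(None)  # restrict=None: context-insensitive scanning
        if tok.type == 'END':
            break
        print repr(tok)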