/jsonjinja/lexer.py
- import re
- from operator import itemgetter
- from collections import deque
- from jsonjinja.exceptions import TemplateSyntaxError
- whitespace_re = re.compile(r'\s+', re.U)
- string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
- r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
- float_re = re.compile(r'[+-]?(?<!\.)\d+\.\d+')
- integer_re = re.compile(r'[+-]?\d+')
- name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
- ignored_tokens = frozenset(['comment_begin', 'comment', 'comment_end',
- 'whitespace'])
- ignore_if_empty = frozenset(['whitespace', 'data', 'comment'])
- newline_re = re.compile(r'(\r\n|\r|\n)')
- operators = {
- '~': 'tilde',
- '==': 'eq',
- '!=': 'ne',
- '>': 'gt',
- '<': 'lt',
- '>=': 'ge',
- '<=': 'le',
- '|': 'pipe',
- ',': 'comma',
- ';': 'semicolon',
- '=': 'assign',
- ':': 'colon',
- '[': 'lbracket',
- ']': 'rbracket',
- '{': 'lbrace',
- '}': 'rbrace',
- '(': 'lparen',
- ')': 'rparen',
- '.': 'dot'
- }
- block_start_string = '{%'
- block_end_string = '%}'
- variable_start_string = '{{'
- variable_end_string = '}}'
- comment_start_string = '{#'
- comment_end_string = '#}'
- reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
- assert len(operators) == len(reverse_operators), 'operators dropped'
- operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
- sorted(operators, key=lambda x: -len(x))))
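- # Illustrative sketch (not part of the original module): sorting the
- # operators by descending length before joining them into operator_re means
- # multi-character operators win over their single-character prefixes:
- #
- #     operator_re.match('>=').group()   # -> '>=' rather than '>'
- #     operator_re.match('==').group()   # -> '==' rather than '='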
- def _describe_token_type(token_type):
- if token_type in reverse_operators:
- return reverse_operators[token_type]
- return {
- 'comment_begin': 'begin of comment',
- 'comment_end': 'end of comment',
- 'comment': 'comment',
- 'block_begin': 'begin of statement block',
- 'block_end': 'end of statement block',
- 'variable_begin': 'begin of print statement',
- 'variable_end': 'end of print statement',
- 'data': 'template data / text',
- 'eof': 'end of template'
- }.get(token_type, token_type)
- def describe_token(token):
- """Returns a description of the token."""
- if token.type == 'name':
- return token.value
- return _describe_token_type(token.type)
- def describe_token_expr(expr):
- """Like `describe_token` but for token expressions."""
- if ':' in expr:
- type, value = expr.split(':', 1)
- if type == 'name':
- return value
- else:
- type = expr
- return _describe_token_type(type)
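- # Example (illustrative, not part of the original module): token expressions
- # are either a bare token type or a 'type:value' pair; for name tokens the
- # value itself is the most useful description:
- #
- #     describe_token_expr('name:endfor')   # -> 'endfor'
- #     describe_token_expr('block_end')     # -> 'end of statement block'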
- def count_newlines(value):
- """Count the number of newline characters in the string. This is
- useful for extensions that filter a stream.
- """
- return len(newline_re.findall(value))
- class Failure(object):
- """Class that raises a `TemplateSyntaxError` if called.
- Used by the `Lexer` to specify known errors.
- """
- def __init__(self, message, cls=TemplateSyntaxError):
- self.message = message
- self.error_class = cls
- def __call__(self, lineno, filename):
- raise self.error_class(self.message, lineno, filename)
- def compile_root_rules():
- e = re.escape
- rules = [
- (len(comment_start_string), 'comment', e(comment_start_string)),
- (len(block_start_string), 'block', e(block_start_string)),
- (len(variable_start_string), 'variable', e(variable_start_string))
- ]
- return [x[1:] for x in sorted(rules, reverse=True)]
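- # Illustrative note (not part of the original module): the result is a list
- # of (name, escaped_pattern) pairs sorted so that longer start delimiters
- # are tried first; with the default delimiters above this evaluates to
- #
- #     [('variable', re.escape('{{')),
- #      ('comment', re.escape('{#')),
- #      ('block', re.escape('{%'))]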
- class Token(tuple):
- """Token class."""
- __slots__ = ()
- lineno, type, value = (property(itemgetter(x)) for x in range(3))
- def __new__(cls, lineno, type, value):
- return tuple.__new__(cls, (lineno, intern(str(type)), value))
- def __str__(self):
- if self.type in reverse_operators:
- return reverse_operators[self.type]
- elif self.type == 'name':
- return self.value
- return self.type
- def test(self, expr):
- """Test a token against a token expression. This can either be a
- token type or ``'token_type:token_value'``. This can only test
- against string values and types.
- """
- # here we do a regular string equality check as test_any is usually
- # passed an iterable of non-interned strings.
- if self.type == expr:
- return True
- elif ':' in expr:
- return expr.split(':', 1) == [self.type, self.value]
- return False
- def test_any(self, *iterable):
- """Test against multiple token expressions."""
- for expr in iterable:
- if self.test(expr):
- return True
- return False
- def __repr__(self):
- return 'Token(%r, %r, %r)' % (
- self.lineno,
- self.type,
- self.value
- )
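- # Usage sketch (illustrative, not part of the original module): a Token is a
- # plain (lineno, type, value) tuple with helpers for testing expressions:
- #
- #     tok = Token(1, 'name', 'endfor')
- #     tok.test('name')                          # -> True
- #     tok.test('name:endfor')                   # -> True
- #     tok.test_any('integer', 'name:endfor')    # -> True
- #     str(Token(1, 'eq', '=='))                 # -> '=='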
- class TokenStreamIterator(object):
- """The iterator for tokenstreams. Iterate over the stream
- until the eof token is reached.
- """
- def __init__(self, stream):
- self.stream = stream
- def __iter__(self):
- return self
- def next(self):
- token = self.stream.current
- if token.type == 'eof':
- self.stream.close()
- raise StopIteration()
- self.stream.next()
- return token
- class TokenStream(object):
- """A token stream is an iterable that yields :class:`Token`\s. The
- parser however does not iterate over it but calls :meth:`next` to go
- one token ahead. The current active token is stored as :attr:`current`.
- """
- def __init__(self, generator, name, filename):
- self._next = iter(generator).next
- self._pushed = deque()
- self.name = name
- self.filename = filename
- self.closed = False
- self.current = Token(1, 'initial', '')
- self.next()
- def __iter__(self):
- return TokenStreamIterator(self)
- def __nonzero__(self):
- return bool(self._pushed) or self.current.type != 'eof'
- eos = property(lambda x: not x, doc="Are we at the end of the stream?")
- def push(self, token):
- """Push a token back to the stream."""
- self._pushed.append(token)
- def look(self):
- """Look at the next token."""
- old_token = self.next()
- result = self.current
- self.push(result)
- self.current = old_token
- return result
- def skip(self, n=1):
- """Got n tokens ahead."""
- for x in xrange(n):
- self.next()
- def next_if(self, expr):
- """Perform the token test and return the token if it matched.
- Otherwise the return value is `None`.
- """
- if self.current.test(expr):
- return self.next()
- def skip_if(self, expr):
- """Like :meth:`next_if` but only returns `True` or `False`."""
- return self.next_if(expr) is not None
- def next(self):
- """Go one token ahead and return the old one"""
- rv = self.current
- if self._pushed:
- self.current = self._pushed.popleft()
- elif self.current.type != 'eof':
- try:
- self.current = self._next()
- except StopIteration:
- self.close()
- return rv
- def close(self):
- """Close the stream."""
- self.current = Token(self.current.lineno, 'eof', '')
- self._next = None
- self.closed = True
- def expect(self, expr):
- """Expect a given token type and return it. This accepts the same
- argument as :meth:`jinja2.lexer.Token.test`.
- """
- if not self.current.test(expr):
- expr = describe_token_expr(expr)
- if self.current.type == 'eof':
- raise TemplateSyntaxError('unexpected end of template, '
- 'expected %r.' % expr,
- self.current.lineno,
- self.name, self.filename)
- raise TemplateSyntaxError("expected token %r, got %r" %
- (expr, describe_token(self.current)),
- self.current.lineno,
- self.name, self.filename)
- try:
- return self.current
- finally:
- self.next()
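- # Usage sketch (illustrative, not part of the original module; Lexer is
- # defined below): the parser drives a TokenStream by inspecting `current`
- # and calling next()/expect(); e.g. for a small print statement:
- #
- #     stream = Lexer().tokenize(u'{{ user.name }}')
- #     stream.expect('variable_begin')
- #     stream.expect('name')            # -> Token(1, 'name', 'user')
- #     stream.skip_if('dot')            # -> True
- #     stream.expect('name')            # -> Token(1, 'name', 'name')
- #     stream.expect('variable_end')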
- class Lexer(object):
- def __init__(self):
- c = lambda x: re.compile(x, re.M | re.S)
- e = re.escape
- tag_rules = [
- (whitespace_re, 'whitespace', None),
- (float_re, 'float', None),
- (integer_re, 'integer', None),
- (name_re, 'name', None),
- (string_re, 'string', None),
- (operator_re, 'operator', None)
- ]
- root_tag_rules = compile_root_rules()
- self.rules = {
- 'root': [
- # directives
- (c('(.*?)(?:%s)' % '|'.join(
- [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
- e(block_start_string),
- e(block_start_string),
- e(block_end_string),
- e(block_end_string)
- )] + [
- r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
- for n, r in root_tag_rules
- ])), ('data', '#bygroup'), '#bygroup'),
- # data
- (c('.+'), 'data', None)
- ],
- # comments
- 'comment_begin': [
- (c(r'(.*?)(\-%s\s*|%s)' % (
- e(comment_end_string),
- e(comment_end_string)
- )), ('comment', 'comment_end'), '#pop'),
- (c('(.)'), (Failure('Missing end of comment tag'),), None)
- ],
- # blocks
- 'block_begin': [
- (c(r'(?:\-%s\s*|%s)' % (
- e(block_end_string),
- e(block_end_string)
- )), 'block_end', '#pop'),
- ] + tag_rules,
- # variables
- 'variable_begin': [
- (c(r'\-%s\s*|%s' % (
- e(variable_end_string),
- e(variable_end_string)
- )), 'variable_end', '#pop')
- ] + tag_rules,
- # raw block
- 'raw_begin': [
- (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s))' % (
- e(block_start_string),
- e(block_start_string),
- e(block_end_string),
- e(block_end_string)
- )), ('data', 'raw_end'), '#pop'),
- (c('(.)'), (Failure('Missing end of raw directive'),), None)
- ]
- }
- self.newline_sequence = '\n'
- def _normalize_newlines(self, value):
- """Called for strings and template data to normalize it to unicode."""
- return newline_re.sub(self.newline_sequence, value)
- def tokenize(self, source, name=None, filename=None, state=None):
- """Calls tokeniter + tokenize and wraps it in a token stream.
- """
- stream = self.tokeniter(source, name, filename, state)
- return TokenStream(self.wrap(stream, name, filename), name, filename)
- def wrap(self, stream, name=None, filename=None):
- """This is called with the stream as returned by `tokenize` and wraps
- every token in a :class:`Token` and converts the value.
- """
- for lineno, token, value in stream:
- if token in ignored_tokens:
- continue
- # we are not interested in those tokens in the parser
- elif token in ('raw_begin', 'raw_end'):
- continue
- elif token == 'data':
- value = self._normalize_newlines(value)
- elif token == 'keyword':
- token = value
- elif token == 'name':
- value = str(value)
- elif token == 'string':
- # try to unescape string
- try:
- value = self._normalize_newlines(value[1:-1]) \
- .encode('ascii', 'backslashreplace') \
- .decode('unicode-escape')
- except Exception, e:
- msg = str(e).split(':')[-1].strip()
- raise TemplateSyntaxError(msg, lineno, name, filename)
- # if we can express it as bytestring (ascii only)
- # we do that for support of semi broken APIs
- # as datetime.datetime.strftime. On python 3 this
- # call becomes a noop thanks to 2to3
- try:
- value = str(value)
- except UnicodeError:
- pass
- elif token == 'integer':
- value = int(value)
- elif token == 'float':
- value = float(value)
- elif token == 'operator':
- token = operators[value]
- yield Token(lineno, token, value)
- def tokeniter(self, source, name, filename=None, state=None):
- """This method tokenizes the text and returns the tokens in a
- generator. Use this method if you just want to tokenize a template.
- """
- source = '\n'.join(unicode(source).splitlines())
- pos = 0
- lineno = 1
- stack = ['root']
- if state is not None and state != 'root':
- assert state in ('variable', 'block'), 'invalid state'
- stack.append(state + '_begin')
- else:
- state = 'root'
- statetokens = self.rules[stack[-1]]
- source_length = len(source)
- balancing_stack = []
- while 1:
- # tokenizer loop
- for regex, tokens, new_state in statetokens:
- m = regex.match(source, pos)
- # if no match we try again with the next rule
- if m is None:
- continue
- # we only match blocks and variables if braces / parentheses
- # are balanced. continue parsing with the lower rule which
- # is the operator rule. do this only if the end tags look
- # like operators
- if balancing_stack and \
- tokens in ('variable_end', 'block_end'):
- continue
- # tuples support more options
- if isinstance(tokens, tuple):
- for idx, token in enumerate(tokens):
- # failure group
- if token.__class__ is Failure:
- raise token(lineno, filename)
- # bygroup is a bit more complex, in that case we
- # yield for the current token the first named
- # group that matched
- elif token == '#bygroup':
- for key, value in m.groupdict().iteritems():
- if value is not None:
- yield lineno, key, value
- lineno += value.count('\n')
- break
- else:
- raise RuntimeError('%r wanted to resolve '
- 'the token dynamically'
- ' but no group matched'
- % regex)
- # normal group
- else:
- data = m.group(idx + 1)
- if data or token not in ignore_if_empty:
- yield lineno, token, data
- lineno += data.count('\n')
- # plain string token types are yielded as they are.
- else:
- data = m.group()
- # update brace/parentheses balance
- if tokens == 'operator':
- if data == '{':
- balancing_stack.append('}')
- elif data == '(':
- balancing_stack.append(')')
- elif data == '[':
- balancing_stack.append(']')
- elif data in ('}', ')', ']'):
- if not balancing_stack:
- raise TemplateSyntaxError('unexpected \'%s\'' %
- data, lineno, name,
- filename)
- expected_op = balancing_stack.pop()
- if expected_op != data:
- raise TemplateSyntaxError('unexpected \'%s\', '
- 'expected \'%s\'' %
- (data, expected_op),
- lineno, name,
- filename)
- # yield items
- if data or tokens not in ignore_if_empty:
- yield lineno, tokens, data
- lineno += data.count('\n')
- # fetch new position into new variable so that we can check
- # if there is an internal parsing error which would result
- # in an infinite loop
- pos2 = m.end()
- # handle state changes
- if new_state is not None:
- # remove the uppermost state
- if new_state == '#pop':
- stack.pop()
- # resolve the new state by group checking
- elif new_state == '#bygroup':
- for key, value in m.groupdict().iteritems():
- if value is not None:
- stack.append(key)
- break
- else:
- raise RuntimeError('%r wanted to resolve the '
- 'new state dynamically but'
- ' no group matched' %
- regex)
- # direct state name given
- else:
- stack.append(new_state)
- statetokens = self.rules[stack[-1]]
- # we are still at the same position and no stack change.
- # this means a loop without break condition, avoid that and
- # raise error
- elif pos2 == pos:
- raise RuntimeError('%r yielded empty string without '
- 'stack change' % regex)
- # publish the new position and start again
- pos = pos2
- break
- # if loop terminated without break we haven't found a single match
- # either we are at the end of the file or we have a problem
- else:
- # end of text
- if pos >= source_length:
- return
- # something went wrong
- raise TemplateSyntaxError('unexpected char %r at %d' %
- (source[pos], pos), lineno,
- name, filename)
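- # End-to-end sketch (illustrative, not part of the original module): the
- # lexer turns template source into a flat stream of tokens for the parser:
- #
- #     lexer = Lexer()
- #     stream = lexer.tokenize(u'Hello {{ name }}!')
- #     [(t.type, t.value) for t in stream]
- #     # -> roughly: [('data', u'Hello '), ('variable_begin', u'{{'),
- #     #              ('name', 'name'), ('variable_end', u'}}'),
- #     #              ('data', u'!')]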