PageRenderTime 67ms CodeModel.GetById 15ms app.highlight 46ms RepoModel.GetById 1ms app.codeStats 0ms

/thirdparty/breakpad/third_party/protobuf/protobuf/gtest/scripts/pump.py

http://github.com/tomahawk-player/tomahawk
Python | 835 lines | 723 code | 40 blank | 72 comment | 20 complexity | 92f0800d1806672a9c18df8611a02de5 MD5 | raw file
  1#!/usr/bin/env python
  2#
  3# Copyright 2008, Google Inc.
  4# All rights reserved.
  5#
  6# Redistribution and use in source and binary forms, with or without
  7# modification, are permitted provided that the following conditions are
  8# met:
  9#
 10#     * Redistributions of source code must retain the above copyright
 11# notice, this list of conditions and the following disclaimer.
 12#     * Redistributions in binary form must reproduce the above
 13# copyright notice, this list of conditions and the following disclaimer
 14# in the documentation and/or other materials provided with the
 15# distribution.
 16#     * Neither the name of Google Inc. nor the names of its
 17# contributors may be used to endorse or promote products derived from
 18# this software without specific prior written permission.
 19#
 20# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 21# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 22# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 23# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 24# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 25# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 26# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 27# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 28# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 29# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 30# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 31
 32"""pump v0.1 - Pretty Useful for Meta Programming.
 33
 34A tool for preprocessor meta programming.  Useful for generating
 35repetitive boilerplate code.  Especially useful for writing C++
 36classes, functions, macros, and templates that need to work with
 37various number of arguments.
 38
 39USAGE:
 40       pump.py SOURCE_FILE
 41
 42EXAMPLES:
 43       pump.py foo.cc.pump
 44         Converts foo.cc.pump to foo.cc.
 45
 46GRAMMAR:
 47       CODE ::= ATOMIC_CODE*
 48       ATOMIC_CODE ::= $var ID = EXPRESSION
 49           | $var ID = [[ CODE ]]
 50           | $range ID EXPRESSION..EXPRESSION
 51           | $for ID SEPARATOR [[ CODE ]]
 52           | $($)
 53           | $ID
 54           | $(EXPRESSION)
 55           | $if EXPRESSION [[ CODE ]] ELSE_BRANCH
 56           | [[ CODE ]]
 57           | RAW_CODE
 58       SEPARATOR ::= RAW_CODE | EMPTY
 59       ELSE_BRANCH ::= $else [[ CODE ]]
 60           | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
 61           | EMPTY
 62       EXPRESSION has Python syntax.
 63"""
 64
 65__author__ = 'wan@google.com (Zhanyong Wan)'
 66
 67import os
 68import re
 69import sys
 70
 71
# Regex-to-token-type table driving the tokenizer.  The tokenizer picks
# the EARLIEST match in a line; when two patterns match at the same
# column the entry listed first wins, so the more specific '$...'
# patterns must stay above the bare '$' entry.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),  # $identifier reference
    (re.compile(r'\$\(\$\)'), '$($)'),       # escaped literal '$'
    (re.compile(r'\$\$.*'), '$$'),           # meta comment to end of line
    (re.compile(r'\$'), '$'),                # start of $(expression)
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
 86
 87
 88class Cursor:
 89  """Represents a position (line and column) in a text file."""
 90
 91  def __init__(self, line=-1, column=-1):
 92    self.line = line
 93    self.column = column
 94
 95  def __eq__(self, rhs):
 96    return self.line == rhs.line and self.column == rhs.column
 97
 98  def __ne__(self, rhs):
 99    return not self == rhs
100
101  def __lt__(self, rhs):
102    return self.line < rhs.line or (
103        self.line == rhs.line and self.column < rhs.column)
104
105  def __le__(self, rhs):
106    return self < rhs or self == rhs
107
108  def __gt__(self, rhs):
109    return rhs < self
110
111  def __ge__(self, rhs):
112    return rhs <= self
113
114  def __str__(self):
115    if self == Eof():
116      return 'EOF'
117    else:
118      return '%s(%s)' % (self.line + 1, self.column)
119
120  def __add__(self, offset):
121    return Cursor(self.line, self.column + offset)
122
123  def __sub__(self, offset):
124    return Cursor(self.line, self.column - offset)
125
126  def Clone(self):
127    """Returns a copy of self."""
128
129    return Cursor(self.line, self.column)
130
131
# Sentinel cursor used throughout to mean "past the end of the input".
def Eof():
  """Returns the sentinel Cursor (line=-1, column=-1) denoting end-of-file."""
  return Cursor(line=-1, column=-1)
136
137
class Token:
  """One lexical token of a Pump source file.

  Attributes:
    start: Cursor where the token begins (Eof() when unspecified).
    end: Cursor just past the token (Eof() when unspecified).
    value: the token's text.
    token_type: one of the type tags produced by the tokenizer.
  """

  def __init__(self, start=None, end=None, value=None, token_type=None):
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy with cloned cursors; value and type are shared."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
162
163
def StartsWith(lines, pos, string):
  """Returns True iff lines, read from cursor pos, begins with string."""
  tail = lines[pos.line][pos.column:]
  return tail.startswith(string)
168
169
def FindFirstInLine(line, token_table):
  """Finds the earliest token_table match in line.

  Returns (start_column, length, token_type) for the match beginning
  earliest in the line -- ties broken by table order -- or None when no
  pattern matches.
  """
  candidates = []
  for regex, token_type in token_table:
    m = regex.search(line)
    if m:
      candidates.append((m.start(), m.end() - m.start(), token_type))
  if not candidates:
    return None
  # min() keeps the first candidate among equal starts, which preserves
  # the table-order tie-break.
  return min(candidates, key=lambda match: match[0])
185
186
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines.

  Scans lines from cursor onward and returns a Token for the earliest
  token_table match, or None when nothing matches the rest of the
  input.
  """

  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    if cur_line_number == start.line:
      # On the first line, search only from the cursor's column onward.
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        # Translate back from truncated-line to full-line coordinates.
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
207
208
def SubString(lines, start, end):
  """Returns the text between cursors start (inclusive) and end
  (exclusive); end == Eof() means through the end of the last line."""
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  # Spanning several lines: tail of the first, whole middle lines, and
  # head of the last.
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
225
226
def MakeToken(lines, start, end, token_type):
  """Builds a Token of token_type covering [start, end) in lines."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
231
232
def ParseToken(lines, pos, regex, token_type):
  """Parses a token matching regex exactly at pos, or dies.

  Args:
    lines: the source as a list of lines.
    pos: Cursor where the token must start.
    regex: compiled pattern the token must match.
    token_type: type tag for the resulting Token (also used in errors).

  Returns:
    The matched Token.  Exits the program with an error message when the
    text at pos does not match regex.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():  # The match must begin exactly at pos.
    return MakeToken(lines, pos, pos + m.end(), token_type)
  # Parenthesized print: the original Python-2-only print statement is a
  # syntax error under Python 3.
  print('ERROR: %s expected at %s.' % (token_type, pos))
  sys.exit(1)
241
242
# Patterns used to parse the pieces that follow a meta token.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')    # a meta-variable identifier
EQ_REGEX = re.compile(r'=')               # the '=' in a $var definition
# The expression text: everything up to end of line or a '$$' comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')       # the '..' in a $range bound pair
249
250
def Skip(lines, pos, regex):
  """Returns the position just past a regex match starting exactly at
  pos, or pos unchanged when there is no such match.

  regex may be a pattern string or a compiled pattern.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m is None or m.start():
    return pos
  return pos + m.end()
258
259
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos on
  the same line; exits with an error naming token_type when none exists.

  regex may be a pattern string or a compiled pattern.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if not m:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
  return pos + m.start()
269
270
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at pos.

  Consumes '(...)' allowing nested parentheses and returns an 'exp'
  Token covering everything from pos through the matching ')'.  Exits
  via SkipUntil when a parenthesis is missing on the line.
  """
  def ParseInParens(pos):
    # Skip whitespace and the opening '(', then consume the balanced body.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Advance to the next parenthesis; recurse on '(' to keep the parens
    # balanced, and stop on ')' (consumed by the caller).
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
291
292
def RStripNewLineFromToken(token):
  """Returns token with a single trailing newline removed from its value,
  or token itself when the value has no trailing newline."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
298
299
def TokenizeLines(lines, pos):
  """Generator yielding the Tokens of the Pump source in lines, from pos.

  Literal text between directives is yielded as 'code' tokens.  A code
  token's trailing newline is stripped when it immediately precedes a
  directive that consumes the rest of its line.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No directive remains: the rest of the input is one code token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Literal code between pos and the directive we just found.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$$':  # A meta comment.
      if prev_token_rstripped:
        yield prev_token_rstripped
      # The comment consumes the rest of its line.
      pos = Cursor(found.end.line + 1, 0)
    elif found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # $var ID = EXPRESSION: the value is the rest of the line.
        # ($var ID = [[ ... ]] leaves the '[[' for the next iteration.)
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      # $range ID EXP..EXP: emit the lower bound, '..', and upper bound.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # Start of a $(expression); the literal code before it keeps its
      # newline here.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
374
375
def Tokenize(s):
  """Tokenizes the Pump source string s; returns a Token generator."""
  return TokenizeLines(s.splitlines(True), Cursor(0, 0))
379
380
class CodeNode:
  """AST node holding a sequence of atomic code nodes."""

  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
384
385
class VarNode:
  """AST node for a $var definition."""

  def __init__(self, identifier=None, atomic_code=None):
    # identifier: 'id' Token naming the variable.
    # atomic_code: node producing the variable's value.
    self.identifier, self.atomic_code = identifier, atomic_code
390
391
class RangeNode:
  """AST node for a $range declaration: identifier spans exp1..exp2."""

  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier, self.exp1, self.exp2 = identifier, exp1, exp2
397
398
class ForNode:
  """AST node for a $for loop over a previously declared $range."""

  def __init__(self, identifier=None, sep=None, code=None):
    # sep: optional 'code' Token emitted between loop iterations.
    self.identifier, self.sep, self.code = identifier, sep, code
404
405
class ElseNode:
  """AST node for a bare $else branch."""

  def __init__(self, else_branch=None):
    self.else_branch = else_branch
409
410
class IfNode:
  """AST node for $if/$elif/$else."""

  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp, self.then_branch, self.else_branch = (
        exp, then_branch, else_branch)
416
417
class RawCodeNode:
  """AST node wrapping a literal 'code' Token emitted verbatim."""

  def __init__(self, token=None):
    self.raw_code = token
421
422
class LiteralDollarNode:
  """AST node for the $($) escape, which renders as a literal '$'."""

  def __init__(self, token):
    self.token = token
426
427
class ExpNode:
  """AST node for a meta expression.

  python_exp is the expression rewritten so each identifier becomes a
  self.GetValue(...) lookup, ready for Env.EvalExp.
  """

  def __init__(self, token, python_exp):
    self.token, self.python_exp = token, python_exp
432
433
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
438
439
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
442
443
def PopToken(a_list, token_type=None):
  """Pops and returns the first token of a_list (modified in place).

  When token_type is given and the popped token's type differs, prints
  an error and exits the program.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # Parenthesized prints: the original Python-2-only print statements
    # are syntax errors under Python 3.
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)

  return token
452
453
def PeekToken(a_list):
  """Returns the first token without removing it, or None when empty."""
  return a_list[0] if a_list else None
459
460
def ParseExpNode(token):
  """Wraps token in an ExpNode, rewriting every identifier in the
  expression into a self.GetValue("...") lookup for later eval()."""
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
464
465
def ParseElseNode(tokens):
  """Parses an optional ELSE_BRANCH ($else or $elif) from tokens.

  Returns the branch's CodeNode, or None when there is no else branch.
  tokens is consumed in place.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  # Renamed from 'next' to avoid shadowing the builtin.
  next_token = PeekToken(tokens)
  if not next_token:
    return None
  if next_token.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next_token.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    # $elif desugars into a nested $if inside the else branch.
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next_token.value.strip():
    # Whitespace-only code may sit between the $if block and its
    # $else/$elif; skip it and look again.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
492
493
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production from tokens (consumed in place).

  Returns the corresponding AST node, or None -- pushing the unexpected
  token back onto tokens -- when the next token cannot start an atomic
  code node.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    # Renamed from 'next' to avoid shadowing the builtin.
    next_token = PeekToken(tokens)
    if next_token.token_type == 'exp':
      # $var ID = EXPRESSION
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    # $var ID = [[ CODE ]]
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      # Optional separator emitted between loop iterations.
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' to get the bare identifier.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not the start of an atomic code node; put the token back.
    PushFront(tokens, head)
    return None
553
554
def ParseCodeNode(tokens):
  """Parses CODE ::= ATOMIC_CODE* from tokens, consuming them in place."""
  atomic_code_list = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    atomic_code_list.append(node)
  return CodeNode(atomic_code_list)
566
567
def Convert(file_path):
  """Reads the Pump source at file_path and returns its parsed AST.

  Uses open() in a 'with' block -- the original used the Python-2-only
  file() builtin and never closed the handle.
  """
  with open(file_path, 'r') as f:
    s = f.read()
  tokens = list(Tokenize(s))
  return ParseCodeNode(tokens)
575
576
class Env:
  """Evaluation environment: stacks of meta variables and ranges."""

  def __init__(self):
    self.variables = []  # Stack of (name, value) pairs; newest first.
    self.ranges = []     # Stack of (name, lower, upper) triples; newest first.

  def Clone(self):
    """Returns a copy with independent stacks (values are shared)."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value.  A string that round-trips through int() is
    stored as an int so numeric meta expressions work."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    """Removes the most recent variable binding."""
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive integer range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    """Removes the most recent range binding."""
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the innermost binding of identifier, or dies."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode's rewritten Python expression, or dies.

    NOTE: uses eval() on template-supplied code; acceptable here because
    the input is the user's own .pump file, not untrusted data.
    """
    try:
      result = eval(exp.python_exp)
    except Exception as e:  # 'except E, e' is a syntax error in Python 3.
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns (lower, upper) for the innermost range binding, or dies."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
632
633
class Output:
  """Accumulates the generated output text."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the last newline, or '' when the output
    contains no newline at all."""
    head, sep, tail = self.string.rpartition('\n')
    return tail if sep else ''

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
647
648
def RunAtomicCode(env, node, output):
  """Executes one AST node, appending generated text to output.

  Args:
    env: Env with the current variable/range bindings (mutated by
      $var/$range nodes).
    node: the AST node to execute.
    output: Output object receiving generated text.
  """
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    result = Output()
    # Evaluate the variable's body in a child scope, then bind the text.
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):  # $range bounds are inclusive.
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:  # Separator goes between iterations, not after the last.
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # Parenthesized prints: the original Python-2-only print statements
    # are syntax errors under Python 3.
    print('BAD')
    print(node)
    sys.exit(1)
693
694
def RunCode(env, code_node, output):
  """Executes each atomic node of code_node against env and output."""
  for atomic_code in code_node.atomic_code:
    RunAtomicCode(env, atomic_code, output)
698
699
def IsComment(cur_line):
  """Returns True when cur_line contains a '//' marker.  (This also
  fires on '//' inside string literals -- good enough for this tool.)"""
  return cur_line.find('//') != -1
702
703
def IsInPreprocessorDirevative(prev_lines, cur_line):
  """Returns True when cur_line belongs to a preprocessor directive:
  either it starts one ('#'), or the previous output line continues one
  (ends in a backslash)."""
  if cur_line.lstrip().startswith('#'):
    return True
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
708
709
def WrapComment(line, output):
  """Wraps an over-long '//' comment line to 80 columns.

  Code preceding the comment (if any) is emitted on its own line; the
  comment text is re-flowed onto as many '// '-prefixed lines as needed,
  all indented to the comment's original column.  Wrapped lines are
  appended to output (a list of strings).
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    indent = loc
  else:
    # Emit the code part separately; the comment keeps its indentation.
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split the comment into word-plus-trailing-punctuation segments.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      # The current line is full: flush it and start a new one.
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
732
733
def WrapCode(line, line_concat, output):
  """Wraps an over-long code line to 80 columns.

  Prefers to break after ',' or ';'; continuation lines get 4 extra
  spaces of indent and every line but the last ends with line_concat
  (e.g. ' \\' for preprocessor directives, '' for plain code).  Wrapped
  lines are appended to output (a list of strings).
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      # Flush the full line and continue with this segment.
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
762
763
def WrapPreprocessorDirevative(line, output):
  """Wraps an over-long preprocessor directive, continuing with ' \\'."""
  WrapCode(line, ' \\', output)
766
767
def WrapPlainCode(line, output):
  """Wraps an over-long ordinary code line, with no continuation mark."""
  WrapCode(line, '', output)
770
771
def IsHeaderGuardOrInclude(line):
  """Returns truthy when line is a header-guard directive or an
  #include; both are exempt from the 80-column limit."""
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  return guard or re.match(r'^#include\s', line)
775
776
def WrapLongLine(line, output):
  """Appends line to output, wrapping it when it exceeds 80 columns.

  Header guards and #includes are exempt from wrapping; comments,
  preprocessor directives and plain code each use their own wrapper.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsComment(line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard
      # lines and includes.
      output.append(line)
    else:
      WrapComment(line, output)
    return
  if IsInPreprocessorDirevative(output, line):
    if IsHeaderGuardOrInclude(line):
      # Same exception as above.
      output.append(line)
    else:
      WrapPreprocessorDirevative(line, output)
    return
  WrapPlainCode(line, output)
797
798
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace; the result always
  ends with a newline."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  return '\n'.join(line.rstrip() for line in wrapped) + '\n'
806
807
def main(argv):
  """Entry point: converts argv[-1] (a .pump file) to generated source.

  Writes FOO for an input named FOO.pump; prints to stdout when the
  input name doesn't end in '.pump'.  With no argument, prints the
  usage docstring and exits with status 1.
  """
  if len(argv) == 1:
    print(__doc__)  # Parenthesized: Python-2 print statements fail on py3.
    sys.exit(1)

  file_path = argv[-1]
  ast = Convert(file_path)
  output = Output()
  RunCode(Env(), ast, output)
  output_str = BeautifyCode(output.string)
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'  # '-' means "write to stdout".
  if output_file_path == '-':
    # write() replaces the Python-2-only "print output_str," (which
    # suppressed the trailing newline) in a Python-3-compatible way.
    sys.stdout.write(output_str)
  else:
    # 'with' guarantees the handle is closed; the original used the
    # removed file() builtin and never closed it.
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('//     %s %s\n' %
                        (os.path.basename(__file__),
                         os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)


if __name__ == '__main__':
  main(sys.argv)