/lib/antlr-2.7.5/antlr/BooCodeGenerator.java
Java | 4011 lines | 2604 code | 425 blank | 982 comment | 820 complexity | f37e530769f0c83ee80ff8f87b685e7b MD5 | raw file
Possible License(s): GPL-2.0
Large files are truncated, but you can click here to view the full file
- package antlr;
- /* ANTLR Translator Generator
- * Project led by Terence Parr at http://www.jGuru.com
- * Software rights: http://www.antlr.org/license.html
- *
- * $Id:$
- */
- //
- // ANTLR C# Code Generator by Micheal Jordan
- // Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com
- // Anthony Oguntimehin
- //
- // With many thanks to Eric V. Smith from the ANTLR list.
- //
- // HISTORY:
- //
- // 17-May-2002 kunle Fixed bug in OctalToUnicode() - was processing non-Octal escape sequences
- // Also added namespace support based on Cpp version.
- // 07-Jun-2002 kunle Added Scott Ellis's _saveIndex creation optimizations
- // 09-Sep-2002 richardN Richard Ney's bug-fix for literals table construction.
- // [ Hashtable ctor needed instance of hash code provider not it's class name. ]
- // 17-Sep-2002 kunle & Added all Token ID definitions as data member of every Lexer/Parser/TreeParser
- // AOg [ A by-product of problem-solving phase of the hetero-AST changes below
- // but, it breaks nothing and restores "normal" ANTLR codegen behaviour. ]
- // 19-Oct-2002 kunle & Completed the work required to support heterogenous ASTs (many changes)
- // AOg &
- // michealj
- // 14-Nov-2002 michealj Added "initializeASTFactory()" to support flexible ASTFactory initialization.
- // [ Thanks to Ric Klaren - for suggesting it and implementing it for Cpp. ]
- // 18-Nov-2002 kunle Added fix to make xx_tokenSet_xx names CLS compliant.
- // 01-Dec-2002 richardN Patch to reduce "unreachable code" warnings
- // 01-Dec-2002 richardN Fix to generate correct TreeParser token-type classnames.
- // 12-Jan-2003 kunle & Generated Lexers, Parsers and TreeParsers now support ANTLR's tracing option.
- // michealj
- // 12-Jan-2003 kunle Fixed issue where initializeASTFactory() was generated when "buildAST=false"
- // 14-Jan-2003 AOg initializeASTFactory(AST factory) method was modifying the Parser's "astFactory"
- // member rather than it's own "factory" parameter. Fixed.
- // 18-Jan-2003 kunle & Fixed reported issues with ASTFactory create() calls for hetero ASTs
- // michealj - code generated for LEXER token with hetero-AST option specified does not compile
- // - code generated for imaginary tokens with hetero-AST option specified uses
- // default AST type
- // - code generated for per-TokenRef hetero-AST option specified does not compile
- // 18-Jan-2003 kunle initializeASTFactory(AST) method is now a static public member
- // 18-May-2003 kunle Changes to address outstanding reported issues::
- // - Fixed reported issues with support for case-sensitive literals
- // - antlr.SemanticException now imported for all Lexers.
- // [ This exception is thrown on predicate failure. ]
- // 12-Jan-2004 kunle Added fix for reported issue with un-compileable generated lexers
- //
- //
- import java.io.IOException;
- import java.util.Enumeration;
- import java.util.Hashtable;
- import java.util.Iterator;
- import java.util.List;
- import java.util.StringTokenizer;
- import antlr.collections.impl.BitSet;
- import antlr.collections.impl.Vector;
- /** Generates MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs */
- public class BooCodeGenerator extends CodeGenerator {
- // non-zero if inside syntactic predicate generation
- protected int syntacticPredLevel = 0;
- // Are we generating ASTs (for parsers and tree parsers) right now?
- protected boolean genAST = false;
- // Are we saving the text consumed (for lexers) right now?
- protected boolean saveText = false;
- // Grammar parameters set up to handle different grammar classes.
- // These are used to get instanceof tests out of code generation
- boolean usingCustomAST = false;
- String labeledElementType;
- String labeledElementASTType;
- String labeledElementInit;
- String commonExtraArgs;
- String commonExtraParams;
- String commonLocalVars;
- String lt1Value;
- String exceptionThrown;
- String throwNoViable;
- // Tracks the rule being generated. Used for mapTreeId
- RuleBlock currentRule;
- // Tracks the rule or labeled subrule being generated. Used for AST
- // generation.
- String currentASTResult;
- /**
- * Mapping between the ids used in the current alt, and the names of
- * variables used to represent their AST values.
- */
- Hashtable treeVariableMap = new Hashtable();
- /**
- * Used to keep track of which AST variables have been defined in a rule
- * (except for the #rule_name and #rule_name_in var's
- */
- Hashtable declaredASTVariables = new Hashtable();
- /* Count of unnamed generated variables */
- int astVarNumber = 1;
- /** Special value used to mark duplicate in treeVariableMap */
- protected static final String NONUNIQUE = new String();
- public static final int caseSizeThreshold = 127; // ascii is max
- private Vector semPreds;
- // Used to keep track of which (heterogeneous AST types are used)
- // which need to be set in the ASTFactory of the generated parser
- private java.util.Vector astTypes;
- private static BooNameSpace nameSpace = null;
- /**
- * Create a Boo code-generator using the given Grammar. The caller must
- * still call setTool, setBehavior, and setAnalyzer before generating code.
- */
- public BooCodeGenerator() {
- super();
- charFormatter = new BooCharFormatter();
- }
- /**
- * Adds a semantic predicate string to the sem pred vector These strings
- * will be used to build an array of sem pred names when building a
- * debugging parser. This method should only be called when the debug option
- * is specified
- */
- protected int addSemPred(String predicate) {
- semPreds.appendElement(predicate);
- return semPreds.size() - 1;
- }
- public void exitIfError() {
- if (antlrTool.hasError()) {
- antlrTool.fatalError("Exiting due to errors.");
- }
- }
- /** Generate the parser, lexer, treeparser, and token types in Boo */
- public void gen() {
- // Do the code generation
- try {
- // Loop over all grammars
- Enumeration grammarIter = behavior.grammars.elements();
- while (grammarIter.hasMoreElements()) {
- Grammar g = (Grammar) grammarIter.nextElement();
- // Connect all the components to each other
- g.setGrammarAnalyzer(analyzer);
- g.setCodeGenerator(this);
- analyzer.setGrammar(g);
- // To get right overloading behavior across heterogeneous
- // grammars
- setupGrammarParameters(g);
- g.generate();
- exitIfError();
- }
- // Loop over all token managers (some of which are lexers)
- Enumeration tmIter = behavior.tokenManagers.elements();
- while (tmIter.hasMoreElements()) {
- TokenManager tm = (TokenManager) tmIter.nextElement();
- if (!tm.isReadOnly()) {
- // Write the token manager tokens as Boo
- // this must appear before genTokenInterchange so that
- // labels are set on string literals
- genTokenTypes(tm);
- // Write the token manager tokens as plain text
- genTokenInterchange(tm);
- }
- exitIfError();
- }
- } catch (IOException e) {
- antlrTool.reportException(e, null);
- }
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The {...} action to generate
- */
- public void gen(ActionElement action) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genAction(" + action + ")");
- if (action.isSemPred) {
- genSemPred(action.actionText, action.line);
- } else {
- if (grammar.hasSyntacticPredicate) {
- println("if 0 == inputState.guessing:");
- tabs++;
- }
- ActionTransInfo tInfo = new ActionTransInfo();
- String actionStr = processActionForSpecialSymbols(
- action.actionText, action.getLine(), currentRule, tInfo);
- if (tInfo.refRuleRoot != null) {
- // Somebody referenced "#rule", make sure translated var is
- // valid
- // assignment to #rule is left as a ref also, meaning that
- // assignments
- // with no other refs like "#rule = foo();" still forces this
- // code to be
- // generated (unnecessarily).
- println(tInfo.refRuleRoot + " = cast(" + labeledElementASTType
- + ", currentAST).root");
- }
- // dump the translated action
- printAction(actionStr);
- if (tInfo.assignToRoot) {
- // Somebody did a "#rule=", reset internal currentAST.root
- println("currentAST.root = " + tInfo.refRuleRoot);
- // reset the child pointer too to be last sibling in sibling
- // list
- println("if (" + tInfo.refRuleRoot + " is not null) and ("
- + tInfo.refRuleRoot + ".getFirstChild() is not null):");
- tabs++;
- println("currentAST.child = " + tInfo.refRuleRoot
- + ".getFirstChild()");
- tabs--;
- println("else:");
- tabs++;
- println("currentAST.child = " + tInfo.refRuleRoot);
- tabs--;
- println("currentAST.advanceChildToEnd()");
- }
- if (grammar.hasSyntacticPredicate) {
- tabs--;
- }
- }
- }
-
- protected void printAction(String s) {
- if (null == s) {
- return;
- }
-
- List nonEmptyLines = new java.util.ArrayList();
- String[] lines = s.replaceAll("\r\n", "\n").split("\n");
- for (int i=0; i<lines.length; ++i) {
- String line = lines[i];
- if (line.trim().length() > 0) {
- nonEmptyLines.add(line);
- }
- }
-
- if (0 == nonEmptyLines.size()) {
- return;
- }
-
- Iterator iterator = nonEmptyLines.iterator();
-
- String indent = getStartingWhitespace((String)nonEmptyLines.get(0));
- if (0 == indent.length()) {
- while (iterator.hasNext()) {
- String line = (String) iterator.next();
- println(line);
- }
- } else {
- while (iterator.hasNext()) {
- String line = (String) iterator.next();
- println(line.substring(indent.length()));
- }
- }
- }
-
- private static String getStartingWhitespace(String s) {
- for (int i=0; i<s.length(); ++i) {
- char ch = s.charAt(i);
- if (!Character.isWhitespace(ch)) {
- return s.substring(0, i);
- }
- }
- return s;
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The "x|y|z|..." block to generate
- */
- public void gen(AlternativeBlock blk) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("gen(" + blk + ")");
- //println("block:");
- //tabs++;
- genBlockPreamble(blk);
- genBlockInitAction(blk);
- // Tell AST generation to build subrule result
- String saveCurrentASTResult = currentASTResult;
- if (blk.getLabel() != null) {
- currentASTResult = blk.getLabel();
- }
- boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
- BooBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
- genBlockFinish(howToFinish, throwNoViable);
- //tabs--;
- // Restore previous AST generation
- currentASTResult = saveCurrentASTResult;
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The block-end element to generate. Block-end elements are
- * synthesized by the grammar parser to represent the end of a
- * block.
- */
- public void gen(BlockEndElement end) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genRuleEnd(" + end + ")");
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The character literal reference to generate
- */
- public void gen(CharLiteralElement atom) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genChar(" + atom + ")");
- if (atom.getLabel() != null) {
- println(atom.getLabel() + " = " + lt1Value);
- }
- boolean oldsaveText = saveText;
- saveText = saveText
- && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;
- genMatch(atom);
- saveText = oldsaveText;
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The character-range reference to generate
- */
- public void gen(CharRangeElement r) {
- if (r.getLabel() != null && syntacticPredLevel == 0) {
- println(r.getLabel() + " = " + lt1Value);
- }
- boolean flag = (grammar instanceof LexerGrammar && (!saveText || (r
- .getAutoGenType() == GrammarElement.AUTO_GEN_BANG)));
- if (flag)
- println("_saveIndex = text.Length");
- println("matchRange(" + OctalToUnicode(r.beginText) + ","
- + OctalToUnicode(r.endText) + ")");
- if (flag)
- println("text.Length = _saveIndex");
- }
- /** Generate the lexer Boo file */
- public void gen(LexerGrammar g) throws IOException {
- // If debugging, create a new sempred vector for this grammar
- if (g.debuggingOutput)
- semPreds = new Vector();
- setGrammar(g);
- if (!(grammar instanceof LexerGrammar)) {
- antlrTool.panic("Internal error generating lexer");
- }
- genBody(g);
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The (...)+ block to generate
- */
- public void gen(OneOrMoreBlock blk) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("gen+(" + blk + ")");
- String label;
- String cnt;
- //println("block: // ( ... )+");
- //tabs++;
- genBlockPreamble(blk);
- if (blk.getLabel() != null) {
- cnt = "_cnt_" + blk.getLabel();
- } else {
- cnt = "_cnt" + blk.ID;
- }
- println(cnt + " as int = 0");
- if (blk.getLabel() != null) {
- label = blk.getLabel();
- } else {
- label = "_loop" + blk.ID;
- }
- println("while true:");
- tabs++;
- // generate the init action for ()+ ()* inside the loop
- // this allows us to do usefull EOF checking...
- genBlockInitAction(blk);
- // Tell AST generation to build subrule result
- String saveCurrentASTResult = currentASTResult;
- if (blk.getLabel() != null) {
- currentASTResult = blk.getLabel();
- }
- boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
- // generate exit test if greedy set to false
- // and an alt is ambiguous with exit branch
- // or when lookahead derived purely from end-of-file
- // Lookahead analysis stops when end-of-file is hit,
- // returning set {epsilon}. Since {epsilon} is not
- // ambig with any real tokens, no error is reported
- // by deterministic() routines and we have to check
- // for the case where the lookahead depth didn't get
- // set to NONDETERMINISTIC (this only happens when the
- // FOLLOW contains real atoms + epsilon).
- boolean generateNonGreedyExitPath = false;
- int nonGreedyExitDepth = grammar.maxk;
- if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk
- && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
- generateNonGreedyExitPath = true;
- nonGreedyExitDepth = blk.exitLookaheadDepth;
- } else if (!blk.greedy
- && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
- generateNonGreedyExitPath = true;
- }
- // generate exit test if greedy set to false
- // and an alt is ambiguous with exit branch
- if (generateNonGreedyExitPath) {
- if (DEBUG_CODE_GENERATOR) {
- System.out.println("nongreedy (...)+ loop; exit depth is "
- + blk.exitLookaheadDepth);
- }
- String predictExit = getLookaheadTestExpression(blk.exitCache,
- nonGreedyExitDepth);
- println("// nongreedy exit test");
- println("if ((" + cnt + " >= 1) and " + predictExit + "):");
- printSingleLineBlock("goto " + label + "_breakloop");
- }
- BooBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
-
- final String finalcnt = cnt;
- final String finalLabel = label;
- genBlockFinish(howToFinish, new Runnable() {
- public void run() {
- println("if (" + finalcnt + " >= 1):");
- printSingleLineBlock("goto " + finalLabel + "_breakloop");
- println("else:");
- printSingleLineBlock(throwNoViable);
- }
- });
- println("++" + cnt);
- tabs--;
- println(":" + label + "_breakloop");
- //tabs--;
- //println("// ( ... )+");
- // Restore previous AST generation
- currentASTResult = saveCurrentASTResult;
- }
- private void printSingleLineBlock(String stmt) {
- tabs++;
- println(stmt);
- tabs--;
- }
- /** Generate the parser Boo file */
- public void gen(ParserGrammar g) throws IOException {
- // if debugging, set up a new vector to keep track of sempred
- // strings for this grammar
- if (g.debuggingOutput)
- semPreds = new Vector();
- setGrammar(g);
- if (!(grammar instanceof ParserGrammar)) {
- antlrTool.panic("Internal error generating parser");
- }
- genBody(g);
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The rule-reference to generate
- */
- public void gen(RuleRefElement rr) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genRR(" + rr + ")");
- RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
- if (rs == null || !rs.isDefined()) {
- // Is this redundant???
- antlrTool.error("Rule '" + rr.targetRule + "' is not defined",
- grammar.getFilename(), rr.getLine(), rr.getColumn());
- return;
- }
- if (!(rs instanceof RuleSymbol)) {
- // Is this redundant???
- antlrTool.error("'" + rr.targetRule
- + "' does not name a grammar rule", grammar.getFilename(),
- rr.getLine(), rr.getColumn());
- return;
- }
- genErrorTryForElement(rr);
- // AST value for labeled rule refs in tree walker.
- // This is not AST construction; it is just the input tree node value.
- if (grammar instanceof TreeWalkerGrammar && rr.getLabel() != null
- && syntacticPredLevel == 0) {
- println(rr.getLabel() + " = _t == ASTNULL ? null : " + lt1Value);
- }
- // if in lexer and ! on rule ref or alt or rule, save buffer index to
- // kill later
- if (grammar instanceof LexerGrammar
- && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
- println("_saveIndex = text.Length");
- }
- // Process return value assignment if any
- printTabs();
- if (rr.idAssign != null) {
- // Warn if the rule has no return type
- if (rs.block.returnAction == null) {
- antlrTool.warning("Rule '" + rr.targetRule
- + "' has no return type", grammar.getFilename(), rr
- .getLine(), rr.getColumn());
- }
- _print(rr.idAssign + "=");
- } else {
- // Warn about return value if any, but not inside syntactic
- // predicate
- if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0
- && rs.block.returnAction != null) {
- antlrTool.warning("Rule '" + rr.targetRule
- + "' returns a value", grammar.getFilename(), rr
- .getLine(), rr.getColumn());
- }
- }
- // Call the rule
- GenRuleInvocation(rr);
- // if in lexer and ! on element or alt or rule, save buffer index to
- // kill later
- if (grammar instanceof LexerGrammar
- && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
- println("text.Length = _saveIndex");
- }
- // if not in a syntactic predicate
- if (syntacticPredLevel == 0) {
- boolean doNoGuessTest = (grammar.hasSyntacticPredicate && (grammar.buildAST
- && rr.getLabel() != null || (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)));
- if (doNoGuessTest) {
- println("if (0 == inputState.guessing):");
- tabs++;
- }
- if (grammar.buildAST && rr.getLabel() != null) {
- // always gen variable for rule return on labeled rules
- println(rr.getLabel() + "_AST = cast(" + labeledElementASTType
- + ", returnAST)");
- }
- if (genAST) {
- switch (rr.getAutoGenType()) {
- case GrammarElement.AUTO_GEN_NONE:
- if (usingCustomAST)
- println("astFactory.addASTChild(currentAST, cast(AST, returnAST))");
- else
- println("astFactory.addASTChild(currentAST, returnAST)");
- break;
- case GrammarElement.AUTO_GEN_CARET:
- antlrTool
- .error("Internal: encountered ^ after rule reference");
- break;
- default:
- break;
- }
- }
- // if a lexer and labeled, Token label defined at rule level, just
- // set it here
- if (grammar instanceof LexerGrammar && rr.getLabel() != null) {
- println(rr.getLabel() + " = returnToken_");
- }
- if (doNoGuessTest) {
- tabs--;
- }
- }
- genErrorCatchForElement(rr);
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The string-literal reference to generate
- */
- public void gen(StringLiteralElement atom) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genString(" + atom + ")");
- // Variable declarations for labeled elements
- if (atom.getLabel() != null && syntacticPredLevel == 0) {
- println(atom.getLabel() + " = " + lt1Value);
- }
- // AST
- genElementAST(atom);
- // is there a bang on the literal?
- boolean oldsaveText = saveText;
- saveText = saveText
- && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;
- // matching
- genMatch(atom);
- saveText = oldsaveText;
- // tack on tree cursor motion if doing a tree walker
- if (grammar instanceof TreeWalkerGrammar) {
- println("_t = _t.getNextSibling()");
- }
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The token-range reference to generate
- */
- public void gen(TokenRangeElement r) {
- genErrorTryForElement(r);
- if (r.getLabel() != null && syntacticPredLevel == 0) {
- println(r.getLabel() + " = " + lt1Value);
- }
- // AST
- genElementAST(r);
- // match
- println("matchRange(" + OctalToUnicode(r.beginText) + ","
- + OctalToUnicode(r.endText) + ")");
- genErrorCatchForElement(r);
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The token-reference to generate
- */
- public void gen(TokenRefElement atom) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("genTokenRef(" + atom + ")");
- if (grammar instanceof LexerGrammar) {
- antlrTool.panic("Token reference found in lexer");
- }
- genErrorTryForElement(atom);
- // Assign Token value to token label variable
- if (atom.getLabel() != null && syntacticPredLevel == 0) {
- println(atom.getLabel() + " = " + lt1Value);
- }
- // AST
- genElementAST(atom);
- // matching
- genMatch(atom);
- genErrorCatchForElement(atom);
- // tack on tree cursor motion if doing a tree walker
- if (grammar instanceof TreeWalkerGrammar) {
- println("_t = _t.getNextSibling()");
- }
- }
- public void gen(TreeElement t) {
- // save AST cursor
- println("__t" + t.ID + " as AST " + " = _t");
- // If there is a label on the root, then assign that to the variable
- if (t.root.getLabel() != null) {
- println(t.root.getLabel() + " = (ASTNULL == _t) ? null : cast("
- + labeledElementASTType + ", _t)");
- }
- // check for invalid modifiers ! and ^ on tree element roots
- if (t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG) {
- antlrTool.error(
- "Suffixing a root node with '!' is not implemented",
- grammar.getFilename(), t.getLine(), t.getColumn());
- t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
- }
- if (t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET) {
- antlrTool
- .warning(
- "Suffixing a root node with '^' is redundant; already a root",
- grammar.getFilename(), t.getLine(), t.getColumn());
- t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
- }
- // Generate AST variables
- genElementAST(t.root);
- if (grammar.buildAST) {
- // Save the AST construction state
- println("__currentAST" + t.ID + " as ASTPair = currentAST.copy()");
- // Make the next item added a child of the TreeElement root
- println("currentAST.root = currentAST.child");
- println("currentAST.child = null");
- }
- // match root
- if (t.root instanceof WildcardElement) {
- println("raise MismatchedTokenException() if _t is null");
- } else {
- genMatch(t.root);
- }
- // move to list of children
- println("_t = _t.getFirstChild()");
- // walk list of children, generating code for each
- for (int i = 0; i < t.getAlternatives().size(); i++) {
- Alternative a = t.getAlternativeAt(i);
- AlternativeElement e = a.head;
- while (e != null) {
- e.generate();
- e = e.next;
- }
- }
- if (grammar.buildAST) {
- // restore the AST construction state to that just after the
- // tree root was added
- println("ASTPair.PutInstance(currentAST)");
- println("currentAST = __currentAST" + t.ID);
- }
- // restore AST cursor
- println("_t = __t" + t.ID);
- // move cursor to sibling of tree just parsed
- println("_t = _t.getNextSibling()");
- }
- /** Generate the tree-parser Boo file */
- public void gen(TreeWalkerGrammar g) throws IOException {
- // SAS: debugging stuff removed for now...
- setGrammar(g);
- if (!(grammar instanceof TreeWalkerGrammar)) {
- antlrTool.panic("Internal error generating tree-walker");
- }
- genBody(g);
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param wc
- * The wildcard element to generate
- */
- public void gen(WildcardElement wc) {
- // Variable assignment for labeled elements
- if (wc.getLabel() != null && syntacticPredLevel == 0) {
- println(wc.getLabel() + " = " + lt1Value);
- }
- // AST
- genElementAST(wc);
- // Match anything but EOF
- if (grammar instanceof TreeWalkerGrammar) {
- println("raise MismatchedTokenException() if _t is null");
- } else if (grammar instanceof LexerGrammar) {
- if (grammar instanceof LexerGrammar
- && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
- println("_saveIndex = text.Length");
- }
- println("matchNot(EOF/*_CHAR*/)");
- if (grammar instanceof LexerGrammar
- && (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
- println("text.Length = _saveIndex"); // kill text atom put in
- // buffer
- }
- } else {
- println("matchNot(" + getValueString(Token.EOF_TYPE) + ")");
- }
- // tack on tree cursor motion if doing a tree walker
- if (grammar instanceof TreeWalkerGrammar) {
- println("_t = _t.getNextSibling()");
- }
- }
- /**
- * Generate code for the given grammar element.
- *
- * @param blk
- * The (...)* block to generate
- */
- public void gen(ZeroOrMoreBlock blk) {
- if (DEBUG_CODE_GENERATOR)
- System.out.println("gen*(" + blk + ")");
- //println("block: // ( ... )*");
- //tabs++;
- genBlockPreamble(blk);
- String label;
- if (blk.getLabel() != null) {
- label = blk.getLabel();
- } else {
- label = "_loop" + blk.ID;
- }
- println("while true:");
- tabs++;
- // generate the init action for ()+ ()* inside the loop
- // this allows us to do usefull EOF checking...
- genBlockInitAction(blk);
- // Tell AST generation to build subrule result
- String saveCurrentASTResult = currentASTResult;
- if (blk.getLabel() != null) {
- currentASTResult = blk.getLabel();
- }
- boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
- // generate exit test if greedy set to false
- // and an alt is ambiguous with exit branch
- // or when lookahead derived purely from end-of-file
- // Lookahead analysis stops when end-of-file is hit,
- // returning set {epsilon}. Since {epsilon} is not
- // ambig with any real tokens, no error is reported
- // by deterministic() routines and we have to check
- // for the case where the lookahead depth didn't get
- // set to NONDETERMINISTIC (this only happens when the
- // FOLLOW contains real atoms + epsilon).
- boolean generateNonGreedyExitPath = false;
- int nonGreedyExitDepth = grammar.maxk;
- if (!blk.greedy && blk.exitLookaheadDepth <= grammar.maxk
- && blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
- generateNonGreedyExitPath = true;
- nonGreedyExitDepth = blk.exitLookaheadDepth;
- } else if (!blk.greedy
- && blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
- generateNonGreedyExitPath = true;
- }
- if (generateNonGreedyExitPath) {
- if (DEBUG_CODE_GENERATOR) {
- System.out.println("nongreedy (...)* loop; exit depth is "
- + blk.exitLookaheadDepth);
- }
- String predictExit = getLookaheadTestExpression(blk.exitCache,
- nonGreedyExitDepth);
- println("// nongreedy exit test");
- println("goto " + label + "_breakloop if " + predictExit);
- }
- BooBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
- genBlockFinish(howToFinish, "goto " + label + "_breakloop");
- tabs--;
- println(":" + label + "_breakloop");
- //tabs--;
- //println("// ( ... )*");
- // Restore previous AST generation
- currentASTResult = saveCurrentASTResult;
- }
- /**
- * Generate an alternative.
- *
- * @param alt
- * The alternative to generate
- * @param blk
- * The block to which the alternative belongs
- */
- protected void genAlt(Alternative alt, AlternativeBlock blk) {
- // Save the AST generation state, and set it to that of the alt
- boolean savegenAST = genAST;
- genAST = genAST && alt.getAutoGen();
- boolean oldsaveTest = saveText;
- saveText = saveText && alt.getAutoGen();
- // Reset the variable name map for the alternative
- Hashtable saveMap = treeVariableMap;
- treeVariableMap = new Hashtable();
- // Generate try block around the alt for error handling
- if (alt.exceptionSpec != null) {
- println("try: // for error handling");
- tabs++;
- }
- int generatedElements = 0;
- AlternativeElement elem = alt.head;
- while (!(elem instanceof BlockEndElement)) {
- elem.generate(); // alt can begin with anything. Ask target to
- // gen.
- ++generatedElements;
- elem = elem.next;
- }
-
- if (0 == generatedElements) {
- println("pass // 947");
- }
- if (genAST) {
- if (blk instanceof RuleBlock) {
- // Set the AST return value for the rule
- RuleBlock rblk = (RuleBlock) blk;
- if (usingCustomAST) {
- println(rblk.getRuleName() + "_AST = cast("
- + labeledElementASTType + ", currentAST.root)");
- } else {
- println(rblk.getRuleName() + "_AST = currentAST.root");
- }
- } else if (blk.getLabel() != null) {
- // ### future: also set AST value for labeled subrules.
- // println(blk.getLabel() + "_AST =
- // ("+labeledElementASTType+")currentAST.root;");
- antlrTool.warning("Labeled subrules not yet supported", grammar
- .getFilename(), blk.getLine(), blk.getColumn());
- }
- }
- if (alt.exceptionSpec != null) {
- // close try block
- tabs--;
- genErrorHandler(alt.exceptionSpec);
- }
- genAST = savegenAST;
- saveText = oldsaveTest;
- treeVariableMap = saveMap;
- }
- /**
- * Generate all the bitsets to be used in the parser or lexer Generate the
- * raw bitset data like "long _tokenSet1_data[] = {...};" and the BitSet
- * object declarations like "BitSet _tokenSet1 = new
- * BitSet(_tokenSet1_data);" Note that most languages do not support object
- * initialization inside a class definition, so other code-generators may
- * have to separate the bitset declarations from the initializations (e.g.,
- * put the initializations in the generated constructor instead).
- *
- * @param bitsetList
- * The list of bitsets to generate.
- * @param maxVocabulary
- * Ensure that each generated bitset can contain at least this
- * value.
- */
- protected void genBitsets(Vector bitsetList, int maxVocabulary) {
- println("");
- for (int i = 0; i < bitsetList.size(); i++) {
- BitSet p = (BitSet) bitsetList.elementAt(i);
- // Ensure that generated BitSet is large enough for vocabulary
- p.growToInclude(maxVocabulary);
- genBitSet(p, i);
- }
- }
- /**
- * Do something simple like: private static final long[] mk_tokenSet_0() {
- * long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L };
- * return data; } public static final BitSet _tokenSet_0 = new
- * BitSet(mk_tokenSet_0()); Or, for large bitsets, optimize init so ranges
- * are collapsed into loops. This is most useful for lexers using unicode.
- */
- private void genBitSet(BitSet p, int id) {
- // initialization data
- println("private static def mk_" + getBitsetName(id) + "() as (long):");
- tabs++;
- int n = p.lengthInLongWords();
- if (n < BITSET_OPTIMIZE_INIT_THRESHOLD) {
- println("data = (" + p.toStringOfWords() + ", )");
- } else {
- // will init manually, allocate space then set values
- println("data = array(long, " + n + ")");
- long[] elems = p.toPackedArray();
- for (int i = 0; i < elems.length;) {
- if ((i + 1) == elems.length || elems[i] != elems[i + 1]) {
- // last number or no run of numbers, just dump assignment
- println("data[" + i + "]=" + elems[i] + "L");
- i++;
- } else {
- // scan to find end of run
- int j;
- for (j = i + 1; j < elems.length && elems[j] == elems[i]; j++) {
- ;
- }
- // j-1 is last member of run
- println("i = " + i);
- println("while i<=" + (j - 1) + ":");
- ++tabs;
- println("data[i] = " + elems[i] + "L");
- println("++i");
- --tabs;
- i = j;
- }
- }
- }
- println("return data");
- tabs--;
- // BitSet object
- println("public static final " + getBitsetName(id)
- + " = BitSet(" + "mk_" + getBitsetName(id) + "()" + ")");
- }
- /**
- * Given the index of a bitset in the bitset list, generate a unique name.
- * Specific code-generators may want to override this if the language does
- * not allow '_' or numerals in identifiers.
- *
- * @param index
- * The index of the bitset in the bitset list.
- */
- protected String getBitsetName(int index) {
- return "tokenSet_" + index + "_";
- }
- /**
- * Generate the finish of a block, using a combination of the info returned
- * from genCommonBlock() and the action to perform when no alts were taken
- *
- * @param howToFinish
- * The return of genCommonBlock()
- * @param noViableAction
- * What to generate when no alt is taken
- */
- private void genBlockFinish(BooBlockFinishingInfo howToFinish,
- Runnable noViableAction) {
-
- boolean ifOrSwitch = (howToFinish.generatedAnIf || howToFinish.generatedSwitch);
-
- if (howToFinish.needAnErrorClause
- && ifOrSwitch) {
- if (howToFinish.generatedAnIf) {
- println("else:");
- }
- ++tabs;
- noViableAction.run();
- --tabs;
- }
- if (howToFinish.postscript != null) {
- println(howToFinish.postscript);
- }
-
- if (howToFinish.generatedSwitch) {
- --tabs;
- }
- }
-
- private void genBlockFinish(BooBlockFinishingInfo howToFinish, final String noViableAction) {
- genBlockFinish(howToFinish, new Runnable() {
- public void run() {
- println(noViableAction);
- }
- });
- }
- /**
- * Generate the init action for a block, which may be a RuleBlock or a plain
- * AlternativeBLock.
- *
- * @blk The block for which the preamble is to be generated.
- */
- protected void genBlockInitAction(AlternativeBlock blk) {
- // dump out init action
- if (blk.initAction != null) {
- printAction(processActionForSpecialSymbols(blk.initAction, blk
- .getLine(), currentRule, null));
- }
- }
	/**
	 * Generate the header for a block, which may be a RuleBlock or a plain
	 * AlternativeBlock. This generates any variable declarations and
	 * syntactic-predicate-testing variables: for each labeled element of a
	 * rule block it declares the label variable (and the matching *_AST
	 * variable when ASTs are being built).
	 *
	 * @blk The block for which the preamble is to be generated.
	 */
	protected void genBlockPreamble(AlternativeBlock blk) {
		// define labels for rule blocks.
		if (blk instanceof RuleBlock) {
			RuleBlock rblk = (RuleBlock) blk;
			if (rblk.labeledElements != null) {
				for (int i = 0; i < rblk.labeledElements.size(); i++) {
					AlternativeElement a = (AlternativeElement) rblk.labeledElements
							.elementAt(i);
					// System.out.println("looking at labeled element: "+a);
					// Variables for labeled rule refs and
					// subrules are different than variables for
					// grammar atoms. This test is a little tricky
					// because we want to get all rule refs and ebnf,
					// but not rule blocks or syntactic predicates.
					// NOTE: '&&' binds tighter than '||', so this reads as
					// "rule ref, OR (subrule that is neither a rule block
					// nor a syntactic predicate)".
					if (a instanceof RuleRefElement
							|| a instanceof AlternativeBlock
							&& !(a instanceof RuleBlock)
							&& !(a instanceof SynPredBlock)) {
						if (!(a instanceof RuleRefElement)
								&& ((AlternativeBlock) a).not
								&& analyzer.subruleCanBeInverted(
										((AlternativeBlock) a),
										grammar instanceof LexerGrammar)) {
							// Special case for inverted subrules that
							// will be inlined. Treat these like
							// token or char literal references
							println(a.getLabel() + " as " + labeledElementType
									+ " = " + labeledElementInit);
							if (grammar.buildAST) {
								genASTDeclaration(a);
							}
						} else {
							if (grammar.buildAST) {
								// Always gen AST variables for
								// labeled elements, even if the
								// element itself is marked with !
								genASTDeclaration(a);
							}
							if (grammar instanceof LexerGrammar) {
								// lexer labels hold the matched token
								println(a.getLabel() + " as IToken");
							}
							if (grammar instanceof TreeWalkerGrammar) {
								// always generate rule-ref variables
								// for tree walker
								println(a.getLabel() + " as " + labeledElementType
										+ " = " + labeledElementInit);
							}
						}
					} else {
						// It is a token or literal reference. Generate the
						// correct variable type for this grammar
						println(a.getLabel() + " as " + labeledElementType + " = "
								+ labeledElementInit);
						// In addition, generate *_AST variables if building
						// ASTs
						if (grammar.buildAST) {
							// println(labeledElementASTType+" " + a.getLabel()
							// + "_AST = null;");
							if (a instanceof GrammarAtom
									&& ((GrammarAtom) a).getASTNodeType() != null) {
								// honor a per-atom heterogeneous AST node type
								GrammarAtom ga = (GrammarAtom) a;
								genASTDeclaration(a, ga.getASTNodeType());
							} else {
								genASTDeclaration(a);
							}
						}
					}
				}
			}
		}
	}
- public void genBody(LexerGrammar g) throws IOException {
- // SAS: moved output creation to method so a subclass can change
- // how the output is generated (for VAJ interface)
- setupOutput(grammar.getClassName());
- genAST = false; // no way to gen trees.
- saveText = true; // save consumed characters.
- tabs = 0;
- // Generate header common to all Boo output files
- genHeader();
- // Do not use printAction because we assume tabs==0
- println(behavior.getHeaderAction(""));
- // Generate the Boo namespace declaration (if specified)
- if (nameSpace != null)
- nameSpace.emitDeclarations(currentOutput);
- // Generate header specific to lexer Boo file
- println("// Generate header specific to lexer Boo file");
- println("import System");
- println("import System.IO.Stream as Stream");
- println("import System.IO.TextReader as TextReader");
- println("import System.Collections.Hashtable as Hashtable");
- println("import System.Collections.Comparer as Comparer");
- if (!(g.caseSensitiveLiterals)) {
- println("import System.Collections.CaseInsensitiveHashCodeProvider as CaseInsensitiveHashCodeProvider");
- println("import System.Collections.CaseInsensitiveComparer as CaseInsensitiveComparer");
- }
- println("");
- println("import antlr.TokenStreamException as TokenStreamException");
- println("import antlr.TokenStreamIOException as TokenStreamIOException");
- println("import antlr.TokenStreamRecognitionException as TokenStreamRecognitionException");
- println("import antlr.CharStreamException as CharStreamException");
- println("import antlr.CharStreamIOException as CharStreamIOException");
- println("import antlr.ANTLRException as ANTLRException");
- println("import antlr.CharScanner as CharScanner");
- println("import antlr.InputBuffer as InputBuffer");
- println("import antlr.ByteBuffer as ByteBuffer");
- println("import antlr.CharBuffer as CharBuffer");
- println("import antlr.Token as Token");
- println("import antlr.IToken as IToken");
- println("import antlr.CommonToken as CommonToken");
- println("import antlr.SemanticException as SemanticException");
- println("import antlr.RecognitionException as RecognitionException");
- println("import antlr.NoViableAltForCharException as NoViableAltForCharException");
- println("import antlr.MismatchedCharException as MismatchedCharException");
- println("import antlr.TokenStream as TokenStream");
- println("import antlr.LexerSharedInputState as LexerSharedInputState");
- println("import antlr.collections.impl.BitSet as BitSet");
- // Generate user-defined lexer file preamble
- println(grammar.preambleAction.getText());
- // Generate lexer class definition
- String sup = null;
- if (grammar.superClass != null) {
- sup = grammar.superClass;
- } else {
- sup = "antlr." + grammar.getSuperClass();
- }
- // print javadoc comment if any
- if (grammar.comment != null) {
- _println(grammar.comment);
- }
- Token tprefix = (Token) grammar.options.get("classHeaderPrefix");
- if (tprefix != null) {
- String p = StringUtils
- .stripFrontBack(tprefix.getText(), "\"", "\"");
- if (p != null) {
- print(p + " ");
- }
- }
- print("class " + grammar.getClassName() + "(" + sup);
- print(", TokenStream");
- Token tsuffix = (Token) grammar.options.get("classHeaderSuffix");
- if (tsuffix != null) {
- String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"",
- "\"");
- if (suffix != null) {
- print(", " + suffix); // must be an interface name for Boo
- }
- }
- println("):");
- tabs++;
- // Generate 'const' definitions for Token IDs
- genTokenDefinitions(grammar.tokenManager);
- // Generate user-defined lexer class members
- print(processActionForSpecialSymbols(grammar.classMemberAction
- .getText(), grammar.classMemberAction.getLine(), currentRule,
- null));
- //
- // Generate the constructor from InputStream, which in turn
- // calls the ByteBuffer constructor
- //
- println("def constructor(ins as Stream):");
- printSingleLineBlock("self(ByteBuffer(ins))");
- println("");
- //
- // Generate the constructor from Reader, which in turn
- // calls the CharBuffer constructor
- //
- println("def constructor(r as TextReader):");
- printSingleLineBlock("self(CharBuffer(r))");
- println("");
- println("def constructor(ib as InputBuffer):");
- // if debugging, wrap the input buffer in a debugger
- if (grammar.debuggingOutput)
- printSingleLineBlock("self(LexerSharedInputState(antlr.debug.DebuggingInputBuffer(ib)))");
- else
- printSingleLineBlock("self(LexerSharedInputState(ib))");
- println("");
- //
- // Generate the constructor from InputBuffer (char or byte)
- //
- println("def constructor(state as LexerSharedInputState):");
- ++tabs;
- println("super(state)");
- println("initialize()");
- tabs--;
- println("");
- // Generate the initialize function
- println("private def initialize():");
- tabs++;
- // if debugging, set up array variables and call user-overridable
- // debugging setup method
- if (grammar.debuggingOutput) {
- println("ruleNames = _ruleNames");
- println("semPredNames = _semPredNames");
- println("setupDebugging()");
- }
- // Generate the setting of various generated options.
- // These need to be before the literals since ANTLRHashString depends on
- // the casesensitive stuff.
- println("caseSensitiveLiterals = " + g.caseSensitiveLiterals);
- println("setCaseSensitive(" + g.caseSensitive + ")");
- // Generate the initialization of a hashtable
- // containing the string literals used in the lexer
- // The literals variable itself is in CharScanner
- if (g.caseSensitiveLiterals)
- println("literals = Hashtable(100, 0.4f, null, Comparer.Default)");
- else
- println("literals = Hashtable(100, 0.4f, CaseInsensitiveHashCodeProvider.Default, CaseInsensitiveComparer.Default)");
- Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
- while (keys.hasMoreElements()) {
- String key = (String) keys.nextElement();
- if (key.charAt(0) != '"') {
- continue;
- }
- TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
- if (sym instanceof StringLiteralSymbol) {
- StringLiteralSymbol s = (StringLiteralSymbol) sym;
- println("literals.Add(" + s.getId() + ", " + s.getTokenType()
- + ")");
- }
- }
- Enumeration ids;
- tabs--;
- // generate the rule name array for debugging
- if (grammar.debuggingOutput) {
- println("private static final _ruleNames = (");
- ids = grammar.rules.elements();
- int ruleNum = 0;
- while (ids.hasMoreElements()) {
- GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
- if (sym instanceof RuleSymbol)
- println(" \"" + ((RuleSymbol) sym).getId() + "\",");
- }
- println(")");
- }
- // Generate nextToken() rule.
- // nextToken() is a synthetic lexer rule that is the implicit OR of all
- // user-defined lexer rules.
- genNextToken();
- // Generate code for each rule in the lexer
- ids = grammar.rules.elements();
- int ruleNum = 0;
- while (ids.hasMoreElements()) {
- RuleSymbol sym = (RuleSymbol) ids.nextElement();
- // Don't generate the synthetic rules
- if (!sym.getId().equals("mnextToken")) {
- genRule(sym, false, ruleNum++, grammar.tokenManager);
- }
- exitIfError();
- }
- // Generate the semantic predicate map for debugging
- if (grammar.debuggingOutput)
- genSemPredMap();
- // Generate the bitsets used throughout the lexer
- genBitsets(bitsetsUsed, ((LexerGrammar) grammar).charVocabulary.size());
- println("");
- tabs--;
-
- // Generate the Boo namespace closures (if required)
- if (nameSpace != null)
- nameSpace.emitClosures(currentOutput);
- // Close the lexer output stream
- currentOutput.close();
- currentOutput = null;
- }
- public void genInitFactory(Grammar g) {
- if (g.buildAST) {
- // Generate the method to initialize an ASTFactory when we're
- // building AST's
- println("static def initializeASTFactory(factory as ASTFactory):");
- tabs++;
- println("factory.setMaxNodeType(" + g.tokenManager.maxTokenType()
- + ")");
- // Walk the token vocabulary and generate code to register every
- // TokenID->ASTNodeType
- // mapping specified in the tokens {...} section with the
- // ASTFactory.
- Vector v = g.tokenManager.getVocabulary();
- for (int i = 0; i < v.size(); i++) {
- String s = (String) v.elementAt(i);
- if (s != null) {
- TokenSymbol ts = g.tokenManager.getTokenSymbol(s);
- if (ts != null && ts.getASTNodeType() != null) {
- println("factory.setTokenTypeASTNodeType(" + s + ", \""
- + ts.getASTNodeType() + "\")");
- }
- }
- }
- tabs--;
- }
- }
- public void genBody(ParserGrammar g) throws IOException {
- // Open the output stream for the parser and set the currentOutput
- // SAS: moved file setup so subclass could do it (for VAJ interface)
- setupOutput(grammar.getClassName());
- genAST = grammar.buildAST;
- tabs = 0;
- // Generate the header common to all output files.
- genHeader();
- // Do not use printAction because we assume tabs==0
- println(behavior.getHeaderAction(""));
- // Generate the Boo namespace declaration (if specified)
- if (nameSpace != null)
- nameSpace.emitDeclarations(currentOutput);
- // Generate header for the parser
- println("// Generate the header common to all output files.");
- println("import System");
- println("");
- println("import antlr.TokenBuffer as TokenBuffer");
- println("import antlr.TokenStreamException as TokenStreamException");
- println("import antlr.TokenStreamIOException as TokenStreamIOException");
- println("import antlr.ANTLRException as ANTLRException");
- String qualifiedClassName = grammar.getSuperClass();
- String[] unqualifiedClassName = split(qualifiedClassName, ".");
- println("import "
- + "antlr." + qualifiedClassName
- + " as "
- + unqualifiedClassName[unqualifiedClassName.length - 1]);
- println("import antlr.Token as Token");
- println("import antlr.IToken as IToken");
- println("import antlr.TokenStream as TokenStream");
- println("import antlr.RecognitionException as RecognitionException");
- println("import antlr.NoViableAltException as NoViableAltException");
- println("import antlr.MismatchedTokenException as MismatchedTokenException");
- println("import antlr.SemanticException as SemanticException");
- println("import antlr.ParserSharedInputState as ParserSharedInputState");
- println("import antlr.collections.impl.BitSet as BitSet");
- if (genAST) {
- println("import antlr.collections.AST as AST");
- println("import antlr.ASTPair as ASTPair");
- println("import antlr.ASTFactory as ASTFactory");
- println("import antlr.collections.impl.ASTArray as ASTArray");
- }
- // Output the user-defined parser preamble
- println(grammar.preambleAction.getText());
- // Generate parser class definition
- String sup = null;
- if (grammar.superClass != null)
- sup = grammar.superClass;
- else
- sup = "antlr." + grammar.getSuperClass();
- // print javadoc comment if any
- if (grammar.comment != null) {
- _println(grammar.comment);
- }
- Token tprefix = (Token) grammar.options.get("classHeaderPrefix");
- if (tprefix != null) {
- String p = StringUtils
- .stripFrontBack(tprefix.getText(), "\"", "\"");
- if (p != null) {
- print(p + " ");
- }
- }
- print("class " + grammar.getClassName() + "(" + sup);
- Token tsuffix = (Token) grammar.options.get("classHeaderSuffix");
- if (tsuffix != null) {
- String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"",
- "\"");
- if (suffix != null)
- print(", " + suffix); // must be an interface name
- // for Boo
- }
- _println("):");
- tabs++;
- // Generate 'const' definitions for Token IDs
- genTokenDefinitions(grammar.tokenManager);
- // set up an array of all the rule names so the debugger can
- // keep track of them only by number -- less to store in tree...
- if (grammar.debuggingOutput) {
- println("private static final _ruleNames = (");
- tabs++;
- Enumeration ids = grammar.rules.elements();
- int ruleNum = 0;
- while (ids.hasMoreElements()) {
- GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
- if (sym instanceof RuleSymbol)
- println(" \"" + ((RuleSymbol) sym).getId() + "\",");
- }
- tabs--;
- println(")");
- }
- // Generate user-defined parser class members
- print(processActionForSpecialSymbols(grammar.classMemberAction
- .getText(), grammar.classMemberAction.getLine(), currentRule,
- null));
- // Generate parser class constructor from TokenBuffer
- println("");
- println("protected def initialize():");
- tabs++;
- println("tokenNames = tokenNames_");
- if (grammar.buildAST)
- println("initializeFactory()");
- // if debugging, set up arrays and call the user-overridable
- // debugging setup method
- if (grammar.debuggingOutput) {
- println("ruleNames = _ruleNames");
- println("semPredNames = _semPredNames");
- println("setupDebugging(tokenBuf)");
- }
- tabs--;
- println("");
- println("");
- println("protected def constructor(tokenBuf as TokenBuffer, k as int):");
- tabs++;
- println("super(tokenBuf, k)");
- println("initialize()");
- tabs--;
- println("");
- println("def constructor(tokenBuf as TokenBuffer):");
- printSingleLineBlock("self(tokenBuf, " + …
Large files files are truncated, but you can click here to view the full file