
/Dependencies/boo/lib/antlr-2.7.5/antlr/MakeGrammar.java

https://github.com/w4x/boolangstudio
Java | 791 lines | 598 code | 86 blank | 107 comment | 118 complexity | 895cd56eee2e2a87606788eca947222b MD5
Possible License(s): GPL-2.0
package antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.jGuru.com
 * Software rights: http://www.antlr.org/license.html
 *
 * $Id: //depot/code/org.antlr/release/antlr-2.7.5/antlr/MakeGrammar.java#1 $
 */

import antlr.collections.Stack;
import antlr.collections.impl.LList;
import antlr.collections.impl.Vector;

public class MakeGrammar extends DefineGrammarSymbols {

    protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
    protected RuleRefElement lastRuleRef;
    protected RuleEndElement ruleEnd;     // used if not nested
    protected RuleBlock ruleBlock;        // points to block of current rule.
    protected int nested = 0;             // nesting inside a subrule
    protected boolean grammarError = false;

    ExceptionSpec currentExceptionSpec = null;

    public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
        super(tool_, args_, analyzer_);
    }

    /** Abort the processing of a grammar (due to syntax errors) */
    public void abortGrammar() {
        String s = "unknown grammar";
        if (grammar != null) {
            s = grammar.getClassName();
        }
        tool.error("aborting grammar '" + s + "' due to errors");
        super.abortGrammar();
    }

    protected void addElementToCurrentAlt(AlternativeElement e) {
        e.enclosingRuleName = ruleBlock.ruleName;
        context().addAlternativeElement(e);
    }

    public void beginAlt(boolean doAutoGen_) {
        super.beginAlt(doAutoGen_);
        Alternative alt = new Alternative();
        alt.setAutoGen(doAutoGen_);
        context().block.addAlternative(alt);
    }

    public void beginChildList() {
        super.beginChildList();
        context().block.addAlternative(new Alternative());
    }

    /** Add an exception group to a rule (currently a no-op) */
    public void beginExceptionGroup() {
        super.beginExceptionGroup();
        if (!(context().block instanceof RuleBlock)) {
            tool.panic("beginExceptionGroup called outside of rule block");
        }
    }

    /** Add an exception spec to an exception group or rule block */
    public void beginExceptionSpec(Token label) {
        // Hack the label string a bit to remove leading/trailing space.
        if (label != null) {
            label.setText(StringUtils.stripFront(StringUtils.stripBack(label.getText(), " \n\r\t"), " \n\r\t"));
        }
        super.beginExceptionSpec(label);
        // Don't check for currentExceptionSpec!=null because syntax errors
        // may leave it set to something.
        currentExceptionSpec = new ExceptionSpec(label);
    }

    public void beginSubRule(Token label, Token start, boolean not) {
        super.beginSubRule(label, start, not);
        // we don't know what kind of subrule it is yet.
        // push a dummy one that will allow us to collect the
        // alternatives. Later, we'll switch to real object.
        blocks.push(new BlockContext());
        context().block = new AlternativeBlock(grammar, start, not);
        context().altNum = 0; // reset alternative number
        nested++;
        // create a final node to which the last element of each
        // alternative will point.
        context().blockEnd = new BlockEndElement(grammar);
        // make sure end node points to start of block
        context().blockEnd.block = context().block;
        labelElement(context().block, label);
    }

    public void beginTree(Token tok) throws SemanticException {
        if (!(grammar instanceof TreeWalkerGrammar)) {
            tool.error("Trees only allowed in TreeParser", grammar.getFilename(), tok.getLine(), tok.getColumn());
            throw new SemanticException("Trees only allowed in TreeParser");
        }
        super.beginTree(tok);
        blocks.push(new TreeBlockContext());
        context().block = new TreeElement(grammar, tok);
        context().altNum = 0; // reset alternative number
    }

    public BlockContext context() {
        if (blocks.height() == 0) {
            return null;
        }
        else {
            return (BlockContext)blocks.top();
        }
    }
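
    // Note on the block-context stack used above: defineRuleName() pushes a
    // rule-level BlockContext (popped by endRule()), beginSubRule() pushes a
    // subrule context (popped by endSubRule()), and beginTree() pushes a
    // TreeBlockContext (popped by endTree()). context() therefore always names
    // the innermost block still being built, which is where
    // addElementToCurrentAlt() deposits new elements.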

    /** Used to build nextToken() for the lexer.
     *  This builds a rule which has every "public" rule in the given Vector of
     *  rules as its alternates. Each rule ref generates a Token object.
     *  @param g The Grammar that is being processed
     *  @param lexRules A vector of lexer rules that will be used to create an alternate block.
     *  @param rname The name of the resulting rule.
     */
    public static RuleBlock createNextTokenRule(Grammar g, Vector lexRules, String rname) {
        // create actual rule data structure
        RuleBlock rb = new RuleBlock(g, rname);
        rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
        RuleEndElement ruleEnd = new RuleEndElement(g);
        rb.setEndElement(ruleEnd);
        ruleEnd.block = rb;
        // Add an alternative for each element of the rules vector.
        for (int i = 0; i < lexRules.size(); i++) {
            RuleSymbol r = (RuleSymbol)lexRules.elementAt(i);
            if (!r.isDefined()) {
                g.antlrTool.error("Lexer rule " + r.id.substring(1) + " is not defined");
            }
            else {
                if (r.access.equals("public")) {
                    Alternative alt = new Alternative(); // create alt we'll add to ref rule
                    RuleBlock targetRuleBlock = r.getBlock();
                    Vector targetRuleAlts = targetRuleBlock.getAlternatives();
                    // collect a sem pred if only one alt and it's at the start;
                    // simple, but faster to implement until real hoisting
                    if ( targetRuleAlts!=null && targetRuleAlts.size()==1 ) {
                        Alternative onlyAlt = (Alternative)targetRuleAlts.elementAt(0);
                        if ( onlyAlt.semPred!=null ) {
                            // ok, has sem pred, make this rule ref alt have a pred
                            alt.semPred = onlyAlt.semPred;
                            // REMOVE predicate from target rule??? NOPE, another
                            // rule other than nextToken() might invoke it.
                        }
                    }
                    // create a rule ref to lexer rule
                    // the Token is a RULE_REF not a TOKEN_REF since the
                    // conversion to mRulename has already taken place
                    RuleRefElement rr =
                        new RuleRefElement(g,
                                           new CommonToken(ANTLRTokenTypes.RULE_REF, r.getId()),
                                           GrammarElement.AUTO_GEN_NONE);
                    rr.setLabel("theRetToken");
                    rr.enclosingRuleName = "nextToken";
                    rr.next = ruleEnd;
                    alt.addElement(rr);     // add rule ref to alt
                    alt.setAutoGen(true);   // keep text of elements
                    rb.addAlternative(alt); // add alt to rule block
                    r.addReference(rr);     // track ref to this rule in rule blk
                }
            }
        }
        rb.setAutoGen(true); // keep text of elements
        rb.prepareForAnalysis();
        //System.out.println(rb);
        return rb;
    }
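
    // Illustrative sketch (not generated output): given public lexer rules ID
    // and WS, createNextTokenRule(g, rules, "nextToken") builds a rule block
    // roughly equivalent to
    //
    //     nextToken : theRetToken=mID | theRetToken=mWS ;
    //
    // hoisting a target rule's leading semantic predicate into the referencing
    // alternative when the target has exactly one alternative.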

    /** Return a block as if the user had typed: "( rule )?" */
    private AlternativeBlock createOptionalRuleRef(String rule, Token start) {
        // Make the subrule
        AlternativeBlock blk = new AlternativeBlock(grammar, start, false);
        // Make sure rule is defined
        String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
        if (!grammar.isDefined(mrule)) {
            grammar.define(new RuleSymbol(mrule));
        }
        // Make the rule ref element
        // RK: fixme probably easier to abuse start token..
        Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
        t.setLine(start.getLine());
        t.setColumn(start.getColumn());
        RuleRefElement rref =
            new RuleRefElement(grammar, t, GrammarElement.AUTO_GEN_NONE);
        rref.enclosingRuleName = ruleBlock.ruleName;
        // Make the end of block element
        BlockEndElement end = new BlockEndElement(grammar);
        end.block = blk; // end block points back to start of blk
        // Make an alternative, putting the rule ref into it
        Alternative alt = new Alternative(rref);
        alt.addElement(end); // last element in alt points to end of block
        // Add the alternative to this block
        blk.addAlternative(alt);
        // create an empty (optional) alt and add to blk
        Alternative optAlt = new Alternative();
        optAlt.addElement(end); // points immediately to end of block
        blk.addAlternative(optAlt);
        blk.prepareForAnalysis();
        return blk;
    }
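
    // Illustrative sketch: with the rule option ignore=WS in effect, the block
    // built here and appended by refCharLiteral()/refStringLiteral()/refToken()
    // behaves as if the user had written "(WS)?" after the matched element --
    // one alternative that calls the ignore rule plus one empty alternative.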

    public void defineRuleName(Token r,
                               String access,
                               boolean ruleAutoGen,
                               String docComment)
        throws SemanticException {
        // if ( Character.isUpperCase(r.getText().charAt(0)) ) {
        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
            if (!(grammar instanceof LexerGrammar)) {
                tool.error("Lexical rule " + r.getText() +
                           " defined outside of lexer",
                           grammar.getFilename(), r.getLine(), r.getColumn());
                r.setText(r.getText().toLowerCase());
            }
        }
        else {
            if (grammar instanceof LexerGrammar) {
                tool.error("Lexical rule names must be upper case, '" + r.getText() +
                           "' is not",
                           grammar.getFilename(), r.getLine(), r.getColumn());
                r.setText(r.getText().toUpperCase());
            }
        }
        super.defineRuleName(r, access, ruleAutoGen, docComment);
        String id = r.getText();
        // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
        if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
            id = CodeGenerator.encodeLexerRuleName(id);
        }
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
        RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(), ruleAutoGen);
        // Lexer rules do not generate default error handling
        rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());
        ruleBlock = rb;
        blocks.push(new BlockContext()); // enter new context
        context().block = rb;
        rs.setBlock(rb);
        ruleEnd = new RuleEndElement(grammar);
        rb.setEndElement(ruleEnd);
        nested = 0;
    }
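
    // Note: defineRuleName() (re)initializes the per-rule state used throughout
    // this class -- ruleBlock, ruleEnd and nested -- and pushes the rule-level
    // BlockContext; endRule() later pops it and links ruleEnd back to the
    // rule's block before analysis.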

    public void endAlt() {
        super.endAlt();
        if (nested == 0) { // all rule-level alts link to ruleEnd node
            addElementToCurrentAlt(ruleEnd);
        }
        else {
            addElementToCurrentAlt(context().blockEnd);
        }
        context().altNum++;
    }

    public void endChildList() {
        super.endChildList();
        // create a final node to which the last element of the single
        // alternative will point. Done for compatibility with analyzer.
        // Does NOT point to any block like alternative blocks because the
        // TreeElement is not a block. This is used only as a placeholder.
        BlockEndElement be = new BlockEndElement(grammar);
        be.block = context().block;
        addElementToCurrentAlt(be);
    }

    public void endExceptionGroup() {
        super.endExceptionGroup();
    }

    public void endExceptionSpec() {
        super.endExceptionSpec();
        if (currentExceptionSpec == null) {
            tool.panic("exception processing internal error -- no active exception spec");
        }
        if (context().block instanceof RuleBlock) {
            // Named rule
            ((RuleBlock)context().block).addExceptionSpec(currentExceptionSpec);
        }
        else {
            // It must be a plain-old alternative block
            if (context().currentAlt().exceptionSpec != null) {
                tool.error("Alternative already has an exception specification", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
            }
            else {
                context().currentAlt().exceptionSpec = currentExceptionSpec;
            }
        }
        currentExceptionSpec = null;
    }

    /** Called at the end of processing a grammar */
    public void endGrammar() {
        if (grammarError) {
            abortGrammar();
        }
        else {
            super.endGrammar();
        }
    }

    public void endRule(String rule) {
        super.endRule(rule);
        BlockContext ctx = (BlockContext)blocks.pop(); // remove scope
        // record the start of this block in the ending node
        ruleEnd.block = ctx.block;
        ruleEnd.block.prepareForAnalysis();
        //System.out.println(ctx.block);
    }

    public void endSubRule() {
        super.endSubRule();
        nested--;
        // remove subrule context from scope stack
        BlockContext ctx = (BlockContext)blocks.pop();
        AlternativeBlock block = ctx.block;
        // If the subrule is marked with ~, check that it is
        // a valid candidate for analysis
        if (
            block.not &&
            !(block instanceof SynPredBlock) &&
            !(block instanceof ZeroOrMoreBlock) &&
            !(block instanceof OneOrMoreBlock)
        ) {
            if (!analyzer.subruleCanBeInverted(block, grammar instanceof LexerGrammar)) {
                String newline = System.getProperty("line.separator");
                tool.error(
                    "This subrule cannot be inverted. Only subrules of the form:" + newline +
                    " (T1|T2|T3...) or" + newline +
                    " ('c1'|'c2'|'c3'...)" + newline +
                    "may be inverted (ranges are also allowed).",
                    grammar.getFilename(),
                    block.getLine(), block.getColumn()
                );
            }
        }
        // add the subrule as element if not a syn pred
        if (block instanceof SynPredBlock) {
            // record a reference to the recently-recognized syn pred in the
            // enclosing block.
            SynPredBlock synpred = (SynPredBlock)block;
            context().block.hasASynPred = true;
            context().currentAlt().synPred = synpred;
            grammar.hasSyntacticPredicate = true;
            synpred.removeTrackingOfRuleRefs(grammar);
        }
        else {
            addElementToCurrentAlt(block);
        }
        ctx.blockEnd.block.prepareForAnalysis();
    }

    public void endTree() {
        super.endTree();
        BlockContext ctx = (BlockContext)blocks.pop();
        addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt.
    }

    /** Remember that a major error occurred in the grammar */
    public void hasError() {
        grammarError = true;
    }

    private void labelElement(AlternativeElement el, Token label) {
        if (label != null) {
            // Does this label already exist?
            for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
                AlternativeElement altEl = (AlternativeElement)ruleBlock.labeledElements.elementAt(i);
                String l = altEl.getLabel();
                if (l != null && l.equals(label.getText())) {
                    tool.error("Label '" + label.getText() + "' has already been defined", grammar.getFilename(), label.getLine(), label.getColumn());
                    return;
                }
            }
            // add this node to the list of labeled elements
            el.setLabel(label.getText());
            ruleBlock.labeledElements.appendElement(el);
        }
    }

    public void noAutoGenSubRule() {
        context().block.setAutoGen(false);
    }

    public void oneOrMoreSubRule() {
        if (context().block.not) {
            tool.error("'~' cannot be applied to (...)+ subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
        }
        // create the right kind of object now that we know what that is
        // and switch the list of alternatives. Adjust the stack of blocks.
        // copy any init action also.
        OneOrMoreBlock b = new OneOrMoreBlock(grammar);
        setBlock(b, context().block);
        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
        blocks.push(new BlockContext());
        context().block = b;
        context().blockEnd = old.blockEnd;
        context().blockEnd.block = b;
    }

    public void optionalSubRule() {
        if (context().block.not) {
            tool.error("'~' cannot be applied to (...)? subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
        }
        // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
        // It already thinks that we have a simple subrule, just add option block.
        beginAlt(false);
        endAlt();
    }
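
    // Illustrative sketch: optionalSubRule() realizes the comment above,
    // "(X)? -> (X|)", by appending one extra empty alternative to the block
    // already collected for X; e.g. a user-written "(DIGIT)?" is treated as
    // "(DIGIT | )".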

    public void refAction(Token action) {
        super.refAction(action);
        context().block.hasAnAction = true;
        addElementToCurrentAlt(new ActionElement(grammar, action));
    }

    public void setUserExceptions(String thr) {
        ((RuleBlock)context().block).throwsSpec = thr;
    }

    // Only called for rule blocks
    public void refArgAction(Token action) {
        ((RuleBlock)context().block).argAction = action.getText();
    }

    public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
        if (!(grammar instanceof LexerGrammar)) {
            tool.error("Character literal only valid in lexer", grammar.getFilename(), lit.getLine(), lit.getColumn());
            return;
        }
        super.refCharLiteral(lit, label, inverted, autoGenType, lastInRule);
        CharLiteralElement cl = new CharLiteralElement((LexerGrammar)grammar, lit, inverted, autoGenType);
        // Generate a warning for non-lowercase ASCII when case-insensitive
        if (
            !((LexerGrammar)grammar).caseSensitive && cl.getType() < 128 &&
            Character.toLowerCase((char)cl.getType()) != (char)cl.getType()
        ) {
            tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
        }
        addElementToCurrentAlt(cl);
        labelElement(cl, label);
        // if ignore option is set, must add an optional call to the specified rule.
        String ignore = ruleBlock.getIgnoreRule();
        if (!lastInRule && ignore != null) {
            addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
        }
    }

    public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
        if (!(grammar instanceof LexerGrammar)) {
            tool.error("Character range only valid in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
            return;
        }
        int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
        int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
        if (rangeMax < rangeMin) {
            tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
            return;
        }
        // Generate a warning for non-lowercase ASCII when case-insensitive
        if (!((LexerGrammar)grammar).caseSensitive) {
            if (rangeMin < 128 && Character.toLowerCase((char)rangeMin) != (char)rangeMin) {
                tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t1.getLine(), t1.getColumn());
            }
            if (rangeMax < 128 && Character.toLowerCase((char)rangeMax) != (char)rangeMax) {
                tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t2.getLine(), t2.getColumn());
            }
        }
        super.refCharRange(t1, t2, label, autoGenType, lastInRule);
        CharRangeElement cr = new CharRangeElement((LexerGrammar)grammar, t1, t2, autoGenType);
        addElementToCurrentAlt(cr);
        labelElement(cr, label);
        // if ignore option is set, must add an optional call to the specified rule.
        String ignore = ruleBlock.getIgnoreRule();
        if (!lastInRule && ignore != null) {
            addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
        }
    }

    public void refTokensSpecElementOption(Token tok,
                                           Token option,
                                           Token value) {
        /*
        System.out.println("setting tokens spec option for "+tok.getText());
        System.out.println(option.getText()+","+value.getText());
        */
        TokenSymbol ts = (TokenSymbol)
            grammar.tokenManager.getTokenSymbol(tok.getText());
        if (ts == null) {
            tool.panic("cannot find " + tok.getText() + " in tokens {...}");
        }
        if (option.getText().equals("AST")) {
            ts.setASTNodeType(value.getText());
        }
        else {
            grammar.antlrTool.error("invalid tokens {...} element option:" +
                                    option.getText(),
                                    grammar.getFilename(),
                                    option.getLine(), option.getColumn());
        }
    }

    public void refElementOption(Token option, Token value) {
        /*
        System.out.println("setting option for "+context().currentElement());
        System.out.println(option.getText()+","+value.getText());
        */
        AlternativeElement e = context().currentElement();
        if (e instanceof StringLiteralElement ||
            e instanceof TokenRefElement ||
            e instanceof WildcardElement) {
            ((GrammarAtom)e).setOption(option, value);
        }
        else {
            tool.error("cannot use element option (" + option.getText() +
                       ") for this kind of element",
                       grammar.getFilename(), option.getLine(), option.getColumn());
        }
    }

    /** Add an exception handler to an exception spec */
    public void refExceptionHandler(Token exTypeAndName, Token action) {
        super.refExceptionHandler(exTypeAndName, action);
        if (currentExceptionSpec == null) {
            tool.panic("exception handler processing internal error");
        }
        currentExceptionSpec.addHandler(new ExceptionHandler(exTypeAndName, action));
    }

    public void refInitAction(Token action) {
        super.refAction(action);
        context().block.setInitAction(action.getText());
    }

    public void refMemberAction(Token act) {
        grammar.classMemberAction = act;
    }

    public void refPreambleAction(Token act) {
        super.refPreambleAction(act);
    }

    // Only called for rule blocks
    public void refReturnAction(Token returnAction) {
        if (grammar instanceof LexerGrammar) {
            String name = CodeGenerator.encodeLexerRuleName(((RuleBlock)context().block).getRuleName());
            RuleSymbol rs = (RuleSymbol)grammar.getSymbol(name);
            if (rs.access.equals("public")) {
                tool.warning("public Lexical rules cannot specify return type", grammar.getFilename(), returnAction.getLine(), returnAction.getColumn());
                return;
            }
        }
        ((RuleBlock)context().block).returnAction = returnAction.getText();
    }

    public void refRule(Token idAssign,
                        Token r,
                        Token label,
                        Token args,
                        int autoGenType) {
        // Disallow parser rule references in the lexer
        if (grammar instanceof LexerGrammar) {
            // if (!Character.isUpperCase(r.getText().charAt(0))) {
            if (r.type != ANTLRTokenTypes.TOKEN_REF) {
                tool.error("Parser rule " + r.getText() + " referenced in lexer");
                return;
            }
            if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
                tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), r.getLine(), r.getColumn());
            }
        }
        super.refRule(idAssign, r, label, args, autoGenType);
        lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
        if (args != null) {
            lastRuleRef.setArgs(args.getText());
        }
        if (idAssign != null) {
            lastRuleRef.setIdAssign(idAssign.getText());
        }
        addElementToCurrentAlt(lastRuleRef);
        String id = r.getText();
        // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
        if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
            id = CodeGenerator.encodeLexerRuleName(id);
        }
        // update symbol table so it knows what nodes reference the rule.
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
        rs.addReference(lastRuleRef);
        labelElement(lastRuleRef, label);
    }

    public void refSemPred(Token pred) {
        //System.out.println("refSemPred "+pred.getText());
        super.refSemPred(pred);
        //System.out.println("context().block: "+context().block);
        if (context().currentAlt().atStart()) {
            context().currentAlt().semPred = pred.getText();
        }
        else {
            ActionElement a = new ActionElement(grammar, pred);
            a.isSemPred = true;
            addElementToCurrentAlt(a);
        }
        //System.out.println("DONE refSemPred "+pred.getText());
    }

    public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
        super.refStringLiteral(lit, label, autoGenType, lastInRule);
        if (grammar instanceof TreeWalkerGrammar && autoGenType == GrammarElement.AUTO_GEN_CARET) {
            tool.error("^ not allowed in here for tree-walker", grammar.getFilename(), lit.getLine(), lit.getColumn());
        }
        StringLiteralElement sl = new StringLiteralElement(grammar, lit, autoGenType);
        // If case-insensitive, then check each char of the string literal
        if (grammar instanceof LexerGrammar && !((LexerGrammar)grammar).caseSensitive) {
            for (int i = 1; i < lit.getText().length() - 1; i++) {
                char c = lit.getText().charAt(i);
                if (c < 128 && Character.toLowerCase(c) != c) {
                    tool.warning("Characters of string literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
                    break;
                }
            }
        }
        addElementToCurrentAlt(sl);
        labelElement(sl, label);
        // if ignore option is set, must add an optional call to the specified rule.
        String ignore = ruleBlock.getIgnoreRule();
        if (!lastInRule && ignore != null) {
            addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
        }
    }

    public void refToken(Token idAssign, Token t, Token label, Token args,
                         boolean inverted, int autoGenType, boolean lastInRule) {
        if (grammar instanceof LexerGrammar) {
            // In lexer, token references are really rule references
            if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
                tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
            }
            if (inverted) {
                tool.error("~TOKEN is not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
            }
            refRule(idAssign, t, label, args, autoGenType);
            // if ignore option is set, must add an optional call to the specified token rule.
            String ignore = ruleBlock.getIgnoreRule();
            if (!lastInRule && ignore != null) {
                addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
            }
        }
        else {
            // Cannot have token ref args or assignment outside of lexer
            if (idAssign != null) {
                tool.error("Assignment from token reference only allowed in lexer", grammar.getFilename(), idAssign.getLine(), idAssign.getColumn());
            }
            if (args != null) {
                tool.error("Token reference arguments only allowed in lexer", grammar.getFilename(), args.getLine(), args.getColumn());
            }
            super.refToken(idAssign, t, label, args, inverted, autoGenType, lastInRule);
            TokenRefElement te = new TokenRefElement(grammar, t, inverted, autoGenType);
            addElementToCurrentAlt(te);
            labelElement(te, label);
        }
    }

    public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
        if (grammar instanceof LexerGrammar) {
            tool.error("Token range not allowed in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
            return;
        }
        super.refTokenRange(t1, t2, label, autoGenType, lastInRule);
        TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2, autoGenType);
        if (tr.end < tr.begin) {
            tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
            return;
        }
        addElementToCurrentAlt(tr);
        labelElement(tr, label);
    }

    public void refTreeSpecifier(Token treeSpec) {
        context().currentAlt().treeSpecifier = treeSpec;
    }

    public void refWildcard(Token t, Token label, int autoGenType) {
        super.refWildcard(t, label, autoGenType);
        WildcardElement wc = new WildcardElement(grammar, t, autoGenType);
        addElementToCurrentAlt(wc);
        labelElement(wc, label);
    }

    /** Get ready to process a new grammar */
    public void reset() {
        super.reset();
        blocks = new LList();
        lastRuleRef = null;
        ruleEnd = null;
        ruleBlock = null;
        nested = 0;
        currentExceptionSpec = null;
        grammarError = false;
    }

    public void setArgOfRuleRef(Token argAction) {
        super.setArgOfRuleRef(argAction);
        lastRuleRef.setArgs(argAction.getText());
    }

    public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
        b.setAlternatives(src.getAlternatives());
        b.initAction = src.initAction;
        //b.lookaheadDepth = src.lookaheadDepth;
        b.label = src.label;
        b.hasASynPred = src.hasASynPred;
        b.hasAnAction = src.hasAnAction;
        b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
        b.generateAmbigWarnings = src.generateAmbigWarnings;
        b.line = src.line;
        b.greedy = src.greedy;
        b.greedySet = src.greedySet;
    }
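
    // Note: setBlock() copies the alternatives and collected flags from the
    // placeholder AlternativeBlock (pushed by beginSubRule()) into the more
    // specific block chosen later -- OneOrMoreBlock, ZeroOrMoreBlock or
    // SynPredBlock -- which oneOrMoreSubRule(), zeroOrMoreSubRule() and
    // synPred() then swap into the current BlockContext.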

    public void setRuleOption(Token key, Token value) {
        //((RuleBlock)context().block).setOption(key, value);
        ruleBlock.setOption(key, value);
    }

    public void setSubruleOption(Token key, Token value) {
        ((AlternativeBlock)context().block).setOption(key, value);
    }

    public void synPred() {
        if (context().block.not) {
            tool.error("'~' cannot be applied to syntactic predicate", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
        }
        // create the right kind of object now that we know what that is
        // and switch the list of alternatives. Adjust the stack of blocks.
        // copy any init action also.
        SynPredBlock b = new SynPredBlock(grammar);
        setBlock(b, context().block);
        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
        blocks.push(new BlockContext());
        context().block = b;
        context().blockEnd = old.blockEnd;
        context().blockEnd.block = b;
    }

    public void zeroOrMoreSubRule() {
        if (context().block.not) {
            tool.error("'~' cannot be applied to (...)* subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
        }
        // create the right kind of object now that we know what that is
        // and switch the list of alternatives. Adjust the stack of blocks.
        // copy any init action also.
        ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
        setBlock(b, context().block);
        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
        blocks.push(new BlockContext());
        context().block = b;
        context().blockEnd = old.blockEnd;
        context().blockEnd.block = b;
    }
}