package antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.jGuru.com
 * Software rights: http://www.antlr.org/RIGHTS.html
 *
 * $Id: //depot/code/org.antlr/main/main/antlr/JavaCodeGenerator.java#22 $
 */

import java.util.Enumeration;
import java.util.Hashtable;
import java.util.HashSet;

import antlr.collections.impl.BitSet;
import antlr.collections.impl.Vector;

import java.io.PrintWriter; // SAS: changed for proper text file io
import java.io.IOException;
import java.io.FileWriter;

/** Generate MyParser.java, MyLexer.java, and MyParserTokenTypes.java */
public class JavaCodeGenerator extends CodeGenerator {
    // non-zero if inside syntactic predicate generation
    protected int syntacticPredLevel = 0;

    // Are we generating ASTs (for parsers and tree parsers) right now?
    protected boolean genAST = false;

    // Are we saving the text consumed (for lexers) right now?
    protected boolean saveText = false;

    // Grammar parameters set up to handle different grammar classes.
    // These are used to get instanceof tests out of code generation
    String labeledElementType;
    String labeledElementASTType;
    String labeledElementInit;
    String commonExtraArgs;
    String commonExtraParams;
    String commonLocalVars;
    String lt1Value;
    String exceptionThrown;
    String throwNoViable;

    /** Tracks the rule being generated. Used for mapTreeId */
    RuleBlock currentRule;

    /** Tracks the rule or labeled subrule being generated. Used for
     AST generation. */
    String currentASTResult;

    /** Mapping between the ids used in the current alt, and the
     * names of variables used to represent their AST values.
     */
    Hashtable treeVariableMap = new Hashtable();

    /** Used to keep track of which AST variables have been defined in a rule
     * (except for the #rule_name and #rule_name_in vars).
     */
    HashSet declaredASTVariables = new HashSet();

    /* Count of unnamed generated variables */
    int astVarNumber = 1;

    /** Special value used to mark duplicate in treeVariableMap */
    protected static final String NONUNIQUE = new String();

    public static final int caseSizeThreshold = 127; // ascii is max

    private Vector semPreds;

    /** Create a Java code-generator using the given Grammar.
     * The caller must still call setTool, setBehavior, and setAnalyzer
     * before generating code.
     */
    public JavaCodeGenerator() {
        super();
        charFormatter = new JavaCharFormatter();
    }
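
    /*
     * Usage sketch (illustrative only, not part of ANTLR's own source): as the
     * constructor comment above notes, a JavaCodeGenerator is not ready to run
     * until its tool, behavior, and analyzer have been attached. A driver would
     * do roughly the following; the tool/behavior/analyzer objects themselves
     * are assumed to have been built elsewhere.
     *
     *   JavaCodeGenerator codeGen = new JavaCodeGenerator();
     *   codeGen.setTool(tool);         // error reporting / options (antlr.Tool)
     *   codeGen.setBehavior(behavior); // holds the grammars and token managers
     *   codeGen.setAnalyzer(analyzer); // lookahead analyzer for the grammars
     *   codeGen.gen();                 // emits MyParser.java, MyLexer.java, etc.
     */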

    /** Adds a semantic predicate string to the sem pred vector.
     *  These strings will be used to build an array of sem pred names
     *  when building a debugging parser. This method should only be
     *  called when the debug option is specified.
     */
    protected int addSemPred(String predicate) {
        semPreds.appendElement(predicate);
        return semPreds.size() - 1;
    }

    public void exitIfError() {
        if (antlrTool.hasError()) {
            System.out.println("Exiting due to errors.");
            System.exit(1);
        }
    }

    /** Generate the parser, lexer, treeparser, and token types in Java */
    public void gen() {
        // Do the code generation
        try {
            // Loop over all grammars
            Enumeration grammarIter = behavior.grammars.elements();
            while (grammarIter.hasMoreElements()) {
                Grammar g = (Grammar)grammarIter.nextElement();
                // Connect all the components to each other
                g.setGrammarAnalyzer(analyzer);
                g.setCodeGenerator(this);
                analyzer.setGrammar(g);
                // To get right overloading behavior across heterogeneous grammars
                setupGrammarParameters(g);
                g.generate();
                // print out the grammar with lookahead sets (and FOLLOWs)
                // System.out.print(g.toString());
                exitIfError();
            }

            // Loop over all token managers (some of which are lexers)
            Enumeration tmIter = behavior.tokenManagers.elements();
            while (tmIter.hasMoreElements()) {
                TokenManager tm = (TokenManager)tmIter.nextElement();
                if (!tm.isReadOnly()) {
                    // Write the token manager tokens as Java;
                    // this must appear before genTokenInterchange so that
                    // labels are set on string literals
                    genTokenTypes(tm);
                    // Write the token manager tokens as plain text
                    genTokenInterchange(tm);
                }
                exitIfError();
            }
        }
        catch (IOException e) {
            System.out.println(e.getMessage());
        }
    }

    /** Generate code for the given grammar element.
     * @param action The {...} action to generate
     */
    public void gen(ActionElement action) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genAction(" + action + ")");
        if (action.isSemPred) {
            genSemPred(action.actionText, action.line);
        }
        else {
            if (grammar.hasSyntacticPredicate) {
                println("if ( inputState.guessing==0 ) {");
                tabs++;
            }

            ActionTransInfo tInfo = new ActionTransInfo();
            String actionStr = processActionForTreeSpecifiers(action.actionText, action.getLine(), currentRule, tInfo);

            if (tInfo.refRuleRoot != null) {
                // Somebody referenced "#rule"; make sure the translated var is valid.
                // An assignment to #rule is left as a ref also, meaning that assignments
                // with no other refs, like "#rule = foo();", still force this code to be
                // generated (unnecessarily).
                println(tInfo.refRuleRoot + " = (" + labeledElementASTType + ")currentAST.root;");
            }

            // dump the translated action
            printAction(actionStr);

            if (tInfo.assignToRoot) {
                // Somebody did a "#rule="; reset internal currentAST.root
                println("currentAST.root = " + tInfo.refRuleRoot + ";");
                // reset the child pointer too to be last sibling in sibling list
                println("currentAST.child = " + tInfo.refRuleRoot + "!=null &&" + tInfo.refRuleRoot + ".getFirstChild()!=null ?");
                tabs++;
                println(tInfo.refRuleRoot + ".getFirstChild() : " + tInfo.refRuleRoot + ";");
                tabs--;
                println("currentAST.advanceChildToEnd();");
            }

            if (grammar.hasSyntacticPredicate) {
                tabs--;
                println("}");
            }
        }
    }

    /** Generate code for the given grammar element.
     * @param blk The "x|y|z|..." block to generate
     */
    public void gen(AlternativeBlock blk) {
        if (DEBUG_CODE_GENERATOR) System.out.println("gen(" + blk + ")");
        println("{");
        genBlockPreamble(blk);
        genBlockInitAction(blk);

        // Tell AST generation to build subrule result
        String saveCurrentASTResult = currentASTResult;
        if (blk.getLabel() != null) {
            currentASTResult = blk.getLabel();
        }

        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);

        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
        genBlockFinish(howToFinish, throwNoViable);

        println("}");

        // Restore previous AST generation
        currentASTResult = saveCurrentASTResult;
    }

    /** Generate code for the given grammar element.
     * @param end The block-end element to generate. Block-end
     * elements are synthesized by the grammar parser to represent
     * the end of a block.
     */
    public void gen(BlockEndElement end) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genRuleEnd(" + end + ")");
    }

    /** Generate code for the given grammar element.
     * @param atom The character literal reference to generate
     */
    public void gen(CharLiteralElement atom) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genChar(" + atom + ")");

        if (atom.getLabel() != null) {
            println(atom.getLabel() + " = " + lt1Value + ";");
        }

        boolean oldsaveText = saveText;
        saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;
        genMatch(atom);
        saveText = oldsaveText;
    }

    /** Generate code for the given grammar element.
     * @param r The character-range reference to generate
     */
    public void gen(CharRangeElement r) {
        if (r.getLabel() != null && syntacticPredLevel == 0) {
            println(r.getLabel() + " = " + lt1Value + ";");
        }
        boolean flag = ( grammar instanceof LexerGrammar &&
                         ( !saveText ||
                           r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) );
        if (flag) {
            println("_saveIndex=text.length();");
        }

        println("matchRange(" + r.beginText + "," + r.endText + ");");

        if (flag) {
            println("text.setLength(_saveIndex);");
        }
    }

    /** Generate the lexer Java file */
    public void gen(LexerGrammar g) throws IOException {
        // If debugging, create a new sempred vector for this grammar
        if (g.debuggingOutput)
            semPreds = new Vector();

        setGrammar(g);
        if (!(grammar instanceof LexerGrammar)) {
            antlrTool.panic("Internal error generating lexer");
        }

        // SAS: moved output creation to method so a subclass can change
        // how the output is generated (for VAJ interface)
        setupOutput(grammar.getClassName());

        genAST = false; // no way to gen trees.
        saveText = true; // save consumed characters.

        tabs = 0;

        // Generate header common to all Java output files
        genHeader();
        // Do not use printAction because we assume tabs==0
        println(behavior.getHeaderAction(""));

        // Generate header specific to lexer Java file
        // println("import java.io.FileInputStream;");
        println("import java.io.InputStream;");
        println("import antlr.TokenStreamException;");
        println("import antlr.TokenStreamIOException;");
        println("import antlr.TokenStreamRecognitionException;");
        println("import antlr.CharStreamException;");
        println("import antlr.CharStreamIOException;");
        println("import antlr.ANTLRException;");
        println("import java.io.Reader;");
        println("import java.util.Hashtable;");
        println("import antlr." + grammar.getSuperClass() + ";");
        println("import antlr.InputBuffer;");
        println("import antlr.ByteBuffer;");
        println("import antlr.CharBuffer;");
        println("import antlr.Token;");
        println("import antlr.CommonToken;");
        println("import antlr.RecognitionException;");
        println("import antlr.NoViableAltForCharException;");
        println("import antlr.MismatchedCharException;");
        println("import antlr.TokenStream;");
        println("import antlr.ANTLRHashString;");
        println("import antlr.LexerSharedInputState;");
        println("import antlr.collections.impl.BitSet;");
        println("import antlr.SemanticException;");

        // Generate user-defined lexer file preamble
        println(grammar.preambleAction.getText());

        // Generate lexer class definition
        String sup = null;
        if (grammar.superClass != null) {
            sup = grammar.superClass;
        }
        else {
            sup = "antlr." + grammar.getSuperClass();
        }

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(grammar.comment);
        }

        print("public class " + grammar.getClassName() + " extends " + sup);
        println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix + ", TokenStream");
        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
        if (tsuffix != null) {
            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
            if (suffix != null) {
                print(", " + suffix); // must be an interface name for Java
            }
        }
        println(" {");

        // Generate user-defined lexer class members
        print(
            processActionForTreeSpecifiers(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
        );

        //
        // Generate the constructor from InputStream, which in turn
        // calls the ByteBuffer constructor
        //
        println("public " + grammar.getClassName() + "(InputStream in) {");
        tabs++;
        println("this(new ByteBuffer(in));");
        tabs--;
        println("}");

        //
        // Generate the constructor from Reader, which in turn
        // calls the CharBuffer constructor
        //
        println("public " + grammar.getClassName() + "(Reader in) {");
        tabs++;
        println("this(new CharBuffer(in));");
        tabs--;
        println("}");

        println("public " + grammar.getClassName() + "(InputBuffer ib) {");
        tabs++;
        // if debugging, wrap the input buffer in a debugger
        if (grammar.debuggingOutput)
            println("this(new LexerSharedInputState(new antlr.debug.DebuggingInputBuffer(ib)));");
        else
            println("this(new LexerSharedInputState(ib));");
        tabs--;
        println("}");

        //
        // Generate the constructor from InputBuffer (char or byte)
        //
        println("public " + grammar.getClassName() + "(LexerSharedInputState state) {");
        tabs++;

        println("super(state);");
        // if debugging, set up array variables and call user-overridable
        // debugging setup method
        if (grammar.debuggingOutput) {
            println(" ruleNames = _ruleNames;");
            println(" semPredNames = _semPredNames;");
            println(" setupDebugging();");
        }

        // Generate the setting of various generated options.
        // These need to be before the literals since ANTLRHashString depends on
        // the casesensitive stuff.
        println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";");
        println("setCaseSensitive(" + g.caseSensitive + ");");

        // Generate the initialization of a hashtable
        // containing the string literals used in the lexer
        // The literals variable itself is in CharScanner
        println("literals = new Hashtable();");
        Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
        while (keys.hasMoreElements()) {
            String key = (String)keys.nextElement();
            if (key.charAt(0) != '"') {
                continue;
            }
            TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
            if (sym instanceof StringLiteralSymbol) {
                StringLiteralSymbol s = (StringLiteralSymbol)sym;
                println("literals.put(new ANTLRHashString(" + s.getId() + ", this), new Integer(" + s.getTokenType() + "));");
            }
        }
        tabs--;

        Enumeration ids;
        println("}");

        // generate the rule name array for debugging
        if (grammar.debuggingOutput) {
            println("private static final String _ruleNames[] = {");

            ids = grammar.rules.elements();
            int ruleNum = 0;
            while (ids.hasMoreElements()) {
                GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
                if (sym instanceof RuleSymbol)
                    println(" \"" + ((RuleSymbol)sym).getId() + "\",");
            }
            println("};");
        }

        // Generate nextToken() rule.
        // nextToken() is a synthetic lexer rule that is the implicit OR of all
        // user-defined lexer rules.
        genNextToken();

        // Generate code for each rule in the lexer
        ids = grammar.rules.elements();
        int ruleNum = 0;
        while (ids.hasMoreElements()) {
            RuleSymbol sym = (RuleSymbol)ids.nextElement();
            // Don't generate the synthetic rules
            if (!sym.getId().equals("mnextToken")) {
                genRule(sym, false, ruleNum++);
            }
            exitIfError();
        }

        // Generate the semantic predicate map for debugging
        if (grammar.debuggingOutput)
            genSemPredMap();

        // Generate the bitsets used throughout the lexer
        genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());

        println("");
        println("}");

        // Close the lexer output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)+ block to generate
     */
    public void gen(OneOrMoreBlock blk) {
        if (DEBUG_CODE_GENERATOR) System.out.println("gen+(" + blk + ")");
        String label;
        String cnt;
        println("{");
        genBlockPreamble(blk);
        if (blk.getLabel() != null) {
            cnt = "_cnt_" + blk.getLabel();
        }
        else {
            cnt = "_cnt" + blk.ID;
        }
        println("int " + cnt + "=0;");
        if (blk.getLabel() != null) {
            label = blk.getLabel();
        }
        else {
            label = "_loop" + blk.ID;
        }
        println(label + ":");
        println("do {");
        tabs++;
        // generate the init action for ()+ ()* inside the loop;
        // this allows us to do useful EOF checking...
        genBlockInitAction(blk);

        // Tell AST generation to build subrule result
        String saveCurrentASTResult = currentASTResult;
        if (blk.getLabel() != null) {
            currentASTResult = blk.getLabel();
        }

        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);

        // generate exit test if greedy set to false
        // and an alt is ambiguous with exit branch
        // or when lookahead derived purely from end-of-file.
        // Lookahead analysis stops when end-of-file is hit,
        // returning set {epsilon}. Since {epsilon} is not
        // ambig with any real tokens, no error is reported
        // by deterministic() routines and we have to check
        // for the case where the lookahead depth didn't get
        // set to NONDETERMINISTIC (this only happens when the
        // FOLLOW contains real atoms + epsilon).
        boolean generateNonGreedyExitPath = false;
        int nonGreedyExitDepth = grammar.maxk;

        if (!blk.greedy &&
            blk.exitLookaheadDepth <= grammar.maxk &&
            blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
            generateNonGreedyExitPath = true;
            nonGreedyExitDepth = blk.exitLookaheadDepth;
        }
        else if (!blk.greedy &&
            blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
            generateNonGreedyExitPath = true;
        }

        // generate exit test if greedy set to false
        // and an alt is ambiguous with exit branch
        if (generateNonGreedyExitPath) {
            if (DEBUG_CODE_GENERATOR) {
                System.out.println("nongreedy (...)+ loop; exit depth is " +
                                   blk.exitLookaheadDepth);
            }
            String predictExit =
                getLookaheadTestExpression(blk.exitCache,
                                           nonGreedyExitDepth);
            println("// nongreedy exit test");
            println("if ( " + cnt + ">=1 && " + predictExit + ") break " + label + ";");
        }

        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
        genBlockFinish(
            howToFinish,
            "if ( " + cnt + ">=1 ) { break " + label + "; } else {" + throwNoViable + "}"
        );

        println(cnt + "++;");
        tabs--;
        println("} while (true);");
        println("}");

        // Restore previous AST generation
        currentASTResult = saveCurrentASTResult;
    }

    /** Generate the parser Java file */
    public void gen(ParserGrammar g) throws IOException {

        // if debugging, set up a new vector to keep track of sempred
        // strings for this grammar
        if (g.debuggingOutput)
            semPreds = new Vector();

        setGrammar(g);
        if (!(grammar instanceof ParserGrammar)) {
            antlrTool.panic("Internal error generating parser");
        }

        // Open the output stream for the parser and set the currentOutput
        // SAS: moved file setup so subclass could do it (for VAJ interface)
        setupOutput(grammar.getClassName());

        genAST = grammar.buildAST;

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();
        // Do not use printAction because we assume tabs==0
        println(behavior.getHeaderAction(""));

        // Generate header for the parser
        println("import antlr.TokenBuffer;");
        println("import antlr.TokenStreamException;");
        println("import antlr.TokenStreamIOException;");
        println("import antlr.ANTLRException;");
        println("import antlr." + grammar.getSuperClass() + ";");
        println("import antlr.Token;");
        println("import antlr.TokenStream;");
        println("import antlr.RecognitionException;");
        println("import antlr.NoViableAltException;");
        println("import antlr.MismatchedTokenException;");
        println("import antlr.SemanticException;");
        println("import antlr.ParserSharedInputState;");
        println("import antlr.collections.impl.BitSet;");
        if ( genAST ) {
            println("import antlr.collections.AST;");
            println("import antlr.ASTPair;");
            println("import antlr.collections.impl.ASTArray;");
        }

        // Output the user-defined parser preamble
        println(grammar.preambleAction.getText());

        // Generate parser class definition
        String sup = null;
        if (grammar.superClass != null)
            sup = grammar.superClass;
        else
            sup = "antlr." + grammar.getSuperClass();

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(grammar.comment);
        }

        println("public class " + grammar.getClassName() + " extends " + sup);
        println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix);

        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
        if (tsuffix != null) {
            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
            if (suffix != null)
                print(", " + suffix); // must be an interface name for Java
        }
        println(" {");

        // set up an array of all the rule names so the debugger can
        // keep track of them only by number -- less to store in tree...
        if (grammar.debuggingOutput) {
            println("private static final String _ruleNames[] = {");

            Enumeration ids = grammar.rules.elements();
            int ruleNum = 0;
            while (ids.hasMoreElements()) {
                GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
                if (sym instanceof RuleSymbol)
                    println(" \"" + ((RuleSymbol)sym).getId() + "\",");
            }
            println("};");
        }

        // Generate user-defined parser class members
        print(
            processActionForTreeSpecifiers(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
        );

        // Generate parser class constructor from TokenBuffer
        println("");
        println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) {");
        println(" super(tokenBuf,k);");
        println(" tokenNames = _tokenNames;");
        // if debugging, set up arrays and call the user-overridable
        // debugging setup method
        if (grammar.debuggingOutput) {
            println(" ruleNames = _ruleNames;");
            println(" semPredNames = _semPredNames;");
            println(" setupDebugging(tokenBuf);");
        }
        println("}");
        println("");

        println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) {");
        println(" this(tokenBuf," + grammar.maxk + ");");
        println("}");
        println("");

        // Generate parser class constructor from TokenStream
        println("protected " + grammar.getClassName() + "(TokenStream lexer, int k) {");
        println(" super(lexer,k);");
        println(" tokenNames = _tokenNames;");

        // if debugging, set up arrays and call the user-overridable
        // debugging setup method
        if (grammar.debuggingOutput) {
            println(" ruleNames = _ruleNames;");
            println(" semPredNames = _semPredNames;");
            println(" setupDebugging(lexer);");
        }
        println("}");
        println("");

        println("public " + grammar.getClassName() + "(TokenStream lexer) {");
        println(" this(lexer," + grammar.maxk + ");");
        println("}");
        println("");

        println("public " + grammar.getClassName() + "(ParserSharedInputState state) {");
        println(" super(state," + grammar.maxk + ");");
        println(" tokenNames = _tokenNames;");
        println("}");
        println("");

        // Generate code for each rule in the grammar
        Enumeration ids = grammar.rules.elements();
        int ruleNum = 0;
        while (ids.hasMoreElements()) {
            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
            if (sym instanceof RuleSymbol) {
                RuleSymbol rs = (RuleSymbol)sym;
                genRule(rs, rs.references.size() == 0, ruleNum++);
            }
            exitIfError();
        }

        // Generate the token names
        genTokenStrings();

        // Generate the bitsets used throughout the grammar
        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());

        // Generate the semantic predicate map for debugging
        if (grammar.debuggingOutput)
            genSemPredMap();

        // Close class definition
        println("");
        println("}");

        // Close the parser output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param rr The rule-reference to generate
     */
    public void gen(RuleRefElement rr) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genRR(" + rr + ")");
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
        if (rs == null || !rs.isDefined()) {
            // Is this redundant???
            antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
            return;
        }
        if (!(rs instanceof RuleSymbol)) {
            // Is this redundant???
            antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
            return;
        }

        genErrorTryForElement(rr);

        // AST value for labeled rule refs in tree walker.
        // This is not AST construction; it is just the input tree node value.
        if (grammar instanceof TreeWalkerGrammar &&
            rr.getLabel() != null &&
            syntacticPredLevel == 0) {
            println(rr.getLabel() + " = _t==ASTNULL ? null : " + lt1Value + ";");
        }

        // if in lexer and ! on rule ref or alt or rule, save buffer index to kill later
        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
            println("_saveIndex=text.length();");
        }

        // Process return value assignment if any
        printTabs();
        if (rr.idAssign != null) {
            // Warn if the rule has no return type
            if (rs.block.returnAction == null) {
                antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
            }
            _print(rr.idAssign + "=");
        }
        else {
            // Warn about return value if any, but not inside syntactic predicate
            if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) {
                antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
            }
        }

        // Call the rule
        GenRuleInvocation(rr);

        // if in lexer and ! on element or alt or rule, save buffer index to kill later
        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
            println("text.setLength(_saveIndex);");
        }

        // if not in a syntactic predicate
        if (syntacticPredLevel == 0) {
            boolean doNoGuessTest = (
                grammar.hasSyntacticPredicate &&
                (
                grammar.buildAST && rr.getLabel() != null ||
                (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
                )
                );
            if (doNoGuessTest) {
                // println("if (inputState.guessing==0) {");
                // tabs++;
            }

            if (grammar.buildAST && rr.getLabel() != null) {
                // always gen variable for rule return on labeled rules
                println(rr.getLabel() + "_AST = (" + labeledElementASTType + ")returnAST;");
            }
            if (genAST) {
                switch (rr.getAutoGenType()) {
                    case GrammarElement.AUTO_GEN_NONE:
                        // println("theASTFactory.addASTChild(currentAST, returnAST);");
                        println("astFactory.addASTChild(currentAST, returnAST);");
                        break;
                    case GrammarElement.AUTO_GEN_CARET:
                        antlrTool.error("Internal: encountered ^ after rule reference");
                        break;
                    default:
                        break;
                }
            }

            // if a lexer and labeled, Token label defined at rule level, just set it here
            if (grammar instanceof LexerGrammar && rr.getLabel() != null) {
                println(rr.getLabel() + "=_returnToken;");
            }

            if (doNoGuessTest) {
                // tabs--;
                // println("}");
            }
        }
        genErrorCatchForElement(rr);
    }

    /** Generate code for the given grammar element.
     * @param atom The string-literal reference to generate
     */
    public void gen(StringLiteralElement atom) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genString(" + atom + ")");

        // Variable declarations for labeled elements
        if (atom.getLabel() != null && syntacticPredLevel == 0) {
            println(atom.getLabel() + " = " + lt1Value + ";");
        }

        // AST
        genElementAST(atom);

        // is there a bang on the literal?
        boolean oldsaveText = saveText;
        saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;

        // matching
        genMatch(atom);

        saveText = oldsaveText;

        // tack on tree cursor motion if doing a tree walker
        if (grammar instanceof TreeWalkerGrammar) {
            println("_t = _t.getNextSibling();");
        }
    }

    /** Generate code for the given grammar element.
     * @param r The token-range reference to generate
     */
    public void gen(TokenRangeElement r) {
        genErrorTryForElement(r);
        if (r.getLabel() != null && syntacticPredLevel == 0) {
            println(r.getLabel() + " = " + lt1Value + ";");
        }

        // AST
        genElementAST(r);

        // match
        println("matchRange(" + r.beginText + "," + r.endText + ");");
        genErrorCatchForElement(r);
    }

    /** Generate code for the given grammar element.
     * @param atom The token-reference to generate
     */
    public void gen(TokenRefElement atom) {
        if (DEBUG_CODE_GENERATOR) System.out.println("genTokenRef(" + atom + ")");
        if (grammar instanceof LexerGrammar) {
            antlrTool.panic("Token reference found in lexer");
        }
        genErrorTryForElement(atom);
        // Assign Token value to token label variable
        if (atom.getLabel() != null && syntacticPredLevel == 0) {
            println(atom.getLabel() + " = " + lt1Value + ";");
        }

        // AST
        genElementAST(atom);
        // matching
        genMatch(atom);
        genErrorCatchForElement(atom);

        // tack on tree cursor motion if doing a tree walker
        if (grammar instanceof TreeWalkerGrammar) {
            println("_t = _t.getNextSibling();");
        }
    }

    public void gen(TreeElement t) {
        // save AST cursor
        println("AST __t" + t.ID + " = _t;");

        // If there is a label on the root, then assign that to the variable
        if (t.root.getLabel() != null) {
            println(t.root.getLabel() + " = _t==ASTNULL ? null :(" + labeledElementASTType + ")_t;");
        }

        // check for invalid modifiers ! and ^ on tree element roots
        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
            antlrTool.error("Suffixing a root node with '!' is not implemented",
                            grammar.getFilename(), t.getLine(), t.getColumn());
            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
        }
        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
            antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
                              grammar.getFilename(), t.getLine(), t.getColumn());
            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
        }

        // Generate AST variables
        genElementAST(t.root);
        if (grammar.buildAST) {
            // Save the AST construction state
            println("ASTPair __currentAST" + t.ID + " = currentAST.copy();");
            // Make the next item added a child of the TreeElement root
            println("currentAST.root = currentAST.child;");
            println("currentAST.child = null;");
        }

        // match root
        if ( t.root instanceof WildcardElement ) {
            println("if ( _t==null ) throw new MismatchedTokenException();");
        }
        else {
            genMatch(t.root);
        }
        // move to list of children
        println("_t = _t.getFirstChild();");

        // walk list of children, generating code for each
        for (int i = 0; i < t.getAlternatives().size(); i++) {
            Alternative a = t.getAlternativeAt(i);
            AlternativeElement e = a.head;
            while (e != null) {
                e.generate();
                e = e.next;
            }
        }

        if (grammar.buildAST) {
            // restore the AST construction state to that just after the
            // tree root was added
            println("currentAST = __currentAST" + t.ID + ";");
        }
        // restore AST cursor
        println("_t = __t" + t.ID + ";");
        // move cursor to sibling of tree just parsed
        println("_t = _t.getNextSibling();");
    }

    /** Generate the tree-parser Java file */
    public void gen(TreeWalkerGrammar g) throws IOException {
        // SAS: debugging stuff removed for now...
        setGrammar(g);
        if (!(grammar instanceof TreeWalkerGrammar)) {
            antlrTool.panic("Internal error generating tree-walker");
        }
        // Open the output stream for the parser and set the currentOutput
        // SAS: move file open to method so subclass can override it
        // (mainly for VAJ interface)
        setupOutput(grammar.getClassName());

        genAST = grammar.buildAST;
        tabs = 0;

        // Generate the header common to all output files.
        genHeader();
        // Do not use printAction because we assume tabs==0
        println(behavior.getHeaderAction(""));

        // Generate header for the parser
        println("import antlr." + grammar.getSuperClass() + ";");
        println("import antlr.Token;");
        println("import antlr.collections.AST;");
        println("import antlr.RecognitionException;");
        println("import antlr.ANTLRException;");
        println("import antlr.NoViableAltException;");
        println("import antlr.MismatchedTokenException;");
        println("import antlr.SemanticException;");
        println("import antlr.collections.impl.BitSet;");
        println("import antlr.ASTPair;");
        println("import antlr.collections.impl.ASTArray;");

        // Output the user-defined parser preamble
        println(grammar.preambleAction.getText());

        // Generate parser class definition
        String sup = null;
        if (grammar.superClass != null) {
            sup = grammar.superClass;
        }
        else {
            sup = "antlr." + grammar.getSuperClass();
        }
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(grammar.comment);
        }

        println("public class " + grammar.getClassName() + " extends " + sup);
        println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
        if (tsuffix != null) {
            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
            if (suffix != null) {
                print(", " + suffix); // must be an interface name for Java
            }
        }
        println(" {");

        // Generate user-defined parser class members
        print(
            processActionForTreeSpecifiers(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
        );

        // Generate default parser class constructor
        println("public " + grammar.getClassName() + "() {");
        tabs++;
        println("tokenNames = _tokenNames;");
        tabs--;
        println("}");
        println("");

        // Generate code for each rule in the grammar
        Enumeration ids = grammar.rules.elements();
        int ruleNum = 0;
        String ruleNameInits = "";
        while (ids.hasMoreElements()) {
            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
            if (sym instanceof RuleSymbol) {
                RuleSymbol rs = (RuleSymbol)sym;
                genRule(rs, rs.references.size() == 0, ruleNum++);
            }
            exitIfError();
        }

        // Generate the token names
        genTokenStrings();

        // Generate the bitsets used throughout the grammar
        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());

        // Close class definition
        println("}");
        println("");

        // Close the parser output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param wc The wildcard element to generate
     */
    public void gen(WildcardElement wc) {
        // Variable assignment for labeled elements
        if (wc.getLabel() != null && syntacticPredLevel == 0) {
            println(wc.getLabel() + " = " + lt1Value + ";");
        }

        // AST
        genElementAST(wc);
        // Match anything but EOF
        if (grammar instanceof TreeWalkerGrammar) {
            println("if ( _t==null ) throw new MismatchedTokenException();");
        }
        else if (grammar instanceof LexerGrammar) {
            if (grammar instanceof LexerGrammar &&
                (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
                println("_saveIndex=text.length();");
            }
            println("matchNot(EOF_CHAR);");
            if (grammar instanceof LexerGrammar &&
                (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
                println("text.setLength(_saveIndex);"); // kill text atom put in buffer
            }
        }
        else {
            println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
        }

        // tack on tree cursor motion if doing a tree walker
        if (grammar instanceof TreeWalkerGrammar) {
            println("_t = _t.getNextSibling();");
        }
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)* block to generate
     */
    public void gen(ZeroOrMoreBlock blk) {
        if (DEBUG_CODE_GENERATOR) System.out.println("gen*(" + blk + ")");
        println("{");
        genBlockPreamble(blk);
        String label;
        if (blk.getLabel() != null) {
            label = blk.getLabel();
        }
        else {
            label = "_loop" + blk.ID;
        }
        println(label + ":");
        println("do {");
        tabs++;
        // generate the init action for ()* inside the loop;
        // this allows us to do useful EOF checking...
        genBlockInitAction(blk);

        // Tell AST generation to build subrule result
        String saveCurrentASTResult = currentASTResult;
        if (blk.getLabel() != null) {
            currentASTResult = blk.getLabel();
        }

        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);

        // generate exit test if greedy set to false
        // and an alt is ambiguous with exit branch
        // or when lookahead derived purely from end-of-file.
        // Lookahead analysis stops when end-of-file is hit,
        // returning set {epsilon}. Since {epsilon} is not
        // ambig with any real tokens, no error is reported
        // by deterministic() routines and we have to check
        // for the case where the lookahead depth didn't get
        // set to NONDETERMINISTIC (this only happens when the
        // FOLLOW contains real atoms + epsilon).
        boolean generateNonGreedyExitPath = false;
        int nonGreedyExitDepth = grammar.maxk;

        if (!blk.greedy &&
            blk.exitLookaheadDepth <= grammar.maxk &&
            blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
            generateNonGreedyExitPath = true;
            nonGreedyExitDepth = blk.exitLookaheadDepth;
        }
        else if (!blk.greedy &&
            blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
            generateNonGreedyExitPath = true;
        }
        if (generateNonGreedyExitPath) {
            if (DEBUG_CODE_GENERATOR) {
                System.out.println("nongreedy (...)* loop; exit depth is " +
                                   blk.exitLookaheadDepth);
            }
            String predictExit =
                getLookaheadTestExpression(blk.exitCache,
                                           nonGreedyExitDepth);
            println("// nongreedy exit test");
            println("if (" + predictExit + ") break " + label + ";");
        }

        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
        genBlockFinish(howToFinish, "break " + label + ";");

        tabs--;
        println("} while (true);");
        println("}");

        // Restore previous AST generation
        currentASTResult = saveCurrentASTResult;
    }

    /** Generate an alternative.
     * @param alt The alternative to generate
     * @param blk The block to which the alternative belongs
     */
    protected void genAlt(Alternative alt, AlternativeBlock blk) {
        // Save the AST generation state, and set it to that of the alt
        boolean savegenAST = genAST;
        genAST = genAST && alt.getAutoGen();

        boolean oldsaveTest = saveText;
        saveText = saveText && alt.getAutoGen();

        // Reset the variable name map for the alternative
        Hashtable saveMap = treeVariableMap;
        treeVariableMap = new Hashtable();

        // Generate try block around the alt for error handling
        if (alt.exceptionSpec != null) {
            println("try { // for error handling");
            tabs++;
        }

        AlternativeElement elem = alt.head;
        while (!(elem instanceof BlockEndElement)) {
            elem.generate(); // alt can begin with anything. Ask target to gen.
            elem = elem.next;
        }

        if (genAST) {
            if (blk instanceof RuleBlock) {
                // Set the AST return value for the rule
                RuleBlock rblk = (RuleBlock)blk;
                if (grammar.hasSyntacticPredicate) {
                    // println("if ( inputState.guessing==0 ) {");
                    // tabs++;
                }
                println(rblk.getRuleName() + "_AST = (" + labeledElementASTType + ")currentAST.root;");
                if (grammar.hasSyntacticPredicate) {
                    // --tabs;
                    // println("}");
                }
            }
            else if (blk.getLabel() != null) {
                // ### future: also set AST value for labeled subrules.
                // println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;");
                antlrTool.warning("Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn());
            }
        }

        if (alt.exceptionSpec != null) {
            // close try block
            tabs--;
            println("}");
            genErrorHandler(alt.exceptionSpec);
        }

        genAST = savegenAST;
        saveText = oldsaveTest;

        treeVariableMap = saveMap;
    }

    /** Generate all the bitsets to be used in the parser or lexer.
     * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
     * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
     * Note that most languages do not support object initialization inside a
     * class definition, so other code-generators may have to separate the
     * bitset declarations from the initializations (e.g., put the initializations
     * in the generated constructor instead).
     * @param bitsetList The list of bitsets to generate.
     * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
     */
    protected void genBitsets(Vector bitsetList,
                              int maxVocabulary
                              ) {
        println("");
        for (int i = 0; i < bitsetList.size(); i++) {
            BitSet p = (BitSet)bitsetList.elementAt(i);
            // Ensure that generated BitSet is large enough for vocabulary
            p.growToInclude(maxVocabulary);
            genBitSet(p, i);
        }
    }

    /** Do something simple like:
     * private static final long[] mk_tokenSet_0() {
     *     long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L };
     *     return data;
     * }
     * public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
     *
     * Or, for large bitsets, optimize init so ranges are collapsed into loops.
     * This is most useful for lexers using unicode.
     */
    private void genBitSet(BitSet p, int id) {
        // initialization data
        println(
            "private static final long[] mk" + getBitsetName(id) + "() {"
        );
        int n = p.lengthInLongWords();
        if ( n<BITSET_OPTIMIZE_INIT_THRESHOLD ) {
            println("\tlong[] data = { " + p.toStringOfWords() + "};");
        }
        else {
            // will init manually, allocate space then set values
            println("\tlong[] data = new long["+n+"];");
            long[] elems = p.toPackedArray();
            for (int i = 0; i < elems.length;) {
                if ( (i+1)==elems.length || elems[i]!=elems[i+1] ) {
                    // last number or no run of numbers, just dump assignment
                    println("\tdata["+i+"]="+elems[i]+"L;");
                    i++;
                }
                else {
                    // scan to find end of run
                    int j;
                    for (j = i + 1;
                         j < elems.length && elems[j]==elems[i];
                         j++)
                    {
                    }
                    // j-1 is last member of run
                    println("\tfor (int i = "+i+"; i<="+(j-1)+"; i++) { data[i]="+
                            elems[i]+"L; }");
                    i = j;
                }
            }
        }

        println("\treturn data;");
        println("}");
        // BitSet object
        println(
            "public static final BitSet " + getBitsetName(id) + " = new BitSet(" +
            "mk" + getBitsetName(id) + "()" +
            ");"
        );
    }

    /** Generate the finish of a block, using a combination of the info
     * returned from genCommonBlock() and the action to perform when
     * no alts were taken.
     * @param howToFinish The return of genCommonBlock()
     * @param noViableAction What to generate when no alt is taken
     */
    private void genBlockFinish(JavaBlockFinishingInfo howToFinish, String noViableAction) {
        if (howToFinish.needAnErrorClause &&
            (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
            if (howToFinish.generatedAnIf) {
                println("else {");
            }
            else {
                println("{");
            }
            tabs++;
            println(noViableAction);
            tabs--;
            println("}");
        }

        if (howToFinish.postscript != null) {
            println(howToFinish.postscript);
        }
    }

    /** Generate the init action for a block, which may be a RuleBlock or a
     * plain AlternativeBlock.
     * @param blk The block for which the init action is to be generated.
     */
    protected void genBlockInitAction(AlternativeBlock blk) {
        // dump out init action
        if (blk.initAction != null) {
            printAction(processActionForTreeSpecifiers(blk.initAction, blk.getLine(), currentRule, null));
        }
    }
1341
1342    /** Generate the header for a block, which may be a RuleBlock or a
1343     * plain AlternativeBLock. This generates any variable declarations
1344     * and syntactic-predicate-testing variables.
1345     * @blk The block for which the preamble is to be generated.
1346     */

1347    protected void genBlockPreamble(AlternativeBlock blk) {
1348        // define labels for rule blocks.
1349
if (blk instanceof RuleBlock) {
1350            RuleBlock rblk = (RuleBlock)blk;
1351            if (rblk.labeledElements != null) {
1352                for (int i = 0; i < rblk.labeledElements.size(); i++) {
1353                    AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
1354                    // System.out.println("looking at labeled element: "+a);
1355
// Variables for labeled rule refs and
1356
// subrules are different than variables for
1357
// grammar atoms. This test is a little tricky
1358
// because we want to get all rule refs and ebnf,
1359
// but not rule blocks or syntactic predicates
1360
if (
1361                        a instanceof RuleRefElement ||
1362                        a instanceof AlternativeBlock &&
1363                        !(a instanceof RuleBlock) &&
1364                        !(a instanceof SynPredBlock)
1365                    ) {
1366
1367                        if (
1368                            !(a instanceof RuleRefElement) &&
1369                            ((AlternativeBlock)a).not &&
1370                            analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
1371                        ) {
1372                            // Special case for inverted subrules that
1373
// will be inlined. Treat these like
1374
// token or char literal references
1375
println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1376                            if (grammar.buildAST) {
1377                                genASTDeclaration(a);
1378                            }
1379                        }
1380                        else {
1381                            if (grammar.buildAST) {
1382                                // Always gen AST variables for
1383
// labeled elements, even if the
1384
// element itself is marked with !
1385
genASTDeclaration(a);
1386                            }
1387                            if (grammar instanceof LexerGrammar) {
1388                                println("Token " + a.getLabel() + "=null;");
1389                            }
1390                            if (grammar instanceof TreeWalkerGrammar) {
1391                                // always generate rule-ref variables
1392
// for tree walker
1393
println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1394                            }
1395                        }
1396                    }
1397                    else {
1398                        // It is a token or literal reference. Generate the
1399                        // correct variable type for this grammar
1400                        println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1401
1402                        // In addition, generate *_AST variables if
1403                        // building ASTs
1404                        if (grammar.buildAST) {
1405                            if (a instanceof GrammarAtom &&
1406                                ((GrammarAtom)a).getASTNodeType() != null) {
1407                                GrammarAtom ga = (GrammarAtom)a;
1408                                genASTDeclaration(a, ga.getASTNodeType());
1409                            }
1410                            else {
1411                                genASTDeclaration(a);
1412                            }
1413                        }
1414                    }
1415                }
1416            }
1417        }
1418    }
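    // Illustrative sketch (hypothetical, not part of the original source): for a
    // parser rule containing a labeled token reference such as `id:IDENT`, with
    // buildAST on, the preamble printed by genBlockPreamble() comes out roughly
    // as the following, assuming labeledElementType/labeledElementInit are
    // "Token"/"null" for parser grammars:
    //
    //     Token  id = null;
    //     AST id_AST = null;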
1419
1420    /** Generate a series of case statements that implement a BitSet test.
1421     * @param p The Bitset for which cases are to be generated
1422     */

1423    protected void genCases(BitSet p) {
1424        if (DEBUG_CODE_GENERATOR) System.out.println("genCases(" + p + ")");
1425        int[] elems;
1426
1427        elems = p.toArray();
1428        // Wrap cases four-per-line for lexer, one-per-line for parser
1429        int wrap = (grammar instanceof LexerGrammar) ? 4 : 1;
1430        int j = 1;
1431        boolean startOfLine = true;
1432        for (int i = 0; i < elems.length; i++) {
1433            if (j == 1) {
1434                print("");
1435            }
1436            else {
1437                _print(" ");
1438            }
1439            _print("case " + getValueString(elems[i]) + ":");
1440
1441            if (j == wrap) {
1442                _println("");
1443                startOfLine = true;
1444                j = 1;
1445            }
1446            else {
1447                j++;
1448                startOfLine = false;
1449            }
1450        }
1451        if (!startOfLine) {
1452            _println("");
1453        }
1454    }
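    // Illustrative sketch (hypothetical, not part of the original source): for a
    // lexer BitSet containing 'a'..'e', genCases() would emit the cases four per
    // line,
    //
    //     case 'a': case 'b': case 'c': case 'd':
    //     case 'e':
    //
    // while a parser set such as {ID, INT} would be emitted one case per line.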
1455
1456    /**Generate common code for a block of alternatives; return a
1457     * postscript that needs to be generated at the end of the
1458     * block. Other routines may append else-clauses and such for
1459     * error checking before the postfix is generated. If the
1460     * grammar is a lexer, then generate alternatives in an order
1461     * where alternatives requiring deeper lookahead are generated
1462     * first, and EOF in the lookahead set reduces the depth of
1463     * the lookahead. @param blk The block to generate @param
1464     * noTestForSingle If true, then it does not generate a test
1465     * for a single alternative.
1466     */

1467    public JavaBlockFinishingInfo genCommonBlock(AlternativeBlock blk,
1468                                                 boolean noTestForSingle) {
1469        int nIF = 0;
1470        boolean createdLL1Switch = false;
1471        int closingBracesOfIFSequence = 0;
1472        JavaBlockFinishingInfo finishingInfo = new JavaBlockFinishingInfo();
1473        if (DEBUG_CODE_GENERATOR) System.out.println("genCommonBlock(" + blk + ")");
1474
1475        // Save the AST generation state, and set it to that of the block
1476        boolean savegenAST = genAST;
1477        genAST = genAST && blk.getAutoGen();
1478
1479        boolean oldsaveTest = saveText;
1480        saveText = saveText && blk.getAutoGen();
1481
1482        // Is this block inverted? If so, generate special-case code
1483        if (
1484            blk.not &&
1485            analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar)
1486        ) {
1487            if (DEBUG_CODE_GENERATOR) System.out.println("special case: ~(subrule)");
1488            Lookahead p = analyzer.look(1, blk);
1489            // Variable assignment for labeled elements
1490            if (blk.getLabel() != null && syntacticPredLevel == 0) {
1491                println(blk.getLabel() + " = " + lt1Value + ";");
1492            }
1493
1494            // AST
1495            genElementAST(blk);
1496
1497            String JavaDoc astArgs = "";
1498            if (grammar instanceof TreeWalkerGrammar) {
1499                astArgs = "_t,";
1500            }
1501
1502            // match the bitset for the alternative
1503            println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
1504
1505            // tack on tree cursor motion if doing a tree walker
1506            if (grammar instanceof TreeWalkerGrammar) {
1507                println("_t = _t.getNextSibling();");
1508            }
1509            return finishingInfo;
1510        }
1511
1512        // Special handling for single alt
1513        if (blk.getAlternatives().size() == 1) {
1514            Alternative alt = blk.getAlternativeAt(0);
1515            // Generate a warning if there is a synPred for single alt.
1516            if (alt.synPred != null) {
1517                antlrTool.warning(
1518                    "Syntactic predicate superfluous for single alternative",
1519                    grammar.getFilename(),
1520                    blk.getAlternativeAt(0).synPred.getLine(),
1521                    blk.getAlternativeAt(0).synPred.getColumn()
1522                );
1523            }
1524            if (noTestForSingle) {
1525                if (alt.semPred != null) {
1526                    // Generate validating predicate
1527                    genSemPred(alt.semPred, blk.line);
1528                }
1529                genAlt(alt, blk);
1530                return finishingInfo;
1531            }
1532        }
1533
1534        // count number of simple LL(1) cases; only do switch for
1535        // many LL(1) cases (no preds, no end of token refs)
1536        // We don't care about exit paths for (...)*, (...)+
1537        // because we don't explicitly have a test for them
1538        // as an alt in the loop.
1539        //
1540        // Also, we now count how many unicode lookahead sets
1541        // there are--they must be moved to DEFAULT or ELSE
1542        // clause.
1543        int nLL1 = 0;
1544        for (int i = 0; i < blk.getAlternatives().size(); i++) {
1545            Alternative a = blk.getAlternativeAt(i);
1546            if (suitableForCaseExpression(a)) {
1547                nLL1++;
1548            }
1549        }
1550
1551        // do LL(1) cases
1552        if (nLL1 >= makeSwitchThreshold) {
1553            // Determine the name of the item to be compared
1554            String JavaDoc testExpr = lookaheadString(1);
1555            createdLL1Switch = true;
1556            // when parsing trees, convert null to valid tree node with NULL lookahead
1557            if (grammar instanceof TreeWalkerGrammar) {
1558                println("if (_t==null) _t=ASTNULL;");
1559            }
1560            println("switch ( " + testExpr + ") {");
1561            for (int i = 0; i < blk.alternatives.size(); i++) {
1562                Alternative alt = blk.getAlternativeAt(i);
1563                // ignore any non-LL(1) alts, predicated alts,
1564                // or end-of-token alts for case expressions
1565                if (!suitableForCaseExpression(alt)) {
1566                    continue;
1567                }
1568                Lookahead p = alt.cache[1];
1569                if (p.fset.degree() == 0 && !p.containsEpsilon()) {
1570                    antlrTool.warning("Alternate omitted due to empty prediction set",
1571                                 grammar.getFilename(),
1572                                 alt.head.getLine(), alt.head.getColumn());
1573                }
1574                else {
1575                    genCases(p.fset);
1576                    println("{");
1577                    tabs++;
1578                    genAlt(alt, blk);
1579                    println("break;");
1580                    tabs--;
1581                    println("}");
1582                }
1583            }
1584            println("default:");
1585            tabs++;
1586        }
1587
1588        // do non-LL(1) and nondeterministic cases This is tricky in
1589        // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR
1590        // : "*="; Since nextToken is generated without a loop, then
1591        // the STAR will have end-of-token as its lookahead set for
1592        // LA(2). So, we must generate the alternatives containing
1593        // trailing end-of-token in their lookahead sets *after* the
1594        // alternatives without end-of-token. This implements the
1595        // usual lexer convention that longer matches come before
1596        // shorter ones, e.g. "*=" matches ASSIGN_STAR not STAR
1597        //
1598        // For non-lexer grammars, this does not sort the alternates
1599        // by depth. Note that alts whose lookahead is purely
1600        // end-of-token at k=1 end up as default or else clauses.
1601        int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
1602        for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
1603            if (DEBUG_CODE_GENERATOR) System.out.println("checking depth " + altDepth);
1604            for (int i = 0; i < blk.alternatives.size(); i++) {
1605                Alternative alt = blk.getAlternativeAt(i);
1606                if (DEBUG_CODE_GENERATOR) System.out.println("genAlt: " + i);
1607                // if we made a switch above, ignore what we already took care
1608                // of. Specifically, LL(1) alts with no preds
1609                // that do not have end-of-token in their prediction set
1610                // and that are not giant unicode sets.
1611                if (createdLL1Switch && suitableForCaseExpression(alt)) {
1612                    if (DEBUG_CODE_GENERATOR) System.out.println("ignoring alt because it was in the switch");
1613                    continue;
1614                }
1615                String JavaDoc e;
1616
1617                boolean unpredicted = false;
1618
1619                if (grammar instanceof LexerGrammar) {
1620                    // Calculate the "effective depth" of the alt,
1621                    // which is the max depth at which
1622                    // cache[depth]!=end-of-token
1623                    int effectiveDepth = alt.lookaheadDepth;
1624                    if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) {
1625                        // use maximum lookahead
1626                        effectiveDepth = grammar.maxk;
1627                    }
1628                    while (effectiveDepth >= 1 &&
1629                        alt.cache[effectiveDepth].containsEpsilon()) {
1630                        effectiveDepth--;
1631                    }
1632                    // Ignore alts whose effective depth is other than
1633                    // the ones we are generating for this iteration.
1634                    if (effectiveDepth != altDepth) {
1635                        if (DEBUG_CODE_GENERATOR)
1636                            System.out.println("ignoring alt because effectiveDepth!=altDepth;" + effectiveDepth + "!=" + altDepth);
1637                        continue;
1638                    }
1639                    unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
1640                    e = getLookaheadTestExpression(alt, effectiveDepth);
1641                }
1642                else {
1643                    unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
1644                    e = getLookaheadTestExpression(alt, grammar.maxk);
1645                }
1646
1647                // Was it a big unicode range that forced unsuitability
1648                // for a case expression?
1649                if (alt.cache[1].fset.degree() > caseSizeThreshold &&
1650                    suitableForCaseExpression(alt)) {
1651                    if (nIF == 0) {
1652                        println("if " + e + " {");
1653                    }
1654                    else {
1655                        println("else if " + e + " {");
1656                    }
1657                }
1658                else if (unpredicted &&
1659                    alt.semPred == null &&
1660                    alt.synPred == null) {
1661                    // The alt has empty prediction set and no
1662                    // predicate to help out. if we have not
1663                    // generated a previous if, just put {...} around
1664                    // the end-of-token clause
1665                    if (nIF == 0) {
1666                        println("{");
1667                    }
1668                    else {
1669                        println("else {");
1670                    }
1671                    finishingInfo.needAnErrorClause = false;
1672                }
1673                else { // check for sem and syn preds
1674

1675                    // Add any semantic predicate expression to the
1676                    // lookahead test
1677                    if (alt.semPred != null) {
1678                        // if debugging, wrap the evaluation of the
1679                        // predicate in a method translate $ and #
1680                        // references
1681                        ActionTransInfo tInfo = new ActionTransInfo();
1682                        String JavaDoc actionStr =
1683                            processActionForTreeSpecifiers(alt.semPred,
1684                                                           blk.line,
1685                                                           currentRule,
1686                                                           tInfo);
1687                        // ignore translation info...we don't need to
1688                        // do anything with it. call that will inform
1689                        // SemanticPredicateListeners of the result
1690                        if (((grammar instanceof ParserGrammar) ||
1691                            (grammar instanceof LexerGrammar)) &&
1692                            grammar.debuggingOutput) {
1693                            e = "(" + e + "&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.PREDICTING," +
1694                                addSemPred(charFormatter.escapeString(actionStr)) + "," + actionStr + "))";
1695                        }
1696                        else {
1697                            e = "(" + e + "&&(" + actionStr + "))";
1698                        }
1699                    }
1700
1701                    // Generate any syntactic predicates
1702                    if (nIF > 0) {
1703                        if (alt.synPred != null) {
1704                            println("else {");
1705                            tabs++;
1706                            genSynPred(alt.synPred, e);
1707                            closingBracesOfIFSequence++;
1708                        }
1709                        else {
1710                            println("else if " + e + " {");
1711                        }
1712                    }
1713                    else {
1714                        if (alt.synPred != null) {
1715                            genSynPred(alt.synPred, e);
1716                        }
1717                        else {
1718                            // when parsing trees, convert null to
1719                            // valid tree node with NULL lookahead.
1720                            if (grammar instanceof TreeWalkerGrammar) {
1721                                println("if (_t==null) _t=ASTNULL;");
1722                            }
1723                            println("if " + e + " {");
1724                        }
1725                    }
1726
1727                }
1728
1729                nIF++;
1730                tabs++;
1731                genAlt(alt, blk);
1732                tabs--;
1733                println("}");
1734            }
1735        }
1736        String JavaDoc ps = "";
1737        for (int i = 1; i <= closingBracesOfIFSequence; i++) {
1738            ps += "}";
1739        }
1740
1741        // Restore the AST generation state
1742        genAST = savegenAST;
1743
1744        // restore save text state
1745        saveText = oldsaveTest;
1746
1747        // Return the finishing info.
1748        if (createdLL1Switch) {
1749            tabs--;
1750            finishingInfo.postscript = ps + "}";
1751            finishingInfo.generatedSwitch = true;
1752            finishingInfo.generatedAnIf = nIF > 0;
1753            //return new JavaBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
1754

1755        }
1756        else {
1757            finishingInfo.postscript = ps;
1758            finishingInfo.generatedSwitch = false;
1759            finishingInfo.generatedAnIf = nIF > 0;
1760            // return new JavaBlockFinishingInfo(ps, false,nIF>0);
1761        }
1762        return finishingInfo;
1763    }
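    // Illustrative sketch (hypothetical, not part of the original source): for a
    // parser block with several LL(1) alternatives plus one non-LL(1)
    // alternative, the prediction code emitted by genCommonBlock() has roughly
    // this shape (token names and alternative bodies are made up):
    //
    //     switch ( LA(1) ) {
    //     case ID:
    //     {
    //         ...alt 1...
    //         break;
    //     }
    //     case INT:
    //     {
    //         ...alt 2...
    //         break;
    //     }
    //     default:
    //         if ((LA(1)==LPAREN) && (LA(2)==ID)) {
    //             ...alt 3...
    //         }
    //     // the closing "}" of the switch is returned in finishingInfo.postscript
    //     // so the caller can append an else/error clause first.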
1764
1765    private static boolean suitableForCaseExpression(Alternative a) {
1766        return
1767            a.lookaheadDepth == 1 &&
1768            a.semPred == null &&
1769            !a.cache[1].containsEpsilon() &&
1770            a.cache[1].fset.degree() <= caseSizeThreshold;
1771    }
1772
1773    /** Generate code to link an element reference into the AST */
1774    private void genElementAST(AlternativeElement el) {
1775        // handle case where you're not building trees, but are in tree walker.
1776        // Just need to get labels set up.
1777        if (grammar instanceof TreeWalkerGrammar && !grammar.buildAST) {
1778            String JavaDoc elementRef;
1779            String JavaDoc astName;
1780
1781            // Generate names and declarations of the AST variable(s)
1782            if (el.getLabel() == null) {
1783                elementRef = lt1Value;
1784                // Generate AST variables for unlabeled stuff
1785                astName = "tmp" + astVarNumber + "_AST";
1786                astVarNumber++;
1787                // Map the generated AST variable in the alternate
1788                mapTreeVariable(el, astName);
1789                // Generate an "input" AST variable also
1790                println(labeledElementASTType + " " + astName + "_in = " + elementRef + ";");
1791            }
1792            return;
1793        }
1794
1795        if (grammar.buildAST && syntacticPredLevel == 0) {
1796            boolean needASTDecl =
1797                (genAST &&
1798                (el.getLabel() != null ||
1799                el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG
1800                )
1801                );
1802
1803            // RK: if we have a grammar element always generate the decl
1804            // since some guy can access it from an action and we can't
1805            // peek ahead (well not without making a mess).
1806            // I'd prefer taking this out.
1807            if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
1808                (el instanceof TokenRefElement))
1809            {
1810                needASTDecl = true;
1811            }
1812
1813            boolean doNoGuessTest =
1814                (grammar.hasSyntacticPredicate && needASTDecl);
1815
1816            String JavaDoc elementRef;
1817            String JavaDoc astNameBase;
1818
1819            // Generate names and declarations of the AST variable(s)
1820            if (el.getLabel() != null) {
1821                elementRef = el.getLabel();
1822                astNameBase = el.getLabel();
1823            }
1824            else {
1825                elementRef = lt1Value;
1826                // Generate AST variables for unlabeled stuff
1827                astNameBase = "tmp" + astVarNumber;
1828                ;
1829                astVarNumber++;
1830            }
1831
1832            // Generate the declaration if required.
1833            if (needASTDecl) {
1834                // Generate the declaration
1835                if (el instanceof GrammarAtom) {
1836                    GrammarAtom ga = (GrammarAtom)el;
1837                    if (ga.getASTNodeType() != null) {
1838                        genASTDeclaration(el, astNameBase, ga.getASTNodeType());
1839                        // println(ga.getASTNodeType()+" " + astName+" = null;");
1840                    }
1841                    else {
1842                        genASTDeclaration(el, astNameBase, labeledElementASTType);
1843                        // println(labeledElementASTType+" " + astName + " = null;");
1844                    }
1845                }
1846                else {
1847                    genASTDeclaration(el, astNameBase, labeledElementASTType);
1848                    // println(labeledElementASTType+" " + astName + " = null;");
1849                }
1850            }
1851
1852            // for convenience..
1853            String JavaDoc astName = astNameBase + "_AST";
1854
1855            // Map the generated AST variable in the alternate
1856            mapTreeVariable(el, astName);
1857            if (grammar instanceof TreeWalkerGrammar) {
1858                // Generate an "input" AST variable also
1859                println(labeledElementASTType + " " + astName + "_in = null;");
1860            }
1861
1862            // Enclose actions with !guessing
1863            if (doNoGuessTest) {
1864                // println("if (inputState.guessing==0) {");
1865                // tabs++;
1866            }
1867
1868            // if something has a label assume it will be used
1869            // so we must initialize the RefAST
1870            if (el.getLabel() != null) {
1871                if (el instanceof GrammarAtom) {
1872                    println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
1873                }
1874                else {
1875                    println(astName + " = " + getASTCreateString(elementRef) + ";");
1876                }
1877            }
1878
1879            // if it has no label but a declaration exists initialize it.
1880            if (el.getLabel() == null && needASTDecl) {
1881                elementRef = lt1Value;
1882                if (el instanceof GrammarAtom) {
1883                    println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
1884                }
1885                else {
1886                    println(astName + " = " + getASTCreateString(elementRef) + ";");
1887                }
1888                // Map the generated AST variable in the alternate
1889                if (grammar instanceof TreeWalkerGrammar) {
1890                    // set "input" AST variable also
1891                    println(astName + "_in = " + elementRef + ";");
1892                }
1893            }
1894
1895            if (genAST) {
1896                switch (el.getAutoGenType()) {
1897                    case GrammarElement.AUTO_GEN_NONE:
1898                        println("astFactory.addASTChild(currentAST, " + astName + ");");
1899                        break;
1900                    case GrammarElement.AUTO_GEN_CARET:
1901                        println("astFactory.makeASTRoot(currentAST, " + astName + ");");
1902                        break;
1903                    default:
1904                        break;
1905                }
1906            }
1907            if (doNoGuessTest) {
1908                // tabs--;
1909                // println("}");
1910            }
1911        }
1912    }
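    // Illustrative sketch (hypothetical, not part of the original source): for an
    // unlabeled token reference in a parser grammar that is building ASTs, the
    // statements printed by genElementAST() come out roughly as (tmp1 is a
    // generated name; the creation expression comes from getASTCreateString()):
    //
    //     AST tmp1_AST = null;
    //     tmp1_AST = astFactory.create(LT(1));      // shape of getASTCreateString()
    //     astFactory.addASTChild(currentAST, tmp1_AST);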
1913
1914    /** Close the try block and generate catch phrases
1915     * if the element has a labeled handler in the rule
1916     */

1917    private void genErrorCatchForElement(AlternativeElement el) {
1918        if (el.getLabel() == null) return;
1919        String JavaDoc r = el.enclosingRuleName;
1920        if (grammar instanceof LexerGrammar) {
1921            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
1922        }
1923        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
1924        if (rs == null) {
1925            antlrTool.panic("Enclosing rule not found!");
1926        }
1927        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
1928        if (ex != null) {
1929            tabs--;
1930            println("}");
1931            genErrorHandler(ex);
1932        }
1933    }
1934
1935    /** Generate the catch phrases for a user-specified error handler */
1936    private void genErrorHandler(ExceptionSpec ex) {
1937        // Each ExceptionHandler in the ExceptionSpec is a separate catch
1938        for (int i = 0; i < ex.handlers.size(); i++) {
1939            ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
1940            // Generate catch phrase
1941            println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
1942            tabs++;
1943            if (grammar.hasSyntacticPredicate) {
1944                println("if (inputState.guessing==0) {");
1945                tabs++;
1946            }
1947
1948            // get the name of the followSet for the current rule so that we
1949            // can replace $lookaheadSet in the .g file.
1950            ActionTransInfo tInfo = null;
1951            if (currentRule != null && (grammar instanceof ParserGrammar)) // only supply follow set if we're parsing
1952            {
1953                tInfo = new ActionTransInfo();
1954                Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, currentRule.endNode);
1955                String JavaDoc followSetName = getBitsetName(markBitsetForGen(follow.fset));
1956                tInfo.lookaheadSetName = followSetName;
1957            }
1958
1959            // When not guessing, execute user handler action
1960            printAction(
1961                processActionForTreeSpecifiers(handler.action.getText(),
1962                                               handler.action.getLine(),
1963                                               currentRule, tInfo)
1964            );
1965
1966            if (grammar.hasSyntacticPredicate) {
1967                tabs--;
1968                println("} else {");
1969                tabs++;
1970                // When guessing, rethrow exception
1971                println(
1972                    "throw " +
1973                    extractIdOfAction(handler.exceptionTypeAndName) +
1974                    ";"
1975                );
1976                tabs--;
1977                println("}");
1978            }
1979            // Close catch phrase
1980            tabs--;
1981            println("}");
1982        }
1983    }
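    // Illustrative sketch (hypothetical, not part of the original source): a
    // handler written in the grammar as
    // `exception catch [RecognitionException ex] { report(ex); }` is emitted by
    // genErrorHandler() in roughly this shape when the grammar also uses
    // syntactic predicates:
    //
    //     catch (RecognitionException ex) {
    //         if (inputState.guessing==0) {
    //             report(ex);
    //         } else {
    //             throw ex;
    //         }
    //     }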
1984
1985    /** Generate a try { opening if the element has a labeled handler in the rule */
1986    private void genErrorTryForElement(AlternativeElement el) {
1987        if (el.getLabel() == null) return;
1988        String JavaDoc r = el.enclosingRuleName;
1989        if (grammar instanceof LexerGrammar) {
1990            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
1991        }
1992        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
1993        if (rs == null) {
1994            antlrTool.panic("Enclosing rule not found!");
1995        }
1996        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
1997        if (ex != null) {
1998            println("try { // for error handling");
1999            tabs++;
2000        }
2001    }
2002
2003    protected void genASTDeclaration(AlternativeElement el) {
2004        genASTDeclaration(el, labeledElementASTType);
2005    }
2006
2007    protected void genASTDeclaration(AlternativeElement el, String JavaDoc node_type) {
2008        genASTDeclaration(el, el.getLabel(), node_type);
2009    }
2010
2011    protected void genASTDeclaration(AlternativeElement el, String JavaDoc var_name, String JavaDoc node_type) {
2012        // already declared?
2013        if (declaredASTVariables.contains(el))
2014            return;
2015
2016        // emit code
2017        println(node_type + " " + var_name + "_AST = null;");
2018
2019        // mark as declared
2020        declaredASTVariables.add(el);
2021    }
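    // Illustrative sketch (hypothetical, not part of the original source): a call
    // such as genASTDeclaration(el, "expr", "MyNodeAST") prints a single line,
    //
    //     MyNodeAST expr_AST = null;
    //
    // and records `el` in declaredASTVariables so it is not declared twice.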
2022
2023    /** Generate a header that is common to all Java files */
2024    protected void genHeader() {
2025        println("// $ANTLR " + Tool.version + ": " +
2026                "\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" +
2027                " -> " +
2028                "\"" + grammar.getClassName() + ".java\"$");
2029    }
2030
2031    private void genLiteralsTest() {
2032        println("_ttype = testLiteralsTable(_ttype);");
2033    }
2034
2035    private void genLiteralsTestForPartialToken() {
2036        println("_ttype = testLiteralsTable(new String(text.getBuffer(),_begin,text.length()-_begin),_ttype);");
2037    }
2038
2039    protected void genMatch(BitSet b) {
2040    }
2041
2042    protected void genMatch(GrammarAtom atom) {
2043        if (atom instanceof StringLiteralElement) {
2044            if (grammar instanceof LexerGrammar) {
2045                genMatchUsingAtomText(atom);
2046            }
2047            else {
2048                genMatchUsingAtomTokenType(atom);
2049            }
2050        }
2051        else if (atom instanceof CharLiteralElement) {
2052            if (grammar instanceof LexerGrammar) {
2053                genMatchUsingAtomText(atom);
2054            }
2055            else {
2056                antlrTool.error("cannot ref character literals in grammar: " + atom);
2057            }
2058        }
2059        else if (atom instanceof TokenRefElement) {
2060            genMatchUsingAtomText(atom);
2061        }
2062        else if (atom instanceof WildcardElement) {
2063            gen((WildcardElement)atom);
2064        }
2065    }
2066
2067    protected void genMatchUsingAtomText(GrammarAtom atom) {
2068        // match() for trees needs the _t cursor
2069        String JavaDoc astArgs = "";
2070        if (grammar instanceof TreeWalkerGrammar) {
2071            astArgs = "_t,";
2072        }
2073
2074        // if in lexer and ! on element, save buffer index to kill later
2075        if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
2076            println("_saveIndex=text.length();");
2077        }
2078
2079        print(atom.not ? "matchNot(" : "match(");
2080        _print(astArgs);
2081
2082        // print out what to match
2083        if (atom.atomText.equals("EOF")) {
2084            // horrible hack to handle EOF case
2085            _print("Token.EOF_TYPE");
2086        }
2087        else {
2088            _print(atom.atomText);
2089        }
2090        _println(");");
2091
2092        if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
2093            println("text.setLength(_saveIndex);"); // kill text atom put in buffer
2094        }
2095    }
2096
2097    protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
2098        // match() for trees needs the _t cursor
2099        String JavaDoc astArgs = "";
2100        if (grammar instanceof TreeWalkerGrammar) {
2101            astArgs = "_t,";
2102        }
2103
2104        // If the literal can be mangled, generate the symbolic constant instead
2105        String JavaDoc mangledName = null;
2106        String JavaDoc s = astArgs + getValueString(atom.getType());
2107
2108        // matching
2109        println((atom.not ? "matchNot(" : "match(") + s + ");");
2110    }
2111
2112    /** Generate the nextToken() rule. nextToken() is a synthetic
2113     * lexer rule that is the implicit OR of all user-defined
2114     * lexer rules.
2115     */

2116    public void genNextToken() {
2117        // Are there any public rules? If not, then just generate a
2118        // fake nextToken().
2119        boolean hasPublicRules = false;
2120        for (int i = 0; i < grammar.rules.size(); i++) {
2121            RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
2122            if (rs.isDefined() && rs.access.equals("public")) {
2123                hasPublicRules = true;
2124                break;
2125            }
2126        }
2127        if (!hasPublicRules) {
2128            println("");
2129            println("public Token nextToken() throws TokenStreamException {");
2130            println("\ttry {uponEOF();}");
2131            println("\tcatch(CharStreamIOException csioe) {");
2132            println("\t\tthrow new TokenStreamIOException(csioe.io);");
2133            println("\t}");
2134            println("\tcatch(CharStreamException cse) {");
2135            println("\t\tthrow new TokenStreamException(cse.getMessage());");
2136            println("\t}");
2137            println("\treturn new CommonToken(Token.EOF_TYPE, \"\");");
2138            println("}");
2139            println("");
2140            return;
2141        }
2142
2143        // Create the synthesized nextToken() rule
2144        RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
2145        // Define the nextToken rule symbol
2146        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
2147        nextTokenRs.setDefined();
2148        nextTokenRs.setBlock(nextTokenBlk);
2149        nextTokenRs.access = "private";
2150        grammar.define(nextTokenRs);
2151        // Analyze the nextToken rule
2152        boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
2153
2154        // Generate the next token rule
2155        String JavaDoc filterRule = null;
2156        if (((LexerGrammar)grammar).filterMode) {
2157            filterRule = ((LexerGrammar)grammar).filterRule;
2158        }
2159
2160        println("");
2161        println("public Token nextToken() throws TokenStreamException {");
2162        tabs++;
2163        println("Token theRetToken=null;");
2164        _println("tryAgain:");
2165        println("for (;;) {");
2166        tabs++;
2167        println("Token _token = null;");
2168        println("int _ttype = Token.INVALID_TYPE;");
2169        if (((LexerGrammar)grammar).filterMode) {
2170            println("setCommitToPath(false);");
2171            if (filterRule != null) {
2172                // Here's a good place to ensure that the filter rule actually exists
2173                if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) {
2174                    grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
2175                }
2176                else {
2177                    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
2178                    if (!rs.isDefined()) {
2179                        grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
2180                    }
2181                    else if (rs.access.equals("public")) {
2182                        grammar.antlrTool.error("Filter rule " + filterRule + " must be protected");
2183                    }
2184                }
2185                println("int _m;");
2186                println("_m = mark();");
2187            }
2188        }
2189        println("resetText();");
2190
2191        println("try { // for char stream error handling");
2192        tabs++;
2193
2194        // Generate try around whole thing to trap scanner errors
2195        println("try { // for lexical error handling");
2196        tabs++;
2197
2198        // Test for public lexical rules with empty paths
2199        for (int i = 0; i < nextTokenBlk.getAlternatives().size(); i++) {
2200            Alternative a = nextTokenBlk.getAlternativeAt(i);
2201            if (a.cache[1].containsEpsilon()) {
2202                //String r = a.head.toString();
2203                RuleRefElement rr = (RuleRefElement)a.head;
2204                String JavaDoc r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
2205                antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")");
2206            }
2207        }
2208
2209        // Generate the block
2210        String JavaDoc newline = System.getProperty("line.separator");
2211        JavaBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
2212        String JavaDoc errFinish = "if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}";
2213        errFinish += newline + "\t\t\t\t";
2214        if (((LexerGrammar)grammar).filterMode) {
2215            if (filterRule == null) {
2216                errFinish += "else {consume(); continue tryAgain;}";
2217            }
2218            else {
2219                errFinish += "else {" + newline +
2220                    "\t\t\t\t\tcommit();" + newline +
2221                    "\t\t\t\t\ttry {m" + filterRule + "(false);}" + newline +
2222                    "\t\t\t\t\tcatch(RecognitionException e) {" + newline +
2223                    "\t\t\t\t\t // catastrophic failure" + newline +
2224                    "\t\t\t\t\t reportError(e);" + newline +
2225                    "\t\t\t\t\t consume();" + newline +
2226                    "\t\t\t\t\t}" + newline +
2227                    "\t\t\t\t\tcontinue tryAgain;" + newline +
2228                    "\t\t\t\t}";
2229            }
2230        }
2231        else {
2232            errFinish += "else {" + throwNoViable + "}";
2233        }
2234        genBlockFinish(howToFinish, errFinish);
2235
2236        // at this point a valid token has been matched, undo "mark" that was done
2237        if (((LexerGrammar)grammar).filterMode && filterRule != null) {
2238            println("commit();");
2239        }
2240
2241        // Generate literals test if desired
2242        // make sure _ttype is set first; note _returnToken must be
2243        // non-null as the rule was required to create it.
2244        println("if ( _returnToken==null ) continue tryAgain; // found SKIP token");
2245        println("_ttype = _returnToken.getType();");
2246        if (((LexerGrammar)grammar).getTestLiterals()) {
2247            genLiteralsTest();
2248        }
2249
2250        // return token created by rule reference in switch
2251        println("_returnToken.setType(_ttype);");
2252        println("return _returnToken;");
2253
2254        // Close try block
2255        tabs--;
2256        println("}");
2257        println("catch (RecognitionException e) {");
2258        tabs++;
2259        if (((LexerGrammar)grammar).filterMode) {
2260            if (filterRule == null) {
2261                println("if ( !getCommitToPath() ) {consume(); continue tryAgain;}");
2262            }
2263            else {
2264                println("if ( !getCommitToPath() ) {");
2265                tabs++;
2266                println("rewind(_m);");
2267                println("resetText();");
2268                println("try {m" + filterRule + "(false);}");
2269                println("catch(RecognitionException ee) {");
2270                println(" // horrendous failure: error in filter rule");
2271                println(" reportError(ee);");
2272                println(" consume();");
2273                println("}");
2274                println("continue tryAgain;");
2275                tabs--;
2276                println("}");
2277            }
2278        }
2279        if (nextTokenBlk.getDefaultErrorHandler()) {
2280            println("reportError(e);");
2281            println("consume();");
2282        }
2283        else {
2284            // pass on to invoking routine
2285            println("throw new TokenStreamRecognitionException(e);");
2286        }
2287        tabs--;
2288        println("}");
2289
2290        // close CharStreamException try
2291        tabs--;
2292        println("}");
2293        println("catch (CharStreamException cse) {");
2294        println(" if ( cse instanceof CharStreamIOException ) {");
2295        println(" throw new TokenStreamIOException(((CharStreamIOException)cse).io);");
2296        println(" }");
2297        println(" else {");
2298        println(" throw new TokenStreamException(cse.getMessage());");
2299        println(" }");
2300        println("}");
2301
2302        // close for-loop
2303        tabs--;
2304        println("}");
2305
2306        // close method nextToken
2307        tabs--;
2308        println("}");
2309        println("");
2310    }
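    // Illustrative sketch (hypothetical, not part of the original source): for a
    // plain lexer (no filter mode), the nextToken() emitted above is shaped
    // roughly like this, assembled from the println() calls in genNextToken():
    //
    //     public Token nextToken() throws TokenStreamException {
    //         Token theRetToken=null;
    //     tryAgain:
    //         for (;;) {
    //             Token _token = null;
    //             int _ttype = Token.INVALID_TYPE;
    //             resetText();
    //             try {   // for char stream error handling
    //                 try {   // for lexical error handling
    //                     ...switch on LA(1) over the lexer rules...
    //                     if ( _returnToken==null ) continue tryAgain; // found SKIP token
    //                     _ttype = _returnToken.getType();
    //                     _returnToken.setType(_ttype);
    //                     return _returnToken;
    //                 }
    //                 catch (RecognitionException e) { ...report or rethrow... }
    //             }
    //             catch (CharStreamException cse) { ...wrap as TokenStream exception... }
    //         }
    //     }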
2311
2312    /** Gen a named rule block.
2313     * ASTs are generated for each element of an alternative unless
2314     * the rule or the alternative have a '!' modifier.
2315     *
2316     * If an alternative defeats the default tree construction, it
2317     * must set <rule>_AST to the root of the returned AST.
2318     *
2319     * Each alternative that does automatic tree construction, builds
2320     * up root and child list pointers in an ASTPair structure.
2321     *
2322     * A rule finishes by setting the returnAST variable from the
2323     * ASTPair.
2324     *
2325     * @param rule The name of the rule to generate
2326     * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
2327     */

2328    public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum) {
2329        tabs = 1;
2330
2331        if (DEBUG_CODE_GENERATOR) System.out.println("genRule(" + s.getId() + ")");
2332        if (!s.isDefined()) {
2333            antlrTool.error("undefined rule: " + s.getId());
2334            return;
2335        }
2336
2337        // Generate rule return type, name, arguments
2338        RuleBlock rblk = s.getBlock();
2339
2340        currentRule = rblk;
2341        currentASTResult = s.getId();
2342
2343        // clear list of declared ast variables..
2344        declaredASTVariables.clear();
2345
2346        // Save the AST generation state, and set it to that of the rule
2347        boolean savegenAST = genAST;
2348        genAST = genAST && rblk.getAutoGen();
2349
2350        // boolean oldsaveTest = saveText;
2351        saveText = rblk.getAutoGen();
2352
2353        // print javadoc comment if any
2354        if (s.comment != null) {
2355            _println(s.comment);
2356        }
2357
2358        // Gen method access and final qualifier
2359        print(s.access + " final ");
2360
2361        // Gen method return type (note lexer return action set at rule creation)
2362        if (rblk.returnAction != null) {
2363            // Has specified return value
2364            _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
2365        }
2366        else {
2367            // No specified return value
2368            _print("void ");
2369        }
2370
2371        // Gen method name
2372        _print(s.getId() + "(");
2373
2374        // Additional rule parameters common to all rules for this grammar
2375        _print(commonExtraParams);
2376        if (commonExtraParams.length() != 0 && rblk.argAction != null) {
2377            _print(",");
2378        }
2379
2380        // Gen arguments
2381        if (rblk.argAction != null) {
2382            // Has specified arguments
2383            _println("");
2384            tabs++;
2385            println(rblk.argAction);
2386            tabs--;
2387            print(")");
2388        }
2389        else {
2390            // No specified arguments
2391            _print(")");
2392        }
2393
2394        // Gen throws clause and open curly
2395        _print(" throws " + exceptionThrown);
2396        if (grammar instanceof ParserGrammar) {
2397            _print(", TokenStreamException");
2398        }
2399        else if (grammar instanceof LexerGrammar) {
2400            _print(", CharStreamException, TokenStreamException");
2401        }
2402        // Add user-defined exceptions unless lexer (for now)
2403        if (rblk.throwsSpec != null) {
2404            if (grammar instanceof LexerGrammar) {
2405                antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule " + rblk.ruleName);
2406            }
2407            else {
2408                _print(", " + rblk.throwsSpec);
2409            }
2410        }
2411
2412        _println(" {");
2413        tabs++;
2414
2415        // Convert return action to variable declaration
2416        if (rblk.returnAction != null)
2417            println(rblk.returnAction + ";");
2418
2419        // print out definitions needed by rules for various grammar types
2420        println(commonLocalVars);
2421
2422        if (grammar.traceRules) {
2423            if (grammar instanceof TreeWalkerGrammar) {
2424                println("traceIn(\"" + s.getId() + "\",_t);");
2425            }
2426            else {
2427                println("traceIn(\"" + s.getId() + "\");");
2428            }
2429        }
2430
2431        if (grammar instanceof LexerGrammar) {
2432            // lexer rule default return value is the rule's token name
2433            // This is a horrible hack to support the built-in EOF lexer rule.
2434            if (s.getId().equals("mEOF"))
2435                println("_ttype = Token.EOF_TYPE;");
2436            else
2437                println("_ttype = " + s.getId().substring(1) + ";");
2438            println("int _saveIndex;"); // used for element! (so we can kill text matched for element)
2439            /*
2440             println("boolean old_saveConsumedInput=saveConsumedInput;");
2441             if ( !rblk.getAutoGen() ) { // turn off "save input" if ! on rule
2442             println("saveConsumedInput=false;");
2443             }
2444             */

2445        }
2446
2447        // if debugging, write code to mark entry to the rule
2448        if (grammar.debuggingOutput)
2449            if (grammar instanceof ParserGrammar)
2450                println("fireEnterRule(" + ruleNum + ",0);");
2451            else if (grammar instanceof LexerGrammar)
2452                println("fireEnterRule(" + ruleNum + ",_ttype);");
2453
2454        // Generate trace code if desired
2455        if (grammar.debuggingOutput || grammar.traceRules) {
2456            println("try { // debugging");
2457            tabs++;
2458        }
2459
2460        // Initialize AST variables
2461        if (grammar instanceof TreeWalkerGrammar) {
2462            // "Input" value for rule
2463            println(labeledElementASTType + " " + s.getId() + "_AST_in = (" + labeledElementASTType + ")_t;");
2464        }
2465        if (grammar.buildAST) {
2466            // Parser member used to pass AST returns from rule invocations
2467            println("returnAST = null;");
2468            // Tracks AST construction
2469            // println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
2470            println("ASTPair currentAST = new ASTPair();");
2471            // User-settable return value for rule.
2472            println(labeledElementASTType + " " + s.getId() + "_AST = null;");
2473        }
2474
2475        genBlockPreamble(rblk);
2476        genBlockInitAction(rblk);
2477        println("");
2478
2479        // Search for an unlabeled exception specification attached to the rule
2480        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
2481
2482        // Generate try block around the entire rule for error handling
2483        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
2484            println("try { // for error handling");
2485            tabs++;
2486        }
2487
2488        // Generate the alternatives
2489        if (rblk.alternatives.size() == 1) {
2490            // One alternative -- use simple form
2491            Alternative alt = rblk.getAlternativeAt(0);
2492            String JavaDoc pred = alt.semPred;
2493            if (pred != null)
2494                genSemPred(pred, currentRule.line);
2495            if (alt.synPred != null) {
2496                antlrTool.warning(
2497                    "Syntactic predicate ignored for single alternative",
2498                    grammar.getFilename(),
2499                    alt.synPred.getLine(),
2500                    alt.synPred.getColumn()
2501                );
2502            }
2503            genAlt(alt, rblk);
2504        }
2505        else {
2506            // Multiple alternatives -- generate complex form
2507            boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
2508
2509            JavaBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
2510            genBlockFinish(howToFinish, throwNoViable);
2511        }
2512
2513        // Generate catch phrase for error handling
2514        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
2515            // Close the try block
2516            tabs--;
2517            println("}");
2518        }
2519
2520        // Generate user-defined or default catch phrases
2521        if (unlabeledUserSpec != null) {
2522            genErrorHandler(unlabeledUserSpec);
2523        }
2524        else if (rblk.getDefaultErrorHandler()) {
2525            // Generate default catch phrase
2526            println("catch (" + exceptionThrown + " ex) {");
2527            tabs++;
2528            // Generate code to handle error if not guessing
2529            if (grammar.hasSyntacticPredicate) {
2530                println("if (inputState.guessing==0) {");
2531                tabs++;
2532            }
2533            println("reportError(ex);");
2534            if (!(grammar instanceof TreeWalkerGrammar)) {
2535                // Generate code to consume until token in k==1 follow set
2536                Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
2537                String JavaDoc followSetName = getBitsetName(markBitsetForGen(follow.fset));
2538                println("consume();");
2539                println("consumeUntil(" + followSetName + ");");
2540            }
2541            else {
2542                // Just consume one token
2543                println("if (_t!=null) {_t = _t.getNextSibling();}");
2544            }
2545            if (grammar.hasSyntacticPredicate) {
2546                tabs--;
2547                // When guessing, rethrow exception
2548                println("} else {");
2549                println(" throw ex;");
2550                println("}");
2551            }
2552            // Close catch phrase
2553            tabs--;
2554            println("}");
2555        }
2556
2557        // Squirrel away the AST "return" value
2558        if (grammar.buildAST) {
2559            println("returnAST = " + s.getId() + "_AST;");
2560        }
2561
2562        // Set return tree value for tree walkers
2563        if (grammar instanceof TreeWalkerGrammar) {
2564            println("_retTree = _t;");
2565        }
2566
2567        // Generate literals test for lexer rules so marked
2568        if (rblk.getTestLiterals()) {
2569            if (s.access.equals("protected")) {
2570                genLiteralsTestForPartialToken();
2571            }
2572            else {
2573                genLiteralsTest();
2574            }
2575        }
2576
2577        // if doing a lexer rule, dump code to create token if necessary
2578        if (grammar instanceof LexerGrammar) {
2579            println("if ( _createToken && _token==null && _ttype!=Token.SKIP ) {");
2580            println(" _token = makeToken(_ttype);");
2581            println(" _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));");
2582            println("}");
2583            println("_returnToken = _token;");
2584        }
2585
2586        // Gen the return statement if there is one (lexer has hard-wired return action)
2587        if (rblk.returnAction != null) {
2588            println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
2589        }
2590
2591        if (grammar.debuggingOutput || grammar.traceRules) {
2592            tabs--;
2593            println("} finally { // debugging");
2594            tabs++;
2595
2596            // If debugging, generate calls to mark exit of rule
2597            if (grammar.debuggingOutput)
2598                if (grammar instanceof ParserGrammar)
2599                    println("fireExitRule(" + ruleNum + ",0);");
2600                else if (grammar instanceof LexerGrammar)
2601                    println("fireExitRule(" + ruleNum + ",_ttype);");
2602
2603            if (grammar.traceRules) {
2604                if (grammar instanceof TreeWalkerGrammar) {
2605                    println("traceOut(\"" + s.getId() + "\",_t);");
2606                }
2607                else {
2608                    println("traceOut(\"" + s.getId() + "\");");
2609                }
2610            }
2611
2612            tabs--;
2613            println("}");
2614        }
2615
2616        tabs--;
2617        println("}");
2618        println("");
2619
2620        // Restore the AST generation state
2621        genAST = savegenAST;
2622
2623        // restore char save state
2624        // saveText = oldsaveTest;
2625    }
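    // Illustrative sketch (hypothetical, not part of the original source): a
    // public parser rule `expr` with the default error handler and buildAST on
    // is emitted by genRule() roughly as follows; the body in the middle comes
    // from genCommonBlock()/genAlt(), and the follow-set bitset name is made up:
    //
    //     public final void expr() throws RecognitionException, TokenStreamException {
    //         returnAST = null;
    //         ASTPair currentAST = new ASTPair();
    //         AST expr_AST = null;
    //         try {      // for error handling
    //             ...alternatives...
    //         }
    //         catch (RecognitionException ex) {
    //             reportError(ex);
    //             consume();
    //             consumeUntil(_tokenSet_0);   // bitset name illustrative
    //         }
    //         returnAST = expr_AST;
    //     }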
2626
2627    private void GenRuleInvocation(RuleRefElement rr) {
2628        // dump rule name
2629        _print(rr.targetRule + "(");
2630
2631        // lexers must tell rule if it should set _returnToken
2632        if (grammar instanceof LexerGrammar) {
2633            // if labeled, could access Token, so tell rule to create
2634            if (rr.getLabel() != null) {
2635                _print("true");
2636            }
2637            else {
2638                _print("false");
2639            }
2640            if (commonExtraArgs.length() != 0 || rr.args != null) {
2641                _print(",");
2642            }
2643        }
2644
2645        // Extra arguments common to all rules for this grammar
2646        _print(commonExtraArgs);
2647        if (commonExtraArgs.length() != 0 && rr.args != null) {
2648            _print(",");
2649        }
2650
2651        // Process arguments to method, if any
2652        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
2653        if (rr.args != null) {
2654            // When not guessing, execute user arg action
2655            ActionTransInfo tInfo = new ActionTransInfo();
2656            String JavaDoc args = processActionForTreeSpecifiers(rr.args, 0, currentRule, tInfo);
2657            if (tInfo.assignToRoot || tInfo.refRuleRoot != null) {
2658                antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" +
2659                           currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
2660            }
2661            _print(args);
2662
2663            // Warn if the rule accepts no arguments
2664            if (rs.block.argAction == null) {
2665                antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn());
2666            }
2667        }
2668        else {
2669            // For C++, no warning if rule has parameters, because there may be default
2670            // values for all of the parameters
2671            if (rs.block.argAction != null) {
2672                antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
2673            }
2674        }
2675        _println(");");
2676
2677        // move down to the first child while parsing
2678        if (grammar instanceof TreeWalkerGrammar) {
2679            println("_t = _retTree;");
2680        }
2681    }
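    // Illustrative sketch (hypothetical, not part of the original source): a
    // reference to rule `expr` inside a tree-walker alternative is emitted by
    // GenRuleInvocation() roughly as (assuming the tree walker's common extra
    // argument is _t):
    //
    //     expr(_t);
    //     _t = _retTree;
    //
    // while a labeled reference to lexer rule NUM would come out as mNUM(true);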
2682
2683    protected void genSemPred(String JavaDoc pred, int line) {
2684        // translate $ and # references
2685        ActionTransInfo tInfo = new ActionTransInfo();
2686        pred = processActionForTreeSpecifiers(pred, line, currentRule, tInfo);
2687        // ignore translation info...we don't need to do anything with it.
2688        String JavaDoc escapedPred = charFormatter.escapeString(pred);
2689
2690        // if debugging, wrap the semantic predicate evaluation in a method
2691        // that can tell SemanticPredicateListeners the result
2692        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
2693            pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING,"
2694                + addSemPred(escapedPred) + "," + pred + ")";
2695        println("if (!(" + pred + "))");
2696        println(" throw new SemanticException(\"" + escapedPred + "\");");
2697    }
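    // Illustrative sketch (hypothetical, not part of the original source): a
    // validating semantic predicate written in the grammar as {count < 10}? is
    // emitted by genSemPred() roughly as:
    //
    //     if (!(count < 10))
    //       throw new SemanticException("count < 10");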
2698
2699    /** Write an array of Strings which are the semantic predicate
2700     * expressions. The debugger will reference them by number only
2701     */

2702    protected void genSemPredMap() {
2703        Enumeration JavaDoc e = semPreds.elements();
2704        println("private String _semPredNames[] = {");
2705        while (e.hasMoreElements())
2706            println("\"" + e.nextElement() + "\",");
2707        println("};");
2708    }
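    // Illustrative sketch (hypothetical, not part of the original source): with
    // two predicates registered through addSemPred(), genSemPredMap() prints:
    //
    //     private String _semPredNames[] = {
    //     "count < 10",
    //     "isTypeName(LT(1).getText())",
    //     };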
2709
2710    protected void genSynPred(SynPredBlock blk, String JavaDoc lookaheadExpr) {
2711        if (DEBUG_CODE_GENERATOR) System.out.println("gen=>(" + blk + ")");
2712
2713        // Dump synpred result variable
2714
println("boolean synPredMatched" + blk.ID + " = false;");
2715        // Gen normal lookahead test
2716
println("if (" + lookaheadExpr + ") {");
2717        tabs++;
2718
2719        // Save input state
2720        if (grammar instanceof TreeWalkerGrammar) {
2721            println("AST __t" + blk.ID + " = _t;");
2722        }
2723        else {
2724            println("int _m" + blk.ID + " = mark();");
2725        }
2726
2727        // Once inside the try, assume synpred works unless exception caught
2728        println("synPredMatched" + blk.ID + " = true;");
2729        println("inputState.guessing++;");
2730
2731        // if debugging, tell listeners that a synpred has started
2732        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
2733            (grammar instanceof LexerGrammar))) {
2734            println("fireSyntacticPredicateStarted();");
2735        }
2736
2737        syntacticPredLevel++;
2738        println("try {");
2739        tabs++;
2740        gen((AlternativeBlock)blk); // gen code to test predicate
2741        tabs--;
2742        //println("System.out.println(\"pred "+blk+" succeeded\");");
2743        println("}");
2744        println("catch (" + exceptionThrown + " pe) {");
2745        tabs++;
2746        println("synPredMatched" + blk.ID + " = false;");
2747        //println("System.out.println(\"pred "+blk+" failed\");");
2748        tabs--;
2749        println("}");
2750
2751        // Restore input state
2752        if (grammar instanceof TreeWalkerGrammar) {
2753            println("_t = __t" + blk.ID + ";");
2754        }
2755        else {
2756            println("rewind(_m" + blk.ID + ");");
2757        }
2758
2759        println("inputState.guessing--;");
2760
2761        // if debugging, tell listeners how the synpred turned out
2762        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
2763            (grammar instanceof LexerGrammar))) {
2764            println("if (synPredMatched" + blk.ID + ")");
2765            println(" fireSyntacticPredicateSucceeded();");
2766            println("else");
2767            println(" fireSyntacticPredicateFailed();");
2768        }
2769
2770        syntacticPredLevel--;
2771        tabs--;
2772
2773        // Close lookahead test
2774        println("}");
2775
2776        // Test synpred result
2777        println("if ( synPredMatched" + blk.ID + " ) {");
2778    }
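    // Shape of the emitted guessing block for a parser grammar, assuming a
    // hypothetical predicate with blk.ID == 5 and lookahead "LA(1)==LPAREN"
    // (the predicate body is elided; indentation is approximate):
    //
    //     boolean synPredMatched5 = false;
    //     if (LA(1)==LPAREN) {
    //         int _m5 = mark();
    //         synPredMatched5 = true;
    //         inputState.guessing++;
    //         try {
    //             ... // code generated for the predicate's alternative block
    //         }
    //         catch (RecognitionException pe) {
    //             synPredMatched5 = false;
    //         }
    //         rewind(_m5);
    //         inputState.guessing--;
    //     }
    //     if ( synPredMatched5 ) {
    //
    // For tree walkers, the mark()/rewind() pair is replaced by saving and
    // restoring _t, as the code above shows.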
2779
2780    /** Generate a static array containing the names of the tokens,
2781     * indexed by the token type values. This static array is used
2782     * to format error messages so that the token identifiers or literal
2783     * strings are displayed instead of the token numbers.
2784     *
2785     * If a lexical rule has a paraphrase, use it rather than the
2786     * token label.
2787     */

2788    public void genTokenStrings() {
2789        // Generate a string for each token. This creates a static
2790        // array of Strings indexed by token type.
2791        println("");
2792        println("public static final String[] _tokenNames = {");
2793        tabs++;
2794
2795        // Walk the token vocabulary and generate a Vector of strings
2796        // from the tokens.
2797        Vector v = grammar.tokenManager.getVocabulary();
2798        for (int i = 0; i < v.size(); i++) {
2799            String s = (String)v.elementAt(i);
2800            if (s == null) {
2801                s = "<" + String.valueOf(i) + ">";
2802            }
2803            if (!s.startsWith("\"") && !s.startsWith("<")) {
2804                TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
2805                if (ts != null && ts.getParaphrase() != null) {
2806                    s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
2807                }
2808            }
2809            print(charFormatter.literalString(s));
2810            if (i != v.size() - 1) {
2811                _print(",");
2812            }
2813            _println("");
2814        }
2815
2816        // Close the string array initializer
2817        tabs--;
2818        println("};");
2819    }
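    // Example of the array this emits for a small hypothetical grammar, where
    // the ID token carries the paraphrase "an identifier":
    //
    //     public static final String[] _tokenNames = {
    //         "<0>",
    //         "EOF",
    //         "<2>",
    //         "NULL_TREE_LOOKAHEAD",
    //         "\"begin\"",
    //         "an identifier",
    //         "SEMI"
    //     };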
2820
2821    /** Generate the token types Java file */
2822    protected void genTokenTypes(TokenManager tm) throws IOException {
2823        // Open the token output Java file and set the currentOutput stream
2824        // SAS: file open was moved to a method so a subclass can override
2825        // This was mainly for the VAJ interface
2826        setupOutput(tm.getName() + TokenTypesFileSuffix);
2827
2828        tabs = 0;
2829
2830        // Generate the header common to all Java files
2831        genHeader();
2832        // Do not use printAction because we assume tabs==0
2833        println(behavior.getHeaderAction(""));
2834
2835        // Encapsulate the definitions in an interface. This can be done
2836        // because they are all constants.
2837        println("public interface " + tm.getName() + TokenTypesFileSuffix + " {");
2838        tabs++;
2839
2840        // Generate a definition for each token type
2841        Vector v = tm.getVocabulary();
2842
2843        // Do special tokens manually
2844        println("int EOF = " + Token.EOF_TYPE + ";");
2845        println("int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
2846
2847        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
2848            String s = (String)v.elementAt(i);
2849            if (s != null) {
2850                if (s.startsWith("\"")) {
2851                    // a string literal
2852                    StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
2853                    if (sl == null) {
2854                        antlrTool.panic("String literal " + s + " not in symbol table");
2855                    }
2856                    else if (sl.label != null) {
2857                        println("int " + sl.label + " = " + i + ";");
2858                    }
2859                    else {
2860                        String mangledName = mangleLiteral(s);
2861                        if (mangledName != null) {
2862                            // We were able to create a meaningful mangled token name
2863                            println("int " + mangledName + " = " + i + ";");
2864                            // if no label specified, make the label equal to the mangled name
2865                            sl.label = mangledName;
2866                        }
2867                        else {
2868                            println("// " + s + " = " + i);
2869                        }
2870                    }
2871                }
2872                else if (!s.startsWith("<")) {
2873                    println("int " + s + " = " + i + ";");
2874                }
2875            }
2876        }
2877
2878        // Close the interface
2879        tabs--;
2880        println("}");
2881
2882        // Close the tokens output file
2883        currentOutput.close();
2884        currentOutput = null;
2885        exitIfError();
2886    }
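    // Sketch of a generated token-types file for a hypothetical grammar named
    // MyParser; the values shown assume the usual Token.EOF_TYPE == 1 and
    // Token.NULL_TREE_LOOKAHEAD == 3, with user tokens starting at 4:
    //
    //     public interface MyParserTokenTypes {
    //         int EOF = 1;
    //         int NULL_TREE_LOOKAHEAD = 3;
    //         int LITERAL_begin = 4;
    //         int ID = 5;
    //         int SEMI = 6;
    //     }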
2887
2888    /** Get a string for an expression to generate creation of an AST subtree.
2889     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
2890     */

2891    public String getASTCreateString(Vector v) {
2892        if (v.size() == 0) {
2893            return "";
2894        }
2895        StringBuffer buf = new StringBuffer();
2896        buf.append("(" + labeledElementASTType +
2897                   ")astFactory.make( (new ASTArray(" + v.size() +
2898                   "))");
2899        for (int i = 0; i < v.size(); i++) {
2900            buf.append(".add(" + v.elementAt(i) + ")");
2901        }
2902        buf.append(")");
2903        return buf.toString();
2904    }
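    // For a hypothetical Vector holding {"plus_AST", "left_AST", "right_AST"}
    // and labeledElementASTType == "AST", the returned expression is:
    //
    //     (AST)astFactory.make( (new ASTArray(3)).add(plus_AST).add(left_AST).add(right_AST))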
2905
2906    /** Get a string for an expression to generate creation of an AST node
2907     * @param atom The grammar node for which you are creating the node
2908     * @param str The arguments to the AST constructor
2909     */

2910    public String getASTCreateString(GrammarAtom atom, String str) {
2911        // System.out.println("ASTNodeType for "+atom+" is "+atom.getASTNodeType());
2912        if (atom != null && atom.getASTNodeType() != null) {
2913            return "new " + atom.getASTNodeType() + "(" + str + ")";
2914        }
2915        else {
2916            return "(" + labeledElementASTType + ")astFactory.create(" + str + ")";
2917        }
2918    }
2919
2920    /** Get a string for an expression to generate creation of an AST node
2921     * @param str The arguments to the AST constructor
2922     */

2923    public String getASTCreateString(String str) {
2924        // System.out.println("ASTNodeType for "+atom+" is "+atom.getASTNodeType());
2925        return "(" + labeledElementASTType + ")astFactory.create(" + str + ")";
2926    }
2927
2928    protected String getLookaheadTestExpression(Lookahead[] look, int k) {
2929        StringBuffer e = new StringBuffer(100);
2930        boolean first = true;
2931
2932        e.append("(");
2933        for (int i = 1; i <= k; i++) {
2934            BitSet p = look[i].fset;
2935            if (!first) {
2936                e.append(") && (");
2937            }
2938            first = false;
2939
2940            // Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
2941            // There is no way to predict what that token would be. Just
2942            // allow anything instead.
2943            if (look[i].containsEpsilon()) {
2944                e.append("true");
2945            }
2946            else {
2947                e.append(getLookaheadTestTerm(i, p));
2948            }
2949        }
2950        e.append(")");
2951
2952        return e.toString();
2953    }
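    // Example, assuming k == 2 and simple singleton lookahead sets for a
    // hypothetical decision:
    //
    //     (LA(1)==ID) && (LA(2)==ASSIGN)
    //
    // Each parenthesized term comes from getLookaheadTestTerm below; a level
    // containing epsilon (end of a syntactic predicate) degenerates to "true".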
2954
2955    /**Generate a lookahead test expression for an alternate. This
2956     * will be a series of tests joined by '&&' and enclosed by '()',
2957     * the number of such tests being determined by the depth of the lookahead.
2958     */

2959    protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
2960        int depth = alt.lookaheadDepth;
2961        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
2962            // if the decision is nondeterministic, do the best we can: LL(k)
2963            // any predicates that are around will be generated later.
2964            depth = grammar.maxk;
2965        }
2966
2967        if (maxDepth == 0) {
2968            // empty lookahead can result from alt with sem pred
2969            // that can see end of token. E.g., A : {pred}? ('a')? ;
2970            return "( true )";
2971        }
2972
2973        return "(" + getLookaheadTestExpression(alt.cache, depth) + ")";
2974    }
2975
2976    /**Generate a depth==1 lookahead test expression given the BitSet.
2977     * This may be one of:
2978     * 1) a series of 'x==X||' tests
2979     * 2) a range test using >= && <= where possible,
2980     * 3) a bitset membership test for complex comparisons
2981     * @param k The lookahead level
2982     * @param p The lookahead set for level k
2983     */

2984    protected String getLookaheadTestTerm(int k, BitSet p) {
2985        // Determine the name of the item to be compared
2986        String ts = lookaheadString(k);
2987
2988        // Generate a range expression if possible
2989        int[] elems = p.toArray();
2990        if (elementsAreRange(elems)) {
2991            return getRangeExpression(k, elems);
2992        }
2993
2994        // Generate a bitset membership test if possible
2995        StringBuffer e;
2996        int degree = p.degree();
2997        if (degree == 0) {
2998            return "true";
2999        }
3000
3001        if (degree >= bitsetTestThreshold) {
3002            int bitsetIdx = markBitsetForGen(p);
3003            return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
3004        }
3005
3006        // Otherwise, generate the long-winded series of "x==X||" tests
3007        e = new StringBuffer();
3008        for (int i = 0; i < elems.length; i++) {
3009            // Get the compared-to item (token or character value)
3010            String cs = getValueString(elems[i]);
3011
3012            // Generate the element comparison
3013            if (i > 0) e.append("||");
3014            e.append(ts);
3015            e.append("==");
3016            e.append(cs);
3017        }
3018        return e.toString();
3019    }
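    // The three shapes this can return, using hypothetical token types:
    //
    //     LA(1)==ID||LA(1)==LPAREN||LA(1)==MINUS      // small set: equality chain
    //     (LA(1) >= DIGIT_0 && LA(1) <= DIGIT_9)      // contiguous range
    //     _tokenSet_1.member(LA(1))                   // large set: bitset lookup
    //
    // The bitset name comes from markBitsetForGen/getBitsetName; "_tokenSet_1"
    // is only an illustrative value.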
3020
3021    /** Return an expression for testing a contiguous range of elements
3022     * @param k The lookahead level
3023     * @param elems The elements representing the set, usually from BitSet.toArray().
3024     * @return String containing test expression.
3025     */

3026    public String getRangeExpression(int k, int[] elems) {
3027        if (!elementsAreRange(elems)) {
3028            antlrTool.panic("getRangeExpression called with non-range");
3029        }
3030        int begin = elems[0];
3031        int end = elems[elems.length - 1];
3032        return
3033            "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
3034            lookaheadString(k) + " <= " + getValueString(end) + ")";
3035    }
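    // E.g., in a lexer, a set containing the contiguous characters '0'..'9'
    // yields:
    //
    //     (LA(1) >= '0' && LA(1) <= '9')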
3036
3037    /** getValueString: get a string representation of a token or char value
3038     * @param value The token or char value
3039     */

3040    private String getValueString(int value) {
3041        String cs;
3042        if (grammar instanceof LexerGrammar) {
3043            cs = charFormatter.literalChar(value);
3044        }
3045        else {
3046            TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
3047            if (ts == null) {
3048                return "" + value; // return token type as string
3049                // tool.panic("vocabulary for token type " + value + " is null");
3050            }
3051            String tId = ts.getId();
3052            if (ts instanceof StringLiteralSymbol) {
3053                // if string literal, use predefined label if any
3054                // if no predefined, try to mangle into LITERAL_xxx.
3055                // if can't mangle, use int value as last resort
3056                StringLiteralSymbol sl = (StringLiteralSymbol)ts;
3057                String label = sl.getLabel();
3058                if (label != null) {
3059                    cs = label;
3060                }
3061                else {
3062                    cs = mangleLiteral(tId);
3063                    if (cs == null) {
3064                        cs = String.valueOf(value);
3065                    }
3066                }
3067            }
3068            else {
3069                cs = tId;
3070            }
3071        }
3072        return cs;
3073    }
3074
3075    /**Is the lookahead for this alt empty? */
3076    protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
3077        int depth = alt.lookaheadDepth;
3078        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
3079            depth = grammar.maxk;
3080        }
3081        for (int i = 1; i <= depth && i <= maxDepth; i++) {
3082            BitSet p = alt.cache[i].fset;
3083            if (p.degree() != 0) {
3084                return false;
3085            }
3086        }
3087        return true;
3088    }
3089
3090    private String lookaheadString(int k) {
3091        if (grammar instanceof TreeWalkerGrammar) {
3092            return "_t.getType()";
3093        }
3094        return "LA(" + k + ")";
3095    }
3096
3097    /** Mangle a string literal into a meaningful token name. This is
3098     * only possible for literals that are all characters. The resulting
3099     * mangled literal name is literalsPrefix with the text of the literal
3100     * appended.
3101     * @return A string representing the mangled literal, or null if not possible.
3102     */

3103    private String mangleLiteral(String s) {
3104        String mangled = antlrTool.literalsPrefix;
3105        for (int i = 1; i < s.length() - 1; i++) {
3106            if (!Character.isLetter(s.charAt(i)) &&
3107                s.charAt(i) != '_') {
3108                return null;
3109            }
3110            mangled += s.charAt(i);
3111        }
3112        if (antlrTool.upperCaseMangledLiterals) {
3113            mangled = mangled.toUpperCase();
3114        }
3115        return mangled;
3116    }
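    // Examples, assuming the default literalsPrefix "LITERAL_":
    //
    //     "begin"  ->  LITERAL_begin   (or LITERAL_BEGIN with upperCaseMangledLiterals)
    //     "<="     ->  null, since '<' and '=' are neither letters nor '_'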
3117
3118    /** Map an identifier to its corresponding tree-node variable.
3119     * This is context-sensitive, depending on the rule and alternative
3120     * being generated
3121     * @param idParam The identifier name to map
3122     * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
3123     */

3124    public String mapTreeId(String idParam, ActionTransInfo transInfo) {
3125        // if not in an action of a rule, nothing to map.
3126        if (currentRule == null) return idParam;
3127
3128        boolean in_var = false;
3129        String id = idParam;
3130        if (grammar instanceof TreeWalkerGrammar) {
3131            if (!grammar.buildAST) {
3132                in_var = true;
3133            }
3134            // If the id ends with "_in", then map it to the input variable
3135            else if (id.length() > 3 && id.lastIndexOf("_in") == id.length() - 3) {
3136                // Strip off the "_in"
3137                id = id.substring(0, id.length() - 3);
3138                in_var = true;
3139            }
3140        }
3141
3142        // Check the rule labels. If id is a label, then the output
3143        // variable is label_AST, and the input variable is plain label.
3144        for (int i = 0; i < currentRule.labeledElements.size(); i++) {
3145            AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
3146            if (elt.getLabel().equals(id)) {
3147                return in_var ? id : id + "_AST";
3148            }
3149        }
3150
3151        // Failing that, check the id-to-variable map for the alternative.
3152        // If the id is in the map, then output variable is the name in the
3153        // map, and input variable is name_in
3154        String s = (String)treeVariableMap.get(id);
3155        if (s != null) {
3156            if (s == NONUNIQUE) {
3157                // There is more than one element with this id
3158                return null;
3159            }
3160            else if (s.equals(currentRule.getRuleName())) {
3161                // a recursive call to the enclosing rule is
3162                // ambiguous with the rule itself.
3163                return null;
3164            }
3165            else {
3166                return in_var ? s + "_in" : s;
3167            }
3168        }
3169
3170        // Failing that, check the rule name itself. Output variable
3171        // is rule_AST; input variable is rule_AST_in (treeparsers).
3172        if (id.equals(currentRule.getRuleName())) {
3173            String r = in_var ? id + "_AST_in" : id + "_AST";
3174            if (transInfo != null) {
3175                if (!in_var) {
3176                    transInfo.refRuleRoot = r;
3177                }
3178            }
3179            return r;
3180        }
3181        else {
3182            // id does not map to anything -- return itself.
3183            return id;
3184        }
3185    }
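    // Illustrative mappings inside a hypothetical rule "expr" of a parser
    // grammar that builds ASTs:
    //
    //     #expr  ->  expr_AST        (the rule's own result tree; refRuleRoot is set)
    //     #e     ->  e_AST           (where e is a label on some element)
    //     #ID    ->  the AST variable recorded for the unlabeled ID reference,
    //                or null if ID occurs more than once in the alternative
    //
    // In a tree parser that does not build ASTs, the corresponding input-side
    // forms are returned instead (e.g. expr_AST_in for the rule itself).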
3186
3187    /** Given an element and the name of an associated AST variable,
3188     * create a mapping between the element "name" and the variable name.
3189     */

3190    private void mapTreeVariable(AlternativeElement e, String name) {
3191        // For tree elements, defer to the root
3192        if (e instanceof TreeElement) {
3193            mapTreeVariable(((TreeElement)e).root, name);
3194            return;
3195        }
3196
3197        // Determine the name of the element, if any, for mapping purposes
3198        String elName = null;
3199
3200        // Don't map labeled items
3201        if (e.getLabel() == null) {
3202            if (e instanceof TokenRefElement) {
3203                // use the token id
3204                elName = ((TokenRefElement)e).atomText;
3205            }
3206            else if (e instanceof RuleRefElement) {
3207                // use the rule name
3208                elName = ((RuleRefElement)e).targetRule;
3209            }
3210        }
3211        // Add the element to the tree variable map if it has a name
3212        if (elName != null) {
3213            if (treeVariableMap.get(elName) != null) {
3214                // Name is already in the map -- mark it as duplicate
3215                treeVariableMap.remove(elName);
3216                treeVariableMap.put(elName, NONUNIQUE);
3217            }
3218            else {
3219                treeVariableMap.put(elName, name);
3220            }
3221        }
3222    }
3223
3224    /** Lexically process tree-specifiers in the action.
3225     * This will replace #id and #(...) with the appropriate
3226     * function calls and/or variables.
3227     */

3228    protected String processActionForTreeSpecifiers(String actionStr,
3229                                                    int line,
3230                                                    RuleBlock currentRule,
3231                                                    ActionTransInfo tInfo) {
3232        if (actionStr == null || actionStr.length() == 0) return null;
3233
3234        // The action trans info tells us (at the moment) whether an
3235        // assignment was done to the rule's tree root.
3236        if (grammar == null)
3237            return actionStr;
3238
3239        // see if we have anything to do...
3240        if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
3241            grammar instanceof TreeWalkerGrammar ||
3242            ((grammar instanceof LexerGrammar ||
3243            grammar instanceof ParserGrammar)
3244            && actionStr.indexOf('$') != -1)) {
3245            // Create a lexer to read an action and return the translated version
3246            antlr.actions.java.ActionLexer lexer = new antlr.actions.java.ActionLexer(actionStr, currentRule, this, tInfo);
3247
3248            lexer.setLineOffset(line);
3249            lexer.setFilename(grammar.getFilename());
3250            lexer.setTool(antlrTool);
3251
3252            try {
3253                lexer.mACTION(true);
3254                actionStr = lexer.getTokenObject().getText();
3255                // System.out.println("action translated: "+actionStr);
3256                // System.out.println("trans info is "+tInfo);
3257            }
3258            catch (RecognitionException ex) {
3259                lexer.reportError(ex);
3260                return actionStr;
3261            }
3262            catch (TokenStreamException tex) {
3263                antlrTool.panic("Error reading action:" + actionStr);
3264                return actionStr;
3265            }
3266            catch (CharStreamException io) {
3267                antlrTool.panic("Error reading action:" + actionStr);
3268                return actionStr;
3269            }
3270        }
3271        return actionStr;
3272    }
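    // A minimal sketch, with hypothetical names: given buildAST and the action
    // "{ #decl = #id; }" inside a rule named decl, where id labels an element,
    // the ActionLexer rewrites the #-references via mapTreeId above, yielding
    // Java along the lines of
    //
    //     decl_AST = id_AST;
    //
    // and tInfo.refRuleRoot records that the rule's tree root was assigned.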
3273
3274    private void setupGrammarParameters(Grammar g) {
3275        if (g instanceof ParserGrammar) {
3276            labeledElementASTType = "AST";
3277            if (g.hasOption("ASTLabelType")) {
3278                Token tsuffix = g.getOption("ASTLabelType");
3279                if (tsuffix != null) {
3280                    String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
3281                    if (suffix != null) {
3282                        labeledElementASTType = suffix;
3283                    }
3284                }
3285            }
3286            labeledElementType = "Token ";
3287            labeledElementInit = "null";
3288            commonExtraArgs = "";
3289            commonExtraParams = "";
3290            commonLocalVars = "";
3291            lt1Value = "LT(1)";
3292            exceptionThrown = "RecognitionException";
3293            throwNoViable = "throw new NoViableAltException(LT(1), getFilename());";
3294        }
3295        else if (g instanceof LexerGrammar) {
3296            labeledElementType = "char ";
3297            labeledElementInit = "'\\0'";
3298            commonExtraArgs = "";
3299            commonExtraParams = "boolean _createToken";
3300            commonLocalVars = "int _ttype; Token _token=null; int _begin=text.length();";
3301            lt1Value = "LA(1)";
3302            exceptionThrown = "RecognitionException";
3303            throwNoViable = "throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());";
3304        }
3305        else if (g instanceof TreeWalkerGrammar) {
3306            labeledElementASTType = "AST";
3307            labeledElementType = "AST";
3308            if (g.hasOption("ASTLabelType")) {
3309                Token tsuffix = g.getOption("ASTLabelType");
3310                if (tsuffix != null) {
3311                    String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
3312                    if (suffix != null) {
3313                        labeledElementASTType = suffix;
3314                        labeledElementType = suffix;
3315                    }
3316                }
3317            }
3318            if (!g.hasOption("ASTLabelType")) {
3319                g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL, "AST"));
3320            }
3321            labeledElementInit = "null";
3322            commonExtraArgs = "_t";
3323            commonExtraParams = "AST _t";
3324            commonLocalVars = "";
3325            lt1Value = "(" + labeledElementASTType + ")_t";
3326            exceptionThrown = "RecognitionException";
3327            throwNoViable = "throw new NoViableAltException(_t);";
3328        }
3329        else {
3330            antlrTool.panic("Unknown grammar type");
3331        }
3332    }
3333
3334    /** This method exists so a subclass, namely VAJCodeGenerator,
3335     * can open the file in its own evil way. JavaCodeGenerator
3336     * simply opens a text file...
3337     */

3338    public void setupOutput(String className) throws IOException {
3339        currentOutput = antlrTool.openOutputFile(className + ".java");
3340    }
3341}
3342