
1 package persistence.antlr;
2
3 /* ANTLR Translator Generator
4  * Project led by Terence Parr at http://www.jGuru.com
5  * Software rights: http://www.antlr.org/license.html
6  *
7  */
8
9 // C++ code generator by Pete Wells: pete@yamuna.demon.co.uk
10 // #line generation contributed by: Ric Klaren <klaren@cs.utwente.nl>
11
12 import java.util.Enumeration;
13 import java.util.Hashtable;
14 import persistence.antlr.collections.impl.BitSet;
15 import persistence.antlr.collections.impl.Vector;
16 import java.io.PrintWriter; //SAS: changed for proper text file io
17 import java.io.IOException;
18 import java.io.FileWriter;
19
20 /** Generate MyParser.cpp, MyParser.hpp, MyLexer.cpp, MyLexer.hpp
21  * and MyParserTokenTypes.hpp
22  */

23 public class CppCodeGenerator extends CodeGenerator {
24     boolean DEBUG_CPP_CODE_GENERATOR = false;
25     // non-zero if inside syntactic predicate generation
26     protected int syntacticPredLevel = 0;
27
28     // Are we generating ASTs (for parsers and tree parsers) right now?
29     protected boolean genAST = false;
30
31     // Are we saving the text consumed (for lexers) right now?
32     protected boolean saveText = false;
33
34     // Generate #line's
35     protected boolean genHashLines = true;
36     // Generate constructors or not
37     protected boolean noConstructors = false;
38
39     // Used to keep track of lineno in output
40     protected int outputLine;
41     protected String outputFile;
42
43     // Grammar parameters set up to handle different grammar classes.
44     // These are used to get instanceof tests out of code generation
45     boolean usingCustomAST = false;
46     String labeledElementType;
47     String labeledElementASTType; // mostly the same as labeledElementType except in parsers
48     String labeledElementASTInit;
49     String labeledElementInit;
50     String commonExtraArgs;
51     String commonExtraParams;
52     String commonLocalVars;
53     String lt1Value;
54     String exceptionThrown;
55     String throwNoViable;
56
57     // Tracks the rule being generated. Used for mapTreeId
58     RuleBlock currentRule;
59     // Tracks the rule or labeled subrule being generated. Used for AST generation.
60     String currentASTResult;
61     // Mapping between the ids used in the current alt, and the
62     // names of variables used to represent their AST values.
63     Hashtable treeVariableMap = new Hashtable();
64
65     /** Used to keep track of which AST variables have been defined in a rule
66      * (except for the #rule_name and #rule_name_in var's
67      */
68     Hashtable declaredASTVariables = new Hashtable();
69
70     // Count of unnamed generated variables
71     int astVarNumber = 1;
72     // Special value used to mark duplicate in treeVariableMap
73     protected static final String NONUNIQUE = new String();
74
75     public static final int caseSizeThreshold = 127; // ascii is max
76
77     private Vector semPreds;
78
79     // Used to keep track of which heterogeneous AST types are used,
80     // which need to be set in the ASTFactory of the generated parser
81     private Vector astTypes;
82
83     private static String namespaceStd = "ANTLR_USE_NAMESPACE(std)";
84     private static String namespaceAntlr = "ANTLR_USE_NAMESPACE(antlr)";
85     private static NameSpace nameSpace = null;
86
87     private static final String preIncludeCpp = "pre_include_cpp";
88     private static final String preIncludeHpp = "pre_include_hpp";
89     private static final String postIncludeCpp = "post_include_cpp";
90     private static final String postIncludeHpp = "post_include_hpp";
91
92     /** Create a C++ code-generator using the given Grammar.
93      * The caller must still call setTool, setBehavior, and setAnalyzer
94      * before generating code.
95      */
96     public CppCodeGenerator() {
97         super();
98         charFormatter = new CppCharFormatter();
99     }
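    // Illustrative usage sketch (not part of the original source; the variable names are
    // hypothetical). Per the constructor comment above, the antlr Tool wires a generator
    // up roughly like this before asking it to emit code:
    //
    //     CppCodeGenerator codeGen = new CppCodeGenerator();
    //     codeGen.setTool(antlrTool);     // error reporting and output-file handling
    //     codeGen.setBehavior(behavior);  // the parsed grammar definitions
    //     codeGen.setAnalyzer(analyzer);  // the LL(k) grammar analyzer
    //     codeGen.gen();                  // emits MyParser.cpp/.hpp, MyLexer.cpp/.hpp, ...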
100     /** Adds a semantic predicate string to the sem pred vector
101         These strings will be used to build an array of sem pred names
102         when building a debugging parser. This method should only be
103         called when the debug option is specified
104      */
105     protected int addSemPred(String predicate) {
106         semPreds.appendElement(predicate);
107         return semPreds.size()-1;
108     }
109     public void exitIfError()
110     {
111         if (antlrTool.hasError())
112         {
113             antlrTool.fatalError("Exiting due to errors.");
114         }
115     }
116     protected int countLines( String s )
117     {
118         int lines = 0;
119         for( int i = 0; i < s.length(); i++ )
120         {
121             if( s.charAt(i) == '\n' )
122                 lines++;
123         }
124         return lines;
125     }
126     /** Output a String to the currentOutput stream.
127      * Ignored if string is null.
128      * @param s The string to output
129      */

130     protected void _print(String s)
131     {
132         if (s != null)
133         {
134             outputLine += countLines(s);
135             currentOutput.print(s);
136         }
137     }
138     /** Print an action without leading tabs, attempting to
139      * preserve the current indentation level for multi-line actions
140      * Ignored if string is null.
141      * @param s The action string to output
142      */

143     protected void _printAction(String s)
144     {
145         if (s != null)
146         {
147             outputLine += countLines(s)+1;
148             super._printAction(s);
149         }
150     }
151     /** Print an action stored in a token surrounded by #line stuff */
152     public void printAction(Token t)
153     {
154         if (t != null)
155         {
156             genLineNo(t.getLine());
157             printTabs();
158             _printAction(processActionForSpecialSymbols(t.getText(), t.getLine(),
159                                                                 null, null) );
160             genLineNo2();
161         }
162     }
163     /** Print a header action by #line stuff also process any tree construction
164      * @param name The name of the header part
165      */

166     public void printHeaderAction(String name)
167     {
168         Token a = (persistence.antlr.Token)behavior.headerActions.get(name);
169         if (a != null)
170         {
171             genLineNo(a.getLine());
172             println(processActionForSpecialSymbols(a.getText(), a.getLine(),
173                                                                 null, null) );
174             genLineNo2();
175         }
176     }
177     /** Output a String followed by newline, to the currentOutput stream.
178      * Ignored if string is null.
179      * @param s The string to output
180      */

181     protected void _println(String s) {
182         if (s != null) {
183             outputLine += countLines(s)+1;
184             currentOutput.println(s);
185         }
186     }
187     /** Output tab indent followed by a String followed by newline,
188      * to the currentOutput stream. Ignored if string is null.
189      * @param s The string to output
190      */

191     protected void println(String s) {
192         if (s != null) {
193             printTabs();
194             outputLine += countLines(s)+1;
195             currentOutput.println(s);
196         }
197     }
198
199     /** Generate a #line or // line depending on options */
200     public void genLineNo(int line) {
201         if ( line == 0 ) {
202             line++;
203         }
204         if( genHashLines )
205             _println("#line "+line+" \""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\"");
206     }
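    // Illustrative example (not part of the original source; file names and numbers are
    // hypothetical). With genHashLines enabled, genLineNo(42) for a grammar file
    // "MyGrammar.g" emits
    //
    //     #line 42 "MyGrammar.g"
    //
    // and genLineNo2() below re-syncs the directive to the generated file itself, e.g.
    //
    //     #line 137 "MyParser.cpp"
    //
    // so C++ compiler diagnostics point back at the right source.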
207
208     /** Generate a #line or // line depending on options */
209     public void genLineNo(GrammarElement el)
210     {
211         if( el != null )
212             genLineNo(el.getLine());
213     }
214     /** Generate a #line or // line depending on options */
215     public void genLineNo(Token t)
216     {
217         if (t != null)
218             genLineNo(t.getLine());
219     }
220     /** Generate a #line or // line depending on options */
221     public void genLineNo2()
222     {
223         if( genHashLines )
224         {
225             _println("#line "+(outputLine+1)+" \""+outputFile+"\"");
226         }
227     }
228     /** Sanitize a string coming from antlr's lexer to something that's ok
229      * Also bomb out on multibyte char attempts.
230      * The bombing out on mb char's is a bit crude but alas.
231      */

232     private String convertJavaToCppString( String s )
233     {
234         String ret = new String();
235
236         int i = 0;
237         int val;
238         while ( i < s.length() )
239         {
240             if( s.charAt(i) == '\\' )
241             {
242                 // deal with escaped junk
243                 switch ( s.charAt(i+1) ) {
244                 case 'b' :
245                 case 'r' :
246                 case 't' :
247                 case 'n' :
248                 case 'f' :
249                 case '"' :
250                 case '\'' :
251                 case '\\' :
252                     ret += "\\"+s.charAt(i+1);
253                     i+=2;
254                     continue;
255
256                 case 'u' :
257                     // Unicode char \u1234
258                     val = Character.digit(s.charAt(i+2), 16) * 16 * 16 * 16 +
259                         Character.digit(s.charAt(i+3), 16) * 16 * 16 +
260                         Character.digit(s.charAt(i+4), 16) * 16 +
261                         Character.digit(s.charAt(i+5), 16);
262                     i += 6;
263                     break;
264
265                 case '0' : // \123
266                 case '1' :
267                 case '2' :
268                 case '3' :
269                     if( Character.isDigit(s.charAt(i+2)) )
270                     {
271                         if( Character.isDigit(s.charAt(i+3)) )
272                         {
273                             val = (s.charAt(i+1)-'0')*8*8 + (s.charAt(i+2)-'0')*8 +
274                                 (s.charAt(i+3)-'0');
275                             i += 4;
276                         }
277                         else
278                         {
279                             val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0');
280                             i += 3;
281                         }
282                     }
283                     else
284                     {
285                         val = s.charAt(i+1)-'0';
286                         i += 2;
287                     }
288                     break;
289
290                 case '4' :
291                 case '5' :
292                 case '6' :
293                 case '7' :
294                     if ( Character.isDigit(s.charAt(i+2)) )
295                     {
296                         val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0');
297                         i += 3;
298                     }
299                     else
300                     {
301                         val = s.charAt(i+1)-'0';
302                         i += 2;
303                     }
304                 default:
305                     antlrTool.error("Unhandled escape in string constant: '"+s+"'");
306                     val = 0;
307                 }
308                 if( val >= ' ' && val <= 126 ) // just concat if printable
309                     ret += (char)val;
310                 else if( val > 255 ) // abort if multibyte
311                     antlrTool.error("Multibyte character found in string constant: '"+s+"'");
312                 else
313                     ret += charFormatter.escapeChar(val,true);
314             }
315             else if( s.charAt(i) >= ' ' && s.charAt(i) <= 126 )
316                 ret += s.charAt(i++);
317             else
318                 ret += charFormatter.escapeChar(s.charAt(i++),true);
319         }
320         // System.out.println("convertJavaToCppString: "+s+" -> "+ret);
321         return ret;
322     }
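    // Illustrative behaviour sketch (not part of the original source). Given the Java-style
    // literal text produced by antlr's lexer, the routine above yields a C++-safe string:
    //
    //     "\n"      stays   "\n"   (simple escapes are passed through)
    //     "\u0041"  becomes "A"    (printable Unicode collapses to the plain character)
    //     "\101"    becomes "A"    (octal escapes are decoded the same way)
    //
    // while any code point above 255 triggers the "Multibyte character" error seen above.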
323     /** Generate the parser, lexer, treeparser, and token types in C++
324      */

325     public void gen() {
326         // Do the code generation
327         try {
328             // Loop over all grammars
329             Enumeration grammarIter = behavior.grammars.elements();
330             while (grammarIter.hasMoreElements()) {
331                 Grammar g = (Grammar)grammarIter.nextElement();
332                 if ( g.debuggingOutput ) {
333                     antlrTool.error(g.getFilename()+": C++ mode does not support -debug");
334                 }
335                 // Connect all the components to each other
336                 g.setGrammarAnalyzer(analyzer);
337                 g.setCodeGenerator(this);
338                 analyzer.setGrammar(g);
339                 // To get right overloading behavior across heterogeneous grammars
340                 setupGrammarParameters(g);
341                 g.generate();
342                 exitIfError();
343             }
344
345             // Loop over all token managers (some of which are lexers)
346             Enumeration tmIter = behavior.tokenManagers.elements();
347             while (tmIter.hasMoreElements()) {
348                 TokenManager tm = (TokenManager)tmIter.nextElement();
349                 if (!tm.isReadOnly()) {
350                     // Write the token manager tokens as C++
351                     // this must appear before genTokenInterchange so that
352                     // labels are set on string literals
353                     genTokenTypes(tm);
354                     // Write the token manager tokens as plain text
355                     genTokenInterchange(tm);
356                 }
357                 exitIfError();
358             }
359         }
360         catch (IOException e) {
361             antlrTool.reportException(e, null);
362         }
363     }
364     /** Generate code for the given grammar element.
365      * @param blk The {...} action to generate
366      */

367     public void gen(ActionElement action) {
368         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genAction("+action+")");
369         if ( action.isSemPred ) {
370             genSemPred(action.actionText, action.line);
371         }
372         else {
373             if ( grammar.hasSyntacticPredicate ) {
374                 println("if ( inputState->guessing==0 ) {");
375                 tabs++;
376             }
377
378             ActionTransInfo tInfo = new ActionTransInfo();
379             String actionStr = processActionForSpecialSymbols(action.actionText,
380                                                                               action.getLine(),
381                                                                               currentRule, tInfo);
382
383             if ( tInfo.refRuleRoot!=null ) {
384                 // Somebody referenced "#rule", make sure translated var is valid
385                 // assignment to #rule is left as a ref also, meaning that assignments
386                 // with no other refs like "#rule = foo();" still forces this code to be
387                 // generated (unnecessarily).
388                 println(tInfo.refRuleRoot + " = "+labeledElementASTType+"(currentAST.root);");
389             }
390
391             // dump the translated action
392             genLineNo(action);
393             printAction(actionStr);
394             genLineNo2();
395
396             if ( tInfo.assignToRoot ) {
397                 // Somebody did a "#rule=", reset internal currentAST.root
398                 println("currentAST.root = "+tInfo.refRuleRoot+";");
399                 // reset the child pointer too to be last sibling in sibling list
400                 // now use if else instead of x ? y : z to shut CC 4.2 up.
401                 println("if ( "+tInfo.refRuleRoot+"!="+labeledElementASTInit+" &&");
402                 tabs++;
403                 println(tInfo.refRuleRoot+"->getFirstChild() != "+labeledElementASTInit+" )");
404                 println(" currentAST.child = "+tInfo.refRuleRoot+"->getFirstChild();");
405                 tabs--;
406                 println("else");
407                 tabs++;
408                 println("currentAST.child = "+tInfo.refRuleRoot+";");
409                 tabs--;
410                 println("currentAST.advanceChildToEnd();");
411             }
412
413             if ( grammar.hasSyntacticPredicate ) {
414                 tabs--;
415                 println("}");
416             }
417         }
418     }
419
420     /** Generate code for the given grammar element.
421      * @param blk The "x|y|z|..." block to generate
422      */

423     public void gen(AlternativeBlock blk) {
424         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen("+blk+")");
425         println("{");
426         genBlockPreamble(blk);
427         genBlockInitAction(blk);
428
429         // Tell AST generation to build subrule result
430         String saveCurrentASTResult = currentASTResult;
431         if (blk.getLabel() != null) {
432             currentASTResult = blk.getLabel();
433         }
434
435         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
436
437         CppBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
438         genBlockFinish(howToFinish, throwNoViable);
439
440         println("}");
441
442         // Restore previous AST generation
443         currentASTResult = saveCurrentASTResult;
444     }
445     /** Generate code for the given grammar element.
446      * @param blk The block-end element to generate. Block-end
447      * elements are synthesized by the grammar parser to represent
448      * the end of a block.
449      */

450     public void gen(BlockEndElement end) {
451         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")");
452     }
453     /** Generate code for the given grammar element.
454      * @param blk The character literal reference to generate
455      */

456     public void gen(CharLiteralElement atom) {
457         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
458             System.out.println("genChar("+atom+")");
459
460         if ( atom.getLabel()!=null ) {
461             println(atom.getLabel() + " = " + lt1Value + ";");
462         }
463
464         boolean oldsaveText = saveText;
465         saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
466         genMatch(atom);
467         saveText = oldsaveText;
468     }
469     /** Generate code for the given grammar element.
470      * @param blk The character-range reference to generate
471      */

472     public void gen(CharRangeElement r) {
473         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
474             System.out.println("genCharRangeElement("+r.beginText+".."+r.endText+")");
475
476         if ( r.getLabel()!=null && syntacticPredLevel == 0) {
477             println(r.getLabel() + " = " + lt1Value + ";");
478         }
479         // Correctly take care of saveIndex stuff...
480         boolean save = ( grammar instanceof LexerGrammar &&
481                               ( !saveText ||
482                                  r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG )
483                            );
484         if (save)
485          println("_saveIndex=text.length();");
486
487         if( grammar instanceof LexerGrammar )
488             println("matchRange("+convertJavaToCppString(r.beginText)+","+convertJavaToCppString(r.endText)+");");
489         else
490             println("matchRange("+r.beginText+","+r.endText+");");
491
492         if (save)
493          println("text.setLength(_saveIndex);");
494     }
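    // Illustrative output sketch (not part of the original source). For a lexer range such
    // as 'a'..'z' carrying a '!' suffix, the code above emits roughly
    //
    //     _saveIndex=text.length();
    //     matchRange('a','z');
    //     text.setLength(_saveIndex);
    //
    // so the character is matched but dropped from the saved token text.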
495     /** Generate the lexer C++ files */
496     public void gen(LexerGrammar g) throws IOException JavaDoc {
497         // If debugging, create a new sempred vector for this grammar
498         if (g.debuggingOutput)
499             semPreds = new Vector();
500
501         if( g.charVocabulary.size() > 256 )
502             antlrTool.warning(g.getFilename()+": C++ mode does not support more than 8 bit characters (vocabulary size now: "+g.charVocabulary.size()+")");
503
504         setGrammar(g);
505         if (!(grammar instanceof LexerGrammar)) {
506             antlrTool.panic("Internal error generating lexer");
507         }
508
509         genBody(g);
510         genInclude(g);
511     }
512     /** Generate code for the given grammar element.
513      * @param blk The (...)+ block to generate
514      */

515     public void gen(OneOrMoreBlock blk) {
516         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen+("+blk+")");
517         String JavaDoc label;
518         String JavaDoc cnt;
519         println("{ // ( ... )+");
520         genBlockPreamble(blk);
521         if ( blk.getLabel() != null ) {
522             cnt = "_cnt_"+blk.getLabel();
523         }
524         else {
525             cnt = "_cnt" + blk.ID;
526         }
527         println("int "+cnt+"=0;");
528         if ( blk.getLabel() != null ) {
529             label = blk.getLabel();
530         }
531         else {
532             label = "_loop" + blk.ID;
533         }
534
535         println("for (;;) {");
536         tabs++;
537         // generate the init action for ()+ ()* inside the loop
538         // this allows us to do useful EOF checking...
539         genBlockInitAction(blk);
540
541         // Tell AST generation to build subrule result
542         String saveCurrentASTResult = currentASTResult;
543         if (blk.getLabel() != null) {
544             currentASTResult = blk.getLabel();
545         }
546
547         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
548
549         // generate exit test if greedy set to false
550         // and an alt is ambiguous with exit branch
551         // or when lookahead derived purely from end-of-file
552         // Lookahead analysis stops when end-of-file is hit,
553         // returning set {epsilon}. Since {epsilon} is not
554         // ambig with any real tokens, no error is reported
555         // by deterministic() routines and we have to check
556         // for the case where the lookahead depth didn't get
557         // set to NONDETERMINISTIC (this only happens when the
558         // FOLLOW contains real atoms + epsilon).
559         boolean generateNonGreedyExitPath = false;
560         int nonGreedyExitDepth = grammar.maxk;
561
562         if ( !blk.greedy &&
563              blk.exitLookaheadDepth<=grammar.maxk &&
564              blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
565         {
566             generateNonGreedyExitPath = true;
567             nonGreedyExitDepth = blk.exitLookaheadDepth;
568         }
569         else if ( !blk.greedy &&
570                   blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
571         {
572             generateNonGreedyExitPath = true;
573         }
574
575         // generate exit test if greedy set to false
576         // and an alt is ambiguous with exit branch
577         if ( generateNonGreedyExitPath ) {
578             if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
579                 System.out.println("nongreedy (...)+ loop; exit depth is "+
580                                    blk.exitLookaheadDepth);
581             }
582             String predictExit =
583                 getLookaheadTestExpression(blk.exitCache,
584                                            nonGreedyExitDepth);
585             println("// nongreedy exit test");
586             println("if ( "+cnt+">=1 && "+predictExit+") goto "+label+";");
587         }
588
589         CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
590         genBlockFinish(
591             howToFinish,
592             "if ( "+cnt+">=1 ) { goto "+label+"; } else {" + throwNoViable + "}"
593         );
594
595         println(cnt+"++;");
596         tabs--;
597         println("}");
598         println(label+":;");
599         println("} // ( ... )+");
600
601         // Restore previous AST generation
602         currentASTResult = saveCurrentASTResult;
603     }
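    // Illustrative output sketch (not part of the original source; "3" stands in for
    // blk.ID). A ( ... )+ subrule comes out as a counted loop of roughly this shape:
    //
    //     { // ( ... )+
    //     int _cnt3=0;
    //     for (;;) {
    //         ... alternative-selection code; when nothing matches:
    //         if ( _cnt3>=1 ) { goto _loop3; } else { ...throw exception... }
    //         _cnt3++;
    //     }
    //     _loop3:;
    //     } // ( ... )+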
604     /** Generate the parser C++ file */
605     public void gen(ParserGrammar g) throws IOException JavaDoc {
606
607         // if debugging, set up a new vector to keep track of sempred
608         // strings for this grammar
609         if (g.debuggingOutput)
610             semPreds = new Vector();
611
612         setGrammar(g);
613         if (!(grammar instanceof ParserGrammar)) {
614             antlrTool.panic("Internal error generating parser");
615         }
616
617         genBody(g);
618         genInclude(g);
619     }
620     /** Generate code for the given grammar element.
621      * @param blk The rule-reference to generate
622      */

623     public void gen(RuleRefElement rr)
624     {
625         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRR("+rr+")");
626         RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
627         if (rs == null || !rs.isDefined())
628         {
629             // Is this redundant???
630             antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
631             return;
632         }
633         if (!(rs instanceof RuleSymbol))
634         {
635             // Is this redundant???
636             antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
637             return;
638         }
639
640         genErrorTryForElement(rr);
641
642         // AST value for labeled rule refs in tree walker.
643         // This is not AST construction; it is just the input tree node value.
644         if ( grammar instanceof TreeWalkerGrammar &&
645             rr.getLabel() != null &&
646             syntacticPredLevel == 0 )
647         {
648             println(rr.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : "+lt1Value+";");
649         }
650
651         // if in lexer and ! on rule ref or alt or rule, save buffer index to
652         // kill later
653         if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
654         {
655             println("_saveIndex = text.length();");
656         }
657
658         // Process return value assignment if any
659         printTabs();
660         if (rr.idAssign != null)
661         {
662             // Warn if the rule has no return type
663             if (rs.block.returnAction == null)
664             {
665                 antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
666             }
667             _print(rr.idAssign + "=");
668         } else {
669             // Warn about return value if any, but not inside syntactic predicate
670             if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
671             {
672                 antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
673             }
674         }
675
676         // Call the rule
677         GenRuleInvocation(rr);
678
679         // if in lexer and ! on element or alt or rule, save buffer index to kill later
680         if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
681             println("text.erase(_saveIndex);");
682         }
683
684         // if not in a syntactic predicate
685         if (syntacticPredLevel == 0)
686         {
687             boolean doNoGuessTest = (
688                 grammar.hasSyntacticPredicate &&
689                 (
690                     grammar.buildAST && rr.getLabel() != null ||
691                     (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
692                 )
693             );
694
695             if (doNoGuessTest) {
696                 println("if (inputState->guessing==0) {");
697                 tabs++;
698             }
699
700             if (grammar.buildAST && rr.getLabel() != null)
701             {
702                 // always gen variable for rule return on labeled rules
703                 // RK: hmm do I know here if the returnAST needs a cast ?
704                 println(rr.getLabel() + "_AST = returnAST;");
705             }
706
707             if (genAST)
708             {
709                 switch (rr.getAutoGenType())
710                 {
711                 case GrammarElement.AUTO_GEN_NONE:
712                     if( usingCustomAST )
713                         println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST(returnAST));");
714                     else
715                         println("astFactory->addASTChild( currentAST, returnAST );");
716                     break;
717                 case GrammarElement.AUTO_GEN_CARET:
718                     // FIXME: RK: I'm not so sure this should be an error..
719                     // I think it might actually work and be useful at times.
720                     antlrTool.error("Internal: encountered ^ after rule reference");
721                     break;
722                 default:
723                     break;
724                 }
725             }
726
727             // if a lexer and labeled, Token label defined at rule level, just set it here
728             if ( grammar instanceof LexerGrammar && rr.getLabel() != null )
729             {
730                 println(rr.getLabel()+"=_returnToken;");
731             }
732
733             if (doNoGuessTest)
734             {
735                 tabs--;
736                 println("}");
737             }
738         }
739         genErrorCatchForElement(rr);
740     }
741     /** Generate code for the given grammar element.
742      * @param blk The string-literal reference to generate
743      */

744     public void gen(StringLiteralElement atom) {
745         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genString("+atom+")");
746
747         // Variable declarations for labeled elements
748         if (atom.getLabel()!=null && syntacticPredLevel == 0) {
749             println(atom.getLabel() + " = " + lt1Value + ";");
750         }
751
752         // AST
753         genElementAST(atom);
754
755         // is there a bang on the literal?
756         boolean oldsaveText = saveText;
757         saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
758
759         // matching
760         genMatch(atom);
761
762         saveText = oldsaveText;
763
764         // tack on tree cursor motion if doing a tree walker
765         if (grammar instanceof TreeWalkerGrammar) {
766             println("_t = _t->getNextSibling();");
767         }
768     }
769     /** Generate code for the given grammar element.
770      * @param blk The token-range reference to generate
771      */

772     public void gen(TokenRangeElement r) {
773         genErrorTryForElement(r);
774         if ( r.getLabel()!=null && syntacticPredLevel == 0) {
775             println(r.getLabel() + " = " + lt1Value + ";");
776         }
777
778         // AST
779         genElementAST(r);
780
781         // match
782         println("matchRange("+r.beginText+","+r.endText+");");
783         genErrorCatchForElement(r);
784     }
785     /** Generate code for the given grammar element.
786      * @param blk The token-reference to generate
787      */

788     public void gen(TokenRefElement atom) {
789         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")");
790         if ( grammar instanceof LexerGrammar ) {
791             antlrTool.panic("Token reference found in lexer");
792         }
793         genErrorTryForElement(atom);
794         // Assign Token value to token label variable
795         if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
796             println(atom.getLabel() + " = " + lt1Value + ";");
797         }
798
799         // AST
800         genElementAST(atom);
801         // matching
802         genMatch(atom);
803         genErrorCatchForElement(atom);
804
805         // tack on tree cursor motion if doing a tree walker
806         if (grammar instanceof TreeWalkerGrammar) {
807             println("_t = _t->getNextSibling();");
808         }
809     }
810     public void gen(TreeElement t) {
811         // save AST cursor
812         println(labeledElementType+" __t" + t.ID + " = _t;");
813
814         // If there is a label on the root, then assign that to the variable
815         if (t.root.getLabel() != null) {
816             println(t.root.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;");
817         }
818
819         // check for invalid modifiers ! and ^ on tree element roots
820         if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
821             antlrTool.error("Suffixing a root node with '!' is not implemented",
822                           grammar.getFilename(), t.getLine(), t.getColumn());
823             t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
824         }
825         if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
826             antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
827                              grammar.getFilename(), t.getLine(), t.getColumn());
828             t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
829         }
830
831         // Generate AST variables
832         genElementAST(t.root);
833         if (grammar.buildAST) {
834             // Save the AST construction state
835             println(namespaceAntlr+"ASTPair __currentAST" + t.ID + " = currentAST;");
836             // Make the next item added a child of the TreeElement root
837             println("currentAST.root = currentAST.child;");
838             println("currentAST.child = "+labeledElementASTInit+";");
839         }
840
841         // match root
842
if ( t.root instanceof WildcardElement ) {
843             println("if ( _t == ASTNULL ) throw "+namespaceAntlr+"MismatchedTokenException();");
844         }
845         else {
846             genMatch(t.root);
847         }
848         // move to list of children
849         println("_t = _t->getFirstChild();");
850
851         // walk list of children, generating code for each
852         for (int i=0; i<t.getAlternatives().size(); i++) {
853             Alternative a = t.getAlternativeAt(i);
854             AlternativeElement e = a.head;
855             while ( e != null ) {
856                 e.generate();
857                 e = e.next;
858             }
859         }
860
861         if (grammar.buildAST) {
862             // restore the AST construction state to that just after the
863             // tree root was added
864             println("currentAST = __currentAST" + t.ID + ";");
865         }
866         // restore AST cursor
867         println("_t = __t" + t.ID + ";");
868         // move cursor to sibling of tree just parsed
869         println("_t = _t->getNextSibling();");
870     }
871     /** Generate the tree-parser C++ files */
872     public void gen(TreeWalkerGrammar g) throws IOException JavaDoc {
873         setGrammar(g);
874         if (!(grammar instanceof TreeWalkerGrammar)) {
875             antlrTool.panic("Internal error generating tree-walker");
876         }
877
878         genBody(g);
879         genInclude(g);
880     }
881     /** Generate code for the given grammar element.
882      * @param wc The wildcard element to generate
883      */

884     public void gen(WildcardElement wc) {
885         // Variable assignment for labeled elements
886         if (wc.getLabel()!=null && syntacticPredLevel == 0) {
887             println(wc.getLabel() + " = " + lt1Value + ";");
888         }
889
890         // AST
891         genElementAST(wc);
892         // Match anything but EOF
893         if (grammar instanceof TreeWalkerGrammar) {
894             println("if ( _t == "+labeledElementASTInit+" ) throw "+namespaceAntlr+"MismatchedTokenException();");
895         }
896         else if (grammar instanceof LexerGrammar) {
897             if ( grammar instanceof LexerGrammar &&
898                     (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
899                 println("_saveIndex = text.length();");
900             }
901             println("matchNot(EOF/*_CHAR*/);");
902             if ( grammar instanceof LexerGrammar &&
903                     (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
904                 println("text.erase(_saveIndex);"); // kill text atom put in buffer
905
}
906         }
907         else {
908             println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
909         }
910
911         // tack on tree cursor motion if doing a tree walker
912
if (grammar instanceof TreeWalkerGrammar) {
913             println("_t = _t->getNextSibling();");
914         }
915     }
916     /** Generate code for the given grammar element.
917      * @param blk The (...)* block to generate
918      */

919     public void gen(ZeroOrMoreBlock blk) {
920         if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen*("+blk+")");
921         println("{ // ( ... )*");
922         genBlockPreamble(blk);
923         String JavaDoc label;
924         if ( blk.getLabel() != null ) {
925             label = blk.getLabel();
926         }
927         else {
928             label = "_loop" + blk.ID;
929         }
930         println("for (;;) {");
931         tabs++;
932         // generate the init action for ()+ ()* inside the loop
933         // this allows us to do useful EOF checking...
934         genBlockInitAction(blk);
935
936         // Tell AST generation to build subrule result
937         String saveCurrentASTResult = currentASTResult;
938         if (blk.getLabel() != null) {
939             currentASTResult = blk.getLabel();
940         }
941
942         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
943
944         // generate exit test if greedy set to false
945         // and an alt is ambiguous with exit branch
946         // or when lookahead derived purely from end-of-file
947         // Lookahead analysis stops when end-of-file is hit,
948         // returning set {epsilon}. Since {epsilon} is not
949         // ambig with any real tokens, no error is reported
950         // by deterministic() routines and we have to check
951         // for the case where the lookahead depth didn't get
952         // set to NONDETERMINISTIC (this only happens when the
953         // FOLLOW contains real atoms + epsilon).
954         boolean generateNonGreedyExitPath = false;
955         int nonGreedyExitDepth = grammar.maxk;
956
957         if ( !blk.greedy &&
958              blk.exitLookaheadDepth<=grammar.maxk &&
959              blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
960         {
961             generateNonGreedyExitPath = true;
962             nonGreedyExitDepth = blk.exitLookaheadDepth;
963         }
964         else if ( !blk.greedy &&
965                   blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
966         {
967             generateNonGreedyExitPath = true;
968         }
969         if ( generateNonGreedyExitPath ) {
970             if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
971                 System.out.println("nongreedy (...)* loop; exit depth is "+
972                                    blk.exitLookaheadDepth);
973             }
974             String predictExit =
975                 getLookaheadTestExpression(blk.exitCache,
976                                            nonGreedyExitDepth);
977             println("// nongreedy exit test");
978             println("if ("+predictExit+") goto "+label+";");
979         }
980
981         CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
982         genBlockFinish(howToFinish, "goto " + label + ";");
983
984         tabs--;
985         println("}");
986         println(label+":;");
987         println("} // ( ... )*");
988
989         // Restore previous AST generation
990         currentASTResult = saveCurrentASTResult;
991     }
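    // Illustrative output sketch (not part of the original source; "4" stands in for
    // blk.ID). A ( ... )* subrule is the same loop shape without the _cnt counter:
    //
    //     { // ( ... )*
    //     for (;;) {
    //         ... alternative-selection code; "goto _loop4;" when nothing matches ...
    //     }
    //     _loop4:;
    //     } // ( ... )*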
992     /** Generate an alternative.
993       * @param alt The alternative to generate
994       * @param blk The block to which the alternative belongs
995       */

996     protected void genAlt(Alternative alt, AlternativeBlock blk)
997     {
998         // Save the AST generation state, and set it to that of the alt
999         boolean savegenAST = genAST;
1000        genAST = genAST && alt.getAutoGen();
1001
1002        boolean oldsaveTest = saveText;
1003        saveText = saveText && alt.getAutoGen();
1004
1005        // Reset the variable name map for the alternative
1006        Hashtable saveMap = treeVariableMap;
1007        treeVariableMap = new Hashtable();
1008
1009        // Generate try block around the alt for error handling
1010
if (alt.exceptionSpec != null) {
1011            println("try { // for error handling");
1012            tabs++;
1013        }
1014
1015        AlternativeElement elem = alt.head;
1016        while ( !(elem instanceof BlockEndElement) ) {
1017            elem.generate(); // alt can begin with anything. Ask target to gen.
1018
elem = elem.next;
1019        }
1020
1021        if ( genAST)
1022        {
1023            if (blk instanceof RuleBlock)
1024            {
1025                // Set the AST return value for the rule
1026
RuleBlock rblk = (RuleBlock)blk;
1027                if( usingCustomAST )
1028                    println(rblk.getRuleName() + "_AST = "+labeledElementASTType+"(currentAST.root);");
1029                else
1030                    println(rblk.getRuleName() + "_AST = currentAST.root;");
1031            }
1032            else if (blk.getLabel() != null) {
1033                // ### future: also set AST value for labeled subrules.
1034                // println(blk.getLabel() + "_AST = "+labeledElementASTType+"(currentAST.root);");
1035                antlrTool.warning("Labeled subrules are not implemented", grammar.getFilename(), blk.getLine(), blk.getColumn());
1036            }
1037        }
1038
1039        if (alt.exceptionSpec != null)
1040        {
1041            // close try block
1042
tabs--;
1043            println("}");
1044            genErrorHandler(alt.exceptionSpec);
1045        }
1046
1047        genAST = savegenAST;
1048        saveText = oldsaveTest;
1049
1050        treeVariableMap = saveMap;
1051    }
1052    /** Generate all the bitsets to be used in the parser or lexer
1053     * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
1054     * and the BitSet object declarations like
1055     * "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
1056     * Note that most languages do not support object initialization inside a
1057     * class definition, so other code-generators may have to separate the
1058     * bitset declarations from the initializations (e.g., put the
1059     * initializations in the generated constructor instead).
1060     * @param bitsetList The list of bitsets to generate.
1061     * @param maxVocabulary Ensure that each generated bitset can contain at
1062     * least this value.
1063     * @param prefix string glued in front of bitset names, used for namespace
1064     * qualification.
1065     */
1066    protected void genBitsets(
1067        Vector bitsetList,
1068        int maxVocabulary,
1069        String prefix
1070    )
1071    {
1072        TokenManager tm = grammar.tokenManager;
1073
1074        println("");
1075
1076        for (int i = 0; i < bitsetList.size(); i++)
1077        {
1078            BitSet p = (BitSet)bitsetList.elementAt(i);
1079            // Ensure that generated BitSet is large enough for vocabulary
1080            p.growToInclude(maxVocabulary);
1081
1082            // initialization data
1083            println(
1084                "const unsigned long " + prefix + getBitsetName(i) + "_data_" + "[] = { " +
1085                p.toStringOfHalfWords() +
1086                " };"
1087            );
1088
1089            // Dump the contents of the bitset in readable format...
1090            String t = "// ";
1091            for( int j = 0; j < tm.getVocabulary().size(); j++ )
1092            {
1093                if ( p.member( j ) )
1094                {
1095                    if ( (grammar instanceof LexerGrammar) )
1096                        t += tm.getVocabulary().elementAt(j)+" ";
1097                    else
1098                        t += tm.getTokenStringAt(j)+" ";
1099
1100                    if( t.length() > 70 )
1101                    {
1102                        println(t);
1103                        t = "// ";
1104                    }
1105                }
1106            }
1107            if ( t != "// " )
1108                println(t);
1109
1110            // BitSet object
1111            println(
1112                "const "+namespaceAntlr+"BitSet " + prefix + getBitsetName(i) + "(" +
1113                getBitsetName(i) + "_data_," + p.size()/32 +
1114                ");"
1115            );
1116        }
1117    }
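    // Illustrative output sketch (not part of the original source; the class name, bitset
    // name and contents are hypothetical). For a parser class MyParser, each bitset comes
    // out roughly as
    //
    //     const unsigned long MyParser::_tokenSet_0_data_[] = { 2UL, 0UL, 0UL, 0UL };
    //     // EOF
    //     const ANTLR_USE_NAMESPACE(antlr)BitSet MyParser::_tokenSet_0(_tokenSet_0_data_,4);
    //
    // where the trailing 4 is p.size()/32 and the comment line is the readable dump above.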
1118    protected void genBitsetsHeader(
1119        Vector bitsetList,
1120        int maxVocabulary
1121    ) {
1122        println("");
1123        for (int i = 0; i < bitsetList.size(); i++)
1124        {
1125            BitSet p = (BitSet)bitsetList.elementAt(i);
1126            // Ensure that generated BitSet is large enough for vocabulary
1127            p.growToInclude(maxVocabulary);
1128            // initialization data
1129            println("static const unsigned long " + getBitsetName(i) + "_data_" + "[];");
1130            // BitSet object
1131            println("static const "+namespaceAntlr+"BitSet " + getBitsetName(i) + ";");
1132        }
1133    }
1134    /** Generate the finish of a block, using a combination of the info
1135     * returned from genCommonBlock() and the action to perform when
1136     * no alts were taken
1137     * @param howToFinish The return of genCommonBlock()
1138     * @param noViableAction What to generate when no alt is taken
1139     */

1140    private void genBlockFinish(CppBlockFinishingInfo howToFinish, String noViableAction)
1141    {
1142        if (howToFinish.needAnErrorClause &&
1143             (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
1144            if ( howToFinish.generatedAnIf ) {
1145                println("else {");
1146            }
1147            else {
1148                println("{");
1149            }
1150            tabs++;
1151            println(noViableAction);
1152            tabs--;
1153            println("}");
1154        }
1155
1156        if ( howToFinish.postscript!=null ) {
1157            println(howToFinish.postscript);
1158        }
1159    }
1160    /** Generate the init action for a block, which may be a RuleBlock or a
1161     * plain AlternativeBlock.
1162     * @blk The block for which the preamble is to be generated.
1163     */

1164    protected void genBlockInitAction( AlternativeBlock blk )
1165    {
1166        // dump out init action
1167
if ( blk.initAction!=null ) {
1168            genLineNo(blk);
1169            printAction(processActionForSpecialSymbols(blk.initAction, blk.line,
1170                                                                     currentRule, null) );
1171            genLineNo2();
1172        }
1173    }
1174    /** Generate the header for a block, which may be a RuleBlock or a
1175     * plain AlternativeBlock. This generates any variable declarations
1176     * and syntactic-predicate-testing variables.
1177     * @blk The block for which the preamble is to be generated.
1178     */

1179    protected void genBlockPreamble(AlternativeBlock blk) {
1180        // define labels for rule blocks.
1181
if ( blk instanceof RuleBlock ) {
1182            RuleBlock rblk = (RuleBlock)blk;
1183            if ( rblk.labeledElements!=null ) {
1184                for (int i=0; i<rblk.labeledElements.size(); i++) {
1185
1186                    AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
1187                    //System.out.println("looking at labeled element: "+a);
1188                    // Variables for labeled rule refs and subrules are different than
1189                    // variables for grammar atoms. This test is a little tricky because
1190                    // we want to get all rule refs and ebnf, but not rule blocks or
1191                    // syntactic predicates
1192                    if (
1193                        a instanceof RuleRefElement ||
1194                        a instanceof AlternativeBlock &&
1195                        !(a instanceof RuleBlock) &&
1196                        !(a instanceof SynPredBlock) )
1197                    {
1198                        if ( !(a instanceof RuleRefElement) &&
1199                              ((AlternativeBlock)a).not &&
1200                              analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
1201                        ) {
1202                            // Special case for inverted subrules that will be
1203                            // inlined. Treat these like token or char literal
1204                            // references
1205                            println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1206                            if (grammar.buildAST) {
1207                                genASTDeclaration( a );
1208                            }
1209                        }
1210                        else
1211                        {
1212                            if (grammar.buildAST)
1213                            {
1214                                // Always gen AST variables for labeled elements,
1215
// even if the element itself is marked with !
1216
genASTDeclaration( a );
1217                            }
1218                            if ( grammar instanceof LexerGrammar )
1219                                println(namespaceAntlr+"RefToken "+a.getLabel()+";");
1220
1221                            if (grammar instanceof TreeWalkerGrammar) {
1222                                // always generate rule-ref variables for tree walker
1223
println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1224                            }
1225                        }
1226                    }
1227                    else
1228                    {
1229                        // It is a token or literal reference. Generate the
1230                        // correct variable type for this grammar
1231                        println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1232                        // In addition, generate *_AST variables if building ASTs
1233                        if (grammar.buildAST)
1234                        {
1235                            if (a instanceof GrammarAtom &&
1236                                 ((GrammarAtom)a).getASTNodeType() != null )
1237                            {
1238                                GrammarAtom ga = (GrammarAtom)a;
1239                                genASTDeclaration( a, "Ref"+ga.getASTNodeType() );
1240                            }
1241                            else
1242                            {
1243                                genASTDeclaration( a );
1244                            }
1245                        }
1246                    }
1247                }
1248            }
1249        }
1250    }
1251    public void genBody(LexerGrammar g) throws IOException JavaDoc
1252    {
1253        outputFile = grammar.getClassName() + ".cpp";
1254        outputLine = 1;
1255        currentOutput = antlrTool.openOutputFile(outputFile);
1256        //SAS: changed for proper text file io
1257
1258        genAST = false; // no way to gen trees.
1259        saveText = true; // save consumed characters.
1260
1261        tabs=0;
1262
1263        // Generate header common to all C++ output files
1264        genHeader(outputFile);
1265
1266        printHeaderAction(preIncludeCpp);
1267        // Generate header specific to lexer C++ file
1268        println("#include \"" + grammar.getClassName() + ".hpp\"");
1269        println("#include <antlr/CharBuffer.hpp>");
1270        println("#include <antlr/TokenStreamException.hpp>");
1271        println("#include <antlr/TokenStreamIOException.hpp>");
1272        println("#include <antlr/TokenStreamRecognitionException.hpp>");
1273        println("#include <antlr/CharStreamException.hpp>");
1274        println("#include <antlr/CharStreamIOException.hpp>");
1275        println("#include <antlr/NoViableAltForCharException.hpp>");
1276        if (grammar.debuggingOutput)
1277            println("#include <antlr/DebuggingInputBuffer.hpp>");
1278        println("");
1279        printHeaderAction(postIncludeCpp);
1280
1281        if (nameSpace != null)
1282            nameSpace.emitDeclarations(currentOutput);
1283
1284        // Generate user-defined lexer file preamble
1285        printAction(grammar.preambleAction);
1286
1287        // Generate lexer class definition
1288        String sup=null;
1289        if ( grammar.superClass!=null ) {
1290            sup = grammar.superClass;
1291        }
1292        else {
1293            sup = grammar.getSuperClass();
1294            if (sup.lastIndexOf('.') != -1)
1295                sup = sup.substring(sup.lastIndexOf('.')+1);
1296            sup = namespaceAntlr + sup;
1297        }
1298
1299        if( noConstructors )
1300        {
1301            println("#if 0");
1302            println("// constructor creation turned off with 'noConstructor' option");
1303        }
1304        //
1305        // Generate the constructor from InputStream
1306        //
1307        println(grammar.getClassName() + "::" + grammar.getClassName() + "(" + namespaceStd + "istream& in)");
1308        tabs++;
1309        // if debugging, wrap the input buffer in a debugger
1310        if (grammar.debuggingOutput)
1311            println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(new "+namespaceAntlr+"CharBuffer(in)),"+g.caseSensitive+")");
1312        else
1313            println(": " + sup + "(new "+namespaceAntlr+"CharBuffer(in),"+g.caseSensitive+")");
1314        tabs--;
1315        println("{");
1316        tabs++;
1317
1318        // if debugging, set up array variables and call user-overridable
1319        // debugging setup method
1320        if ( grammar.debuggingOutput ) {
1321            println("setRuleNames(_ruleNames);");
1322            println("setSemPredNames(_semPredNames);");
1323            println("setupDebugging();");
1324        }
1325
1326// println("setCaseSensitive("+g.caseSensitive+");");
1327        println("initLiterals();");
1328        tabs--;
1329        println("}");
1330        println("");
1331
1332        // Generate the constructor from InputBuffer
1333
println(grammar.getClassName() + "::" + grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib)");
1334        tabs++;
1335        // if debugging, wrap the input buffer in a debugger
1336
if (grammar.debuggingOutput)
1337            println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(ib),"+g.caseSensitive+")");
1338        else
1339            println(": " + sup + "(ib,"+g.caseSensitive+")");
1340        tabs--;
1341        println("{");
1342        tabs++;
1343
1344        // if debugging, set up array variables and call user-overridable
1345
// debugging setup method
1346
if ( grammar.debuggingOutput ) {
1347            println("setRuleNames(_ruleNames);");
1348            println("setSemPredNames(_semPredNames);");
1349            println("setupDebugging();");
1350        }
1351
1352// println("setCaseSensitive("+g.caseSensitive+");");
1353
println("initLiterals();");
1354        tabs--;
1355        println("}");
1356        println("");
1357
1358        // Generate the constructor from LexerSharedInputState
1359
println(grammar.getClassName() + "::" + grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state)");
1360        tabs++;
1361        println(": " + sup + "(state,"+g.caseSensitive+")");
1362        tabs--;
1363        println("{");
1364        tabs++;
1365
1366        // if debugging, set up array variables and call user-overridable
1367
// debugging setup method
1368
if ( grammar.debuggingOutput ) {
1369            println("setRuleNames(_ruleNames);");
1370            println("setSemPredNames(_semPredNames);");
1371            println("setupDebugging();");
1372        }
1373
1374// println("setCaseSensitive("+g.caseSensitive+");");
1375
println("initLiterals();");
1376        tabs--;
1377        println("}");
1378        println("");
1379
1380        if( noConstructors )
1381        {
1382            println("// constructor creation turned off with 'noConstructor' option");
1383            println("#endif");
1384        }
1385
1386        println("void " + grammar.getClassName() + "::initLiterals()");
1387        println("{");
1388        tabs++;
1389        // Generate the initialization of the map
1390        // containing the string literals used in the lexer
1391        // The literals variable itself is in CharScanner
1392        Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
1393        while ( keys.hasMoreElements() ) {
1394            String JavaDoc key = (String JavaDoc)keys.nextElement();
1395            if ( key.charAt(0) != '"' ) {
1396                continue;
1397            }
1398            TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
1399            if ( sym instanceof StringLiteralSymbol ) {
1400                StringLiteralSymbol s = (StringLiteralSymbol)sym;
1401                println("literals["+s.getId()+"] = "+s.getTokenType()+";");
1402            }
1403        }
1404
1405        // Generate the setting of various generated options.
1406
tabs--;
1407        println("}");
1408
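    /* Illustrative only: for a grammar containing the string literals "if" and
     * "while", the initLiterals() body generated above looks roughly like this
     * (the token type numbers are hypothetical):
     *
     *   void MyLexer::initLiterals()
     *   {
     *       literals["if"] = 4;
     *       literals["while"] = 5;
     *   }
     */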
1409        Enumeration ids;
1410        // generate the rule name array for debugging
1411        if (grammar.debuggingOutput) {
1412            println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
1413            tabs++;
1414
1415            ids = grammar.rules.elements();
1416            int ruleNum=0;
1417            while ( ids.hasMoreElements() ) {
1418                GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1419                if ( sym instanceof RuleSymbol)
1420                    println("\""+((RuleSymbol)sym).getId()+"\",");
1421            }
1422            println("0");
1423            tabs--;
1424            println("};");
1425        }
1426
1427        // Generate nextToken() rule.
1428        // nextToken() is a synthetic lexer rule that is the implicit OR of all
1429        // user-defined lexer rules.
1430        genNextToken();
1431
1432        // Generate code for each rule in the lexer
1433        ids = grammar.rules.elements();
1434        int ruleNum=0;
1435        while ( ids.hasMoreElements() ) {
1436            RuleSymbol sym = (RuleSymbol) ids.nextElement();
1437            // Don't generate the synthetic rules
1438            if (!sym.getId().equals("mnextToken")) {
1439                genRule(sym, false, ruleNum++, grammar.getClassName() + "::");
1440            }
1441            exitIfError();
1442        }
1443
1444        // Generate the semantic predicate map for debugging
1445        if (grammar.debuggingOutput)
1446            genSemPredMap(grammar.getClassName() + "::");
1447
1448        // Generate the bitsets used throughout the lexer
1449        genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size(), grammar.getClassName() + "::" );
1450
1451        println("");
1452        if (nameSpace != null)
1453            nameSpace.emitClosures(currentOutput);
1454
1455        // Close the lexer output stream
1456        currentOutput.close();
1457        currentOutput = null;
1458    }
1459    public void genInitFactory( Grammar g )
1460    {
1461        // Generate the method to initialize an ASTFactory when we're
1462        // building AST's
1463        String param_name = "factory ";
1464        if( ! g.buildAST )
1465            param_name = "";
1466
1467        println("void "+ g.getClassName() + "::initializeASTFactory( "+namespaceAntlr+"ASTFactory& "+param_name+")");
1468        println("{");
1469        tabs++;
1470
1471        if( g.buildAST )
1472        {
1473            // print out elements collected...
1474            Enumeration e = astTypes.elements();
1475            while( e.hasMoreElements() )
1476                println((String)e.nextElement());
1477
1478            println("factory.setMaxNodeType("+grammar.tokenManager.maxTokenType()+");");
1479        }
1480        tabs--;
1481        println("}");
1482    }
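    /* Illustrative only: when buildAST is set, the generated initializeASTFactory()
     * simply prints each string collected in astTypes (one per heterogeneous AST node
     * type used while the rules were generated) and then caps the node-type range,
     * roughly:
     *
     *   void MyParser::initializeASTFactory( antlr::ASTFactory& factory )
     *   {
     *       ... one line per entry collected in astTypes ...
     *       factory.setMaxNodeType(<maxTokenType>);
     *   }
     */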
1483    // FIXME: and so why are we passing here a g param while inside
1484    // we merrily use the global grammar.
1485    public void genBody(ParserGrammar g) throws IOException
1486    {
1487        // Open the output stream for the parser and set the currentOutput
1488        outputFile = grammar.getClassName() + ".cpp";
1489        outputLine = 1;
1490        currentOutput = antlrTool.openOutputFile(outputFile);
1491
1492        genAST = grammar.buildAST;
1493
1494        tabs = 0;
1495
1496        // Generate the header common to all output files.
1497        genHeader(outputFile);
1498
1499        printHeaderAction(preIncludeCpp);
1500
1501        // Generate header for the parser
1502        println("#include \"" + grammar.getClassName() + ".hpp\"");
1503        println("#include <antlr/NoViableAltException.hpp>");
1504        println("#include <antlr/SemanticException.hpp>");
1505        println("#include <antlr/ASTFactory.hpp>");
1506
1507        printHeaderAction(postIncludeCpp);
1508
1509        if (nameSpace != null)
1510            nameSpace.emitDeclarations(currentOutput);
1511
1512        // Output the user-defined parser preamble
1513        printAction(grammar.preambleAction);
1514
1515        String sup=null;
1516        if ( grammar.superClass!=null )
1517            sup = grammar.superClass;
1518        else {
1519            sup = grammar.getSuperClass();
1520            if (sup.lastIndexOf('.') != -1)
1521                sup = sup.substring(sup.lastIndexOf('.')+1);
1522            sup = namespaceAntlr + sup;
1523        }
1524
1525        // set up an array of all the rule names so the debugger can
1526        // keep track of them only by number -- less to store in tree...
1527        if (grammar.debuggingOutput) {
1528            println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
1529            tabs++;
1530
1531            Enumeration ids = grammar.rules.elements();
1532            int ruleNum=0;
1533            while ( ids.hasMoreElements() ) {
1534                GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1535                if ( sym instanceof RuleSymbol)
1536                    println("\""+((RuleSymbol)sym).getId()+"\",");
1537            }
1538            println("0");
1539            tabs--;
1540            println("};");
1541        }
1542
1543        // Generate _initialize function
1544        // disabled since it isn't used anymore..
1545
1546// println("void " + grammar.getClassName() + "::_initialize(void)");
1547// println("{");
1548// tabs++;
1549
1550        // if debugging, set up arrays and call the user-overridable
1551        // debugging setup method
1552// if ( grammar.debuggingOutput ) {
1553// println("setRuleNames(_ruleNames);");
1554// println("setSemPredNames(_semPredNames);");
1555// println("setupDebugging();");
1556// }
1557// tabs--;
1558// println("}");
1559        if( noConstructors )
1560        {
1561            println("#if 0");
1562            println("// constructor creation turned off with 'noConstructor' option");
1563        }
1564
1565        // Generate parser class constructor from TokenBuffer
1566        print(grammar.getClassName() + "::" + grammar.getClassName());
1567        println("("+namespaceAntlr+"TokenBuffer& tokenBuf, int k)");
1568        println(": " + sup + "(tokenBuf,k)");
1569        println("{");
1570// tabs++;
1571// println("_initialize();");
1572// tabs--;
1573        println("}");
1574        println("");
1575
1576        print(grammar.getClassName() + "::" + grammar.getClassName());
1577        println("("+namespaceAntlr+"TokenBuffer& tokenBuf)");
1578        println(": " + sup + "(tokenBuf," + grammar.maxk + ")");
1579        println("{");
1580// tabs++;
1581// println("_initialize();");
1582// tabs--;
1583        println("}");
1584        println("");
1585
1586        // Generate parser class constructor from TokenStream
1587        print(grammar.getClassName() + "::" + grammar.getClassName());
1588        println("("+namespaceAntlr+"TokenStream& lexer, int k)");
1589        println(": " + sup + "(lexer,k)");
1590        println("{");
1591// tabs++;
1592// println("_initialize();");
1593// tabs--;
1594        println("}");
1595        println("");
1596
1597        print(grammar.getClassName() + "::" + grammar.getClassName());
1598        println("("+namespaceAntlr+"TokenStream& lexer)");
1599        println(": " + sup + "(lexer," + grammar.maxk + ")");
1600        println("{");
1601// tabs++;
1602// println("_initialize();");
1603// tabs--;
1604        println("}");
1605        println("");
1606
1607        print(grammar.getClassName() + "::" + grammar.getClassName());
1608        println("(const "+namespaceAntlr+"ParserSharedInputState& state)");
1609        println(": " + sup + "(state," + grammar.maxk + ")");
1610        println("{");
1611// tabs++;
1612// println("_initialize();");
1613// tabs--;
1614        println("}");
1615        println("");
1616
1617        if( noConstructors )
1618        {
1619            println("// constructor creation turned off with 'noConstructor' option");
1620            println("#endif");
1621        }
1622
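    /* Illustrative only: for a parser named "MyParser" with k==2 and the default
     * superclass LLkParser, the constructors emitted above come out roughly as:
     *
     *   MyParser::MyParser(antlr::TokenBuffer& tokenBuf, int k)
     *   : antlr::LLkParser(tokenBuf,k)
     *   {
     *   }
     *
     *   MyParser::MyParser(antlr::TokenStream& lexer)
     *   : antlr::LLkParser(lexer,2)
     *   {
     *   }
     *
     * with the TokenBuffer-only, TokenStream+k and ParserSharedInputState variants
     * following the same pattern.
     */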
1623        astTypes = new Vector();
1624
1625        // Generate code for each rule in the grammar
1626        Enumeration ids = grammar.rules.elements();
1627        int ruleNum=0;
1628        while ( ids.hasMoreElements() ) {
1629            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1630            if ( sym instanceof RuleSymbol) {
1631                RuleSymbol rs = (RuleSymbol)sym;
1632                genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
1633            }
1634            exitIfError();
1635        }
1636
1637        genInitFactory( g );
1638
1639        // Generate the token names
1640        genTokenStrings(grammar.getClassName() + "::");
1641
1642        // Generate the bitsets used throughout the grammar
1643        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );
1644
1645        // Generate the semantic predicate map for debugging
1646        if (grammar.debuggingOutput)
1647            genSemPredMap(grammar.getClassName() + "::");
1648
1649        // Close class definition
1650        println("");
1651        println("");
1652        if (nameSpace != null)
1653            nameSpace.emitClosures(currentOutput);
1654
1655        // Close the parser output stream
1656        currentOutput.close();
1657        currentOutput = null;
1658    }
1659    public void genBody(TreeWalkerGrammar g) throws IOException
1660    {
1661        // Open the output stream for the parser and set the currentOutput
1662        outputFile = grammar.getClassName() + ".cpp";
1663        outputLine = 1;
1664        currentOutput = antlrTool.openOutputFile(outputFile);
1665        //SAS: changed for proper text file io
1666
1667        genAST = grammar.buildAST;
1668        tabs = 0;
1669
1670        // Generate the header common to all output files.
1671        genHeader(outputFile);
1672
1673        printHeaderAction(preIncludeCpp);
1674
1675        // Generate header for the parser
1676        println("#include \"" + grammar.getClassName() + ".hpp\"");
1677        println("#include <antlr/Token.hpp>");
1678        println("#include <antlr/AST.hpp>");
1679        println("#include <antlr/NoViableAltException.hpp>");
1680        println("#include <antlr/MismatchedTokenException.hpp>");
1681        println("#include <antlr/SemanticException.hpp>");
1682        println("#include <antlr/BitSet.hpp>");
1683
1684        printHeaderAction(postIncludeCpp);
1685
1686        if (nameSpace != null)
1687            nameSpace.emitDeclarations(currentOutput);
1688
1689        // Output the user-defined parser preamble
1690        printAction(grammar.preambleAction);
1691
1692        // Generate parser class definition
1693        String sup = null;
1694        if ( grammar.superClass!=null ) {
1695            sup = grammar.superClass;
1696        }
1697        else {
1698            sup = grammar.getSuperClass();
1699            if (sup.lastIndexOf('.') != -1)
1700                sup = sup.substring(sup.lastIndexOf('.')+1);
1701            sup = namespaceAntlr + sup;
1702        }
1703        if( noConstructors )
1704        {
1705            println("#if 0");
1706            println("// constructor creation turned off with 'noConstructor' option");
1707        }
1708
1709        // Generate default parser class constructor
1710        println(grammar.getClassName() + "::" + grammar.getClassName() + "()");
1711        println("\t: "+namespaceAntlr+"TreeParser() {");
1712        tabs++;
1713// println("setTokenNames(_tokenNames);");
1714        tabs--;
1715        println("}");
1716
1717        if( noConstructors )
1718        {
1719            println("// constructor creation turned off with 'noConstructor' option");
1720            println("#endif");
1721        }
1722        println("");
1723
1724        astTypes = new Vector();
1725
1726        // Generate code for each rule in the grammar
1727        Enumeration ids = grammar.rules.elements();
1728        int ruleNum=0;
1729        String ruleNameInits = "";
1730        while ( ids.hasMoreElements() ) {
1731            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1732            if ( sym instanceof RuleSymbol) {
1733                RuleSymbol rs = (RuleSymbol)sym;
1734                genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
1735            }
1736            exitIfError();
1737        }
1738
1739        // Generate the ASTFactory initialization function
1740        genInitFactory( grammar );
1741        // Generate the token names
1742        genTokenStrings(grammar.getClassName() + "::");
1743
1744        // Generate the bitsets used throughout the grammar
1745        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );
1746
1747        // Close class definition
1748        println("");
1749        println("");
1750
1751        if (nameSpace != null)
1752            nameSpace.emitClosures(currentOutput);
1753
1754        // Close the parser output stream
1755        currentOutput.close();
1756        currentOutput = null;
1757    }
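    /* Illustrative only: the default constructor emitted above for a tree parser
     * named "MyTreeParser" comes out roughly as:
     *
     *   MyTreeParser::MyTreeParser()
     *   	: antlr::TreeParser() {
     *   }
     */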
1758    /** Generate a series of case statements that implement a BitSet test.
1759     * @param p The Bitset for which cases are to be generated
1760     */
1761    protected void genCases(BitSet p) {
1762        if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genCases("+p+")");
1763        int[] elems;
1764
1765        elems = p.toArray();
1766        // Wrap cases four-per-line for lexer, one-per-line for parser
1767        int wrap = 1; //(grammar instanceof LexerGrammar) ? 4 : 1;
1768        int j=1;
1769        boolean startOfLine = true;
1770        for (int i = 0; i < elems.length; i++) {
1771            if (j==1) {
1772                print("");
1773            } else {
1774                _print(" ");
1775            }
1776            _print("case " + getValueString(elems[i]) + ":");
1777
1778            if (j==wrap) {
1779                _println("");
1780                startOfLine = true;
1781                j=1;
1782            }
1783            else {
1784                j++;
1785                startOfLine = false;
1786            }
1787        }
1788        if (!startOfLine) {
1789            _println("");
1790        }
1791    }
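    /* Illustrative only: for a lookahead set containing the token types ID, LPAREN
     * and NUM_INT (hypothetical names), genCases() emits one case label per element
     * of the set, currently one per line since wrap is hard-coded to 1:
     *
     *   case ID:
     *   case LPAREN:
     *   case NUM_INT:
     */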
1792    /** Generate common code for a block of alternatives; return a postscript
1793     * that needs to be generated at the end of the block. Other routines
1794     * may append else-clauses and such for error checking before the postfix
1795     * is generated.
1796     * If the grammar is a lexer, then generate alternatives in an order where
1797     * alternatives requiring deeper lookahead are generated first, and
1798     * EOF in the lookahead set reduces the depth of the lookahead.
1799     * @param blk The block to generate
1800     * @param noTestForSingle If true, then it does not generate a test for a single alternative.
1801     */

1802    public CppBlockFinishingInfo genCommonBlock(
1803        AlternativeBlock blk,
1804        boolean noTestForSingle )
1805    {
1806        int nIF=0;
1807        boolean createdLL1Switch = false;
1808        int closingBracesOfIFSequence = 0;
1809        CppBlockFinishingInfo finishingInfo = new CppBlockFinishingInfo();
1810        if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genCommonBlk("+blk+")");
1811
1812        // Save the AST generation state, and set it to that of the block
1813
boolean savegenAST = genAST;
1814        genAST = genAST && blk.getAutoGen();
1815
1816        boolean oldsaveTest = saveText;
1817        saveText = saveText && blk.getAutoGen();
1818
1819        // Is this block inverted? If so, generate special-case code
1820
if ( blk.not &&
1821            analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) )
1822        {
1823            Lookahead p = analyzer.look(1, blk);
1824            // Variable assignment for labeled elements
1825
if (blk.getLabel() != null && syntacticPredLevel == 0) {
1826                println(blk.getLabel() + " = " + lt1Value + ";");
1827            }
1828
1829            // AST
1830
genElementAST(blk);
1831
1832            String JavaDoc astArgs="";
1833            if (grammar instanceof TreeWalkerGrammar) {
1834                if( usingCustomAST )
1835                    astArgs=namespaceAntlr+"RefAST"+"(_t),";
1836                else
1837                    astArgs="_t,";
1838            }
1839
1840            // match the bitset for the alternative
1841
println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
1842
1843            // tack on tree cursor motion if doing a tree walker
1844
if (grammar instanceof TreeWalkerGrammar)
1845            {
1846                println("_t = _t->getNextSibling();");
1847            }
1848            return finishingInfo;
1849        }
1850
1851        // Special handling for single alt
1852
if (blk.getAlternatives().size() == 1)
1853        {
1854            Alternative alt = blk.getAlternativeAt(0);
1855            // Generate a warning if there is a synPred for single alt.
1856
if (alt.synPred != null)
1857            {
1858                antlrTool.warning(
1859                                 "Syntactic predicate superfluous for single alternative",
1860                                 grammar.getFilename(),
1861                                 blk.getAlternativeAt(0).synPred.getLine(),
1862                                 blk.getAlternativeAt(0).synPred.getColumn()
1863                );
1864            }
1865            if (noTestForSingle)
1866            {
1867                if (alt.semPred != null)
1868                {
1869                    // Generate validating predicate
1870
genSemPred(alt.semPred, blk.line);
1871                }
1872                genAlt(alt, blk);
1873                return finishingInfo;
1874            }
1875        }
1876
1877        // count number of simple LL(1) cases; only do switch for
1878
// many LL(1) cases (no preds, no end of token refs)
1879
// We don't care about exit paths for (...)*, (...)+
1880
// because we don't explicitly have a test for them
1881
// as an alt in the loop.
1882
//
1883
// Also, we now count how many unicode lookahead sets
1884
// there are--they must be moved to DEFAULT or ELSE
1885
// clause.
1886

1887        int nLL1 = 0;
1888        for (int i=0; i<blk.getAlternatives().size(); i++)
1889        {
1890            Alternative a = blk.getAlternativeAt(i);
1891            if ( suitableForCaseExpression(a) )
1892                nLL1++;
1893        }
1894
1895        // do LL(1) cases
1896
if ( nLL1 >= makeSwitchThreshold )
1897        {
1898            // Determine the name of the item to be compared
1899
String JavaDoc testExpr = lookaheadString(1);
1900            createdLL1Switch = true;
1901            // when parsing trees, convert null to valid tree node with NULL lookahead
1902
if ( grammar instanceof TreeWalkerGrammar )
1903            {
1904                println("if (_t == "+labeledElementASTInit+" )");
1905                tabs++;
1906                println("_t = ASTNULL;");
1907                tabs--;
1908            }
1909            println("switch ( "+testExpr+") {");
1910            for (int i=0; i<blk.alternatives.size(); i++)
1911            {
1912                Alternative alt = blk.getAlternativeAt(i);
1913                // ignore any non-LL(1) alts, predicated alts or end-of-token alts
1914
// or end-of-token alts for case expressions
1915
if ( !suitableForCaseExpression(alt) )
1916                {
1917                    continue;
1918                }
1919                Lookahead p = alt.cache[1];
1920                if (p.fset.degree() == 0 && !p.containsEpsilon())
1921                {
1922                    antlrTool.warning("Alternate omitted due to empty prediction set",
1923                        grammar.getFilename(),
1924                        alt.head.getLine(), alt.head.getColumn());
1925                }
1926                else
1927                {
1928                    genCases(p.fset);
1929                    println("{");
1930                    tabs++;
1931                    genAlt(alt, blk);
1932                    println("break;");
1933                    tabs--;
1934                    println("}");
1935                }
1936            }
1937            println("default:");
1938            tabs++;
1939        }
1940
1941        // do non-LL(1) and nondeterministic cases
1942        // This is tricky in the lexer, because of cases like:
1943        //     STAR : '*' ;
1944        //     ASSIGN_STAR : "*=";
1945        // Since nextToken is generated without a loop, then the STAR will
1946        // have end-of-token as its lookahead set for LA(2). So, we must generate the
1947        // alternatives containing trailing end-of-token in their lookahead sets *after*
1948        // the alternatives without end-of-token. This implements the usual
1949        // lexer convention that longer matches come before shorter ones, e.g.
1950        // "*=" matches ASSIGN_STAR not STAR
1951        //
1952        // For non-lexer grammars, this does not sort the alternates by depth
1953        // Note that alts whose lookahead is purely end-of-token at k=1 end up
1954        // as default or else clauses.
1955        int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
1956        for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
1957            if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("checking depth "+altDepth);
1958            for (int i=0; i<blk.alternatives.size(); i++) {
1959                Alternative alt = blk.getAlternativeAt(i);
1960                if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genAlt: "+i);
1961                // if we made a switch above, ignore what we already took care
1962
// of. Specifically, LL(1) alts with no preds
1963
// that do not have end-of-token in their prediction set
1964
if ( createdLL1Switch &&
1965                     suitableForCaseExpression(alt) )
1966                {
1967                    if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
1968                        System.out.println("ignoring alt because it was in the switch");
1969                    continue;
1970                }
1971                String JavaDoc e;
1972
1973                boolean unpredicted = false;
1974
1975                if (grammar instanceof LexerGrammar) {
1976                    // Calculate the "effective depth" of the alt, which is the max
1977
// depth at which cache[depth]!=end-of-token
1978
int effectiveDepth = alt.lookaheadDepth;
1979                    if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC)
1980                    {
1981                        // use maximum lookahead
1982
effectiveDepth = grammar.maxk;
1983                    }
1984                    while ( effectiveDepth >= 1 &&
1985                             alt.cache[effectiveDepth].containsEpsilon() )
1986                    {
1987                        effectiveDepth--;
1988                    }
1989                    // Ignore alts whose effective depth is other than the ones we
1990
// are generating for this iteration.
1991
if (effectiveDepth != altDepth)
1992                    {
1993                        if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
1994                            System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
1995                        continue;
1996                    }
1997                    unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
1998                    e = getLookaheadTestExpression(alt, effectiveDepth);
1999                }
2000                else
2001                {
2002                    unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
2003                    e = getLookaheadTestExpression(alt, grammar.maxk);
2004                }
2005
2006                // Was it a big unicode range that forced unsuitability
2007
// for a case expression?
2008
if ( alt.cache[1].fset.degree() > caseSizeThreshold &&
2009                      suitableForCaseExpression(alt))
2010                {
2011                    if ( nIF==0 )
2012                    {
2013                        // generate this only for the first if the elseif's
2014
// are covered by this one
2015
if ( grammar instanceof TreeWalkerGrammar ) {
2016                            println("if (_t == "+labeledElementASTInit+" )");
2017                            tabs++;
2018                            println("_t = ASTNULL;");
2019                            tabs--;
2020                        }
2021                        println("if " + e + " {");
2022                    }
2023                    else
2024                        println("else if " + e + " {");
2025                }
2026                else if (unpredicted &&
2027                            alt.semPred==null &&
2028                            alt.synPred==null)
2029                {
2030                    // The alt has empty prediction set and no
2031
// predicate to help out. if we have not
2032
// generated a previous if, just put {...} around
2033
// the end-of-token clause
2034
if ( nIF==0 ) {
2035                        println("{");
2036                    }
2037                    else {
2038                        println("else {");
2039                    }
2040                    finishingInfo.needAnErrorClause = false;
2041                }
2042                else
2043                {
2044                    // check for sem and syn preds
2045
// Add any semantic predicate expression to the lookahead test
2046
if ( alt.semPred != null ) {
2047                        // if debugging, wrap the evaluation of the predicate in a method
2048
//
2049
// translate $ and # references
2050
ActionTransInfo tInfo = new ActionTransInfo();
2051                        String JavaDoc actionStr = processActionForSpecialSymbols(alt.semPred,
2052                                                                          blk.line,
2053                                                                          currentRule,
2054                                                                          tInfo);
2055                        // ignore translation info...we don't need to do anything with it.
2056

2057                        // call that will inform SemanticPredicateListeners of the
2058
// result
2059
if ( grammar.debuggingOutput &&
2060                              ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))
2061                             )
2062                            e = "("+e+"&& fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.PREDICTING,"+ //FIXME
2063
addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
2064                        else
2065                            e = "("+e+"&&("+actionStr +"))";
2066                    }
2067
2068                    // Generate any syntactic predicates
2069
if ( nIF>0 ) {
2070                        if ( alt.synPred != null ) {
2071                            println("else {");
2072                            tabs++;
2073                            genSynPred( alt.synPred, e );
2074                            closingBracesOfIFSequence++;
2075                        }
2076                        else {
2077                            println("else if " + e + " {");
2078                        }
2079                    }
2080                    else {
2081                        if ( alt.synPred != null ) {
2082                            genSynPred( alt.synPred, e );
2083                        }
2084                        else {
2085                            // when parsing trees, convert null to valid tree node
2086
// with NULL lookahead.
2087
if ( grammar instanceof TreeWalkerGrammar ) {
2088                                println("if (_t == "+labeledElementASTInit+" )");
2089                                tabs++;
2090                                println("_t = ASTNULL;");
2091                                tabs--;
2092                            }
2093                            println("if " + e + " {");
2094                        }
2095                    }
2096
2097                }
2098
2099                nIF++;
2100                tabs++;
2101                genAlt(alt, blk);
2102                tabs--;
2103                println("}");
2104            }
2105        }
2106        String JavaDoc ps = "";
2107        for (int i=1; i<=closingBracesOfIFSequence; i++) {
2108            tabs--; // does JavaCodeGenerator need this?
2109            ps+="}";
2110        }
2111
2112        // Restore the AST generation state
2113        genAST = savegenAST;
2114
2115        // restore save text state
2116        saveText=oldsaveTest;
2117
2118        // Return the finishing info.
2119        if ( createdLL1Switch ) {
2120            tabs--;
2121            finishingInfo.postscript = ps+"}";
2122            finishingInfo.generatedSwitch = true;
2123            finishingInfo.generatedAnIf = nIF>0;
2124            //return new CppBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
2125
2126        }
2127        else {
2128            finishingInfo.postscript = ps;
2129            finishingInfo.generatedSwitch = false;
2130            finishingInfo.generatedAnIf = nIF>0;
2131            //return new CppBlockFinishingInfo(ps, false,nIF>0);
2132        }
2133        return finishingInfo;
2134    }
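    /* Illustrative only: for a parser block with several LL(1) alternatives plus one
     * alternative needing deeper lookahead, the code generated above has roughly this
     * shape (token names hypothetical):
     *
     *   switch ( LA(1)) {
     *   case ID:
     *   {
     *       ... code for alt 1 ...
     *       break;
     *   }
     *   case LPAREN:
     *   {
     *       ... code for alt 2 ...
     *       break;
     *   }
     *   default:
     *       if ((LA(1) == NUM_INT) && (LA(2) == DOT)) {
     *           ... code for the non-LL(1) alt ...
     *       }
     *   }
     *
     * The caller appends any error clause and the closing braces carried back in the
     * CppBlockFinishingInfo postscript.
     */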
2135
2136    private static boolean suitableForCaseExpression(Alternative a) {
2137        return a.lookaheadDepth == 1 &&
2138            a.semPred == null &&
2139            !a.cache[1].containsEpsilon() &&
2140            a.cache[1].fset.degree()<=caseSizeThreshold;
2141    }
2142
2143    /** Generate code to link an element reference into the AST
2144     */

2145    private void genElementAST(AlternativeElement el) {
2146
2147        // handle case where you're not building trees, but are in tree walker.
2148
// Just need to get labels set up.
2149
if ( grammar instanceof TreeWalkerGrammar && !grammar.buildAST )
2150        {
2151            String JavaDoc elementRef;
2152            String JavaDoc astName;
2153
2154            // Generate names and declarations of the AST variable(s)
2155
if (el.getLabel() == null)
2156            {
2157                elementRef = lt1Value;
2158                // Generate AST variables for unlabeled stuff
2159
astName = "tmp" + astVarNumber + "_AST";
2160                astVarNumber++;
2161                // Map the generated AST variable in the alternate
2162
mapTreeVariable(el, astName);
2163                // Generate an "input" AST variable also
2164
println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
2165            }
2166            return;
2167        }
2168
2169        if (grammar.buildAST && syntacticPredLevel == 0)
2170        {
2171            boolean needASTDecl =
2172                ( genAST && (el.getLabel() != null ||
2173                  el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG ));
2174
2175            // RK: if we have a grammar element always generate the decl
2176
// since some guy can access it from an action and we can't
2177
// peek ahead (well not without making a mess).
2178
// I'd prefer taking this out.
2179
if( el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
2180                 (el instanceof TokenRefElement) )
2181                needASTDecl = true;
2182
2183            boolean doNoGuessTest =
2184                ( grammar.hasSyntacticPredicate && needASTDecl );
2185
2186            String JavaDoc elementRef;
2187            String JavaDoc astNameBase;
2188
2189            // Generate names and declarations of the AST variable(s)
2190
if (el.getLabel() != null)
2191            {
2192                // if the element is labeled use that name...
2193
elementRef = el.getLabel();
2194                astNameBase = el.getLabel();
2195            }
2196            else
2197            {
2198                // else generate a temporary name...
2199
elementRef = lt1Value;
2200                // Generate AST variables for unlabeled stuff
2201
astNameBase = "tmp" + astVarNumber;
2202                astVarNumber++;
2203            }
2204
2205            // Generate the declaration if required.
2206
if ( needASTDecl )
2207            {
2208                if ( el instanceof GrammarAtom )
2209                {
2210                    GrammarAtom ga = (GrammarAtom)el;
2211                    if ( ga.getASTNodeType()!=null )
2212                    {
2213                        genASTDeclaration( el, astNameBase, "Ref"+ga.getASTNodeType() );
2214// println("Ref"+ga.getASTNodeType()+" " + astName + ";");
2215
}
2216                    else
2217                    {
2218                        genASTDeclaration( el, astNameBase, labeledElementASTType );
2219// println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
2220
}
2221                }
2222                else
2223                {
2224                    genASTDeclaration( el, astNameBase, labeledElementASTType );
2225// println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
2226
}
2227            }
2228
2229            // for convenience..
2230
String JavaDoc astName = astNameBase + "_AST";
2231
2232            // Map the generated AST variable in the alternate
2233
mapTreeVariable(el, astName);
2234            if (grammar instanceof TreeWalkerGrammar)
2235            {
2236                // Generate an "input" AST variable also
2237
println(labeledElementASTType+" " + astName + "_in = "+labeledElementASTInit+";");
2238            }
2239
2240            // Enclose actions with !guessing
2241
if (doNoGuessTest) {
2242                println("if ( inputState->guessing == 0 ) {");
2243                tabs++;
2244            }
2245
2246            // if something has a label assume it will be used
2247
// so we must initialize the RefAST
2248
if (el.getLabel() != null)
2249            {
2250                if ( el instanceof GrammarAtom )
2251                {
2252                    println(astName + " = "+
2253                              getASTCreateString((GrammarAtom)el,elementRef) + ";");
2254                }
2255                else
2256                {
2257                    println(astName + " = "+
2258                              getASTCreateString(elementRef) + ";");
2259                }
2260            }
2261
2262            // if it has no label but a declaration exists initialize it.
2263
if( el.getLabel() == null && needASTDecl )
2264            {
2265                elementRef = lt1Value;
2266                if ( el instanceof GrammarAtom )
2267                {
2268                    println(astName + " = "+
2269                              getASTCreateString((GrammarAtom)el,elementRef) + ";");
2270                }
2271                else
2272                {
2273                    println(astName + " = "+
2274                              getASTCreateString(elementRef) + ";");
2275                }
2276                // Map the generated AST variable in the alternate
2277
if (grammar instanceof TreeWalkerGrammar)
2278                {
2279                    // set "input" AST variable also
2280
println(astName + "_in = " + elementRef + ";");
2281                }
2282            }
2283
2284            if (genAST)
2285            {
2286                switch (el.getAutoGenType())
2287                {
2288                case GrammarElement.AUTO_GEN_NONE:
2289                    if( usingCustomAST ||
2290                         (el instanceof GrammarAtom &&
2291                          ((GrammarAtom)el).getASTNodeType() != null) )
2292                        println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST("+ astName + "));");
2293                    else
2294                        println("astFactory->addASTChild(currentAST, "+ astName + ");");
2295                    // println("astFactory.addASTChild(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
2296
break;
2297                case GrammarElement.AUTO_GEN_CARET:
2298                    if( usingCustomAST ||
2299                         (el instanceof GrammarAtom &&
2300                         ((GrammarAtom)el).getASTNodeType() != null) )
2301                        println("astFactory->makeASTRoot(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
2302                    else
2303                        println("astFactory->makeASTRoot(currentAST, " + astName + ");");
2304                    break;
2305                default:
2306                    break;
2307                }
2308            }
2309            if (doNoGuessTest)
2310            {
2311                tabs--;
2312                println("}");
2313            }
2314        }
2315    }
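    /* Illustrative only: for an unlabeled token reference in a parser rule with AST
     * construction on, the element code generated around this method looks roughly
     * like the following (variable numbering and the exact create/init calls depend
     * on the AST factory and node-type setup):
     *
     *   RefAST tmp1_AST = antlr::nullAST;
     *   tmp1_AST = astFactory->create(LT(1));
     *   astFactory->addASTChild(currentAST, tmp1_AST);
     *
     * A '^' suffix on the element turns the last line into a makeASTRoot(...) call,
     * and a '!' suffix suppresses the declaration and linking entirely.
     */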
2316    /** Close the try block and generate catch phrases
2317     * if the element has a labeled handler in the rule
2318     */

2319    private void genErrorCatchForElement(AlternativeElement el) {
2320        if (el.getLabel() == null) return;
2321        String JavaDoc r = el.enclosingRuleName;
2322        if ( grammar instanceof LexerGrammar ) {
2323            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
2324        }
2325        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
2326        if (rs == null) {
2327            antlrTool.panic("Enclosing rule not found!");
2328        }
2329        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
2330        if (ex != null) {
2331            tabs--;
2332            println("}");
2333            genErrorHandler(ex);
2334        }
2335    }
2336    /** Generate the catch phrases for a user-specified error handler */
2337    private void genErrorHandler(ExceptionSpec ex)
2338    {
2339        // Each ExceptionHandler in the ExceptionSpec is a separate catch
2340
for (int i = 0; i < ex.handlers.size(); i++)
2341        {
2342            ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
2343            // Generate catch phrase
2344
println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
2345            tabs++;
2346            if (grammar.hasSyntacticPredicate) {
2347                println("if (inputState->guessing==0) {");
2348                tabs++;
2349            }
2350
2351            // When not guessing, execute user handler action
2352
ActionTransInfo tInfo = new ActionTransInfo();
2353            genLineNo(handler.action);
2354            printAction(
2355                processActionForSpecialSymbols( handler.action.getText(),
2356                                                         handler.action.getLine(),
2357                                                         currentRule, tInfo )
2358            );
2359            genLineNo2();
2360
2361            if (grammar.hasSyntacticPredicate)
2362            {
2363                tabs--;
2364                println("} else {");
2365                tabs++;
2366                // When guessing, rethrow exception
2367
println("throw;");
2368                tabs--;
2369                println("}");
2370            }
2371            // Close catch phrase
2372
tabs--;
2373            println("}");
2374        }
2375    }
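    /* Illustrative only: a rule-element handler written as
     * "exception catch [antlr::RecognitionException& ex] { ... }" comes out roughly as:
     *
     *   catch (antlr::RecognitionException& ex) {
     *       if (inputState->guessing==0) {
     *           // user handler action, with $ and # references translated
     *       } else {
     *           throw;
     *       }
     *   }
     */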
2376    /** Generate a try { opening if the element has a labeled handler in the rule */
2377    private void genErrorTryForElement(AlternativeElement el) {
2378        if (el.getLabel() == null) return;
2379        String JavaDoc r = el.enclosingRuleName;
2380        if ( grammar instanceof LexerGrammar ) {
2381            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
2382        }
2383        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
2384        if (rs == null) {
2385            antlrTool.panic("Enclosing rule not found!");
2386        }
2387        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
2388        if (ex != null) {
2389            println("try { // for error handling");
2390            tabs++;
2391        }
2392    }
2393    /** Generate a header that is common to all C++ files */
2394    protected void genHeader(String JavaDoc fileName)
2395    {
2396        println("/* $ANTLR "+antlrTool.version+": "+
2397                "\""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\""+
2398                " -> "+
2399                "\""+fileName+"\"$ */");
2400    }
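    // Illustrative only: the header emitted above comes out like
    //   /* $ANTLR 2.7.x: "my.g" -> "MyParser.cpp"$ */
    // where the version string and file names depend on the tool and the grammar file.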
2401
2402    // these are unique to C++ mode
2403    public void genInclude(LexerGrammar g) throws IOException
2404    {
2405        outputFile = grammar.getClassName() + ".hpp";
2406        outputLine = 1;
2407        currentOutput = antlrTool.openOutputFile(outputFile);
2408        //SAS: changed for proper text file io
2409

2410        genAST = false; // no way to gen trees.
2411
saveText = true; // save consumed characters.
2412

2413        tabs=0;
2414
2415        // Generate a guard wrapper
2416
println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
2417        println("#define INC_"+grammar.getClassName()+"_hpp_");
2418        println("");
2419
2420        printHeaderAction(preIncludeHpp);
2421
2422        println("#include <antlr/config.hpp>");
2423
2424        // Generate header common to all C++ output files
2425
genHeader(outputFile);
2426
2427        // Generate header specific to lexer header file
2428
println("#include <antlr/CommonToken.hpp>");
2429        println("#include <antlr/InputBuffer.hpp>");
2430        println("#include <antlr/BitSet.hpp>");
2431        println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
2432
2433        // Find the name of the super class
2434
String JavaDoc sup=null;
2435        if ( grammar.superClass!=null ) {
2436            sup = grammar.superClass;
2437
2438            println("\n// Include correct superclass header with a header statement for example:");
2439            println("// header \"post_include_hpp\" {");
2440            println("// #include \""+sup+".hpp\"");
2441            println("// }");
2442            println("// Or....");
2443            println("// header {");
2444            println("// #include \""+sup+".hpp\"");
2445            println("// }\n");
2446        }
2447        else {
2448            sup = grammar.getSuperClass();
2449            if (sup.lastIndexOf('.') != -1)
2450                sup = sup.substring(sup.lastIndexOf('.')+1);
2451            println("#include <antlr/"+sup+".hpp>");
2452            sup = namespaceAntlr + sup;
2453        }
2454
2455        // Do not use printAction because we assume tabs==0
2456
printHeaderAction(postIncludeHpp);
2457
2458        if (nameSpace != null)
2459               nameSpace.emitDeclarations(currentOutput);
2460
2461        printHeaderAction("");
2462
2463        // print javadoc comment if any
2464
if ( grammar.comment!=null ) {
2465            _println(grammar.comment);
2466        }
2467
2468        // Generate lexer class definition
2469
print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
2470        println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
2471
2472        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
2473        if ( tsuffix != null ) {
2474            String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
2475            if ( suffix != null ) {
2476                print(", "+suffix); // must be an interface name for Java
2477            }
2478        }
2479        println("{");
2480
2481        // Generate user-defined lexer class members
2482
if (grammar.classMemberAction != null) {
2483            genLineNo(grammar.classMemberAction);
2484            print(
2485                processActionForSpecialSymbols(grammar.classMemberAction.getText(),
2486                                                         grammar.classMemberAction.getLine(),
2487                                                         currentRule, null)
2488            );
2489            genLineNo2();
2490        }
2491
2492        // Generate initLiterals() method
2493
tabs=0;
2494        println("private:");
2495        tabs=1;
2496        println("void initLiterals();");
2497
2498        // Generate getCaseSensitiveLiterals() method
2499
tabs=0;
2500        println("public:");
2501        tabs=1;
2502        println("bool getCaseSensitiveLiterals() const");
2503        println("{");
2504        tabs++;
2505        println("return "+g.caseSensitiveLiterals + ";");
2506        tabs--;
2507        println("}");
2508
2509        // Make constructors public
2510
tabs=0;
2511        println("public:");
2512        tabs=1;
2513
2514        if( noConstructors )
2515        {
2516            tabs = 0;
2517            println("#if 0");
2518            println("// constructor creation turned off with 'noConstructor' option");
2519            tabs = 1;
2520        }
2521
2522        // Generate the constructor from std::istream
2523
println(grammar.getClassName() + "(" + namespaceStd + "istream& in);");
2524
2525        // Generate the constructor from InputBuffer
2526
println(grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib);");
2527
2528        println(grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state);");
2529        if( noConstructors )
2530        {
2531            tabs = 0;
2532            println("// constructor creation turned off with 'noConstructor' option");
2533            println("#endif");
2534            tabs = 1;
2535        }
2536
2537        // Generate nextToken() rule.
2538
// nextToken() is a synthetic lexer rule that is the implicit OR of all
2539
// user-defined lexer rules.
2540
println(namespaceAntlr+"RefToken nextToken();");
2541
2542        // Generate code for each rule in the lexer
2543
Enumeration JavaDoc ids = grammar.rules.elements();
2544        while ( ids.hasMoreElements() ) {
2545            RuleSymbol sym = (RuleSymbol) ids.nextElement();
2546            // Don't generate the synthetic rules
2547
if (!sym.getId().equals("mnextToken")) {
2548                genRuleHeader(sym, false);
2549            }
2550            exitIfError();
2551        }
2552
2553        // Make the rest private
2554
tabs=0;
2555        println("private:");
2556        tabs=1;
2557
2558        // generate the rule name array for debugging
2559
if ( grammar.debuggingOutput ) {
2560            println("static const char* _ruleNames[];");
2561        }
2562
2563        // Generate the semantic predicate map for debugging
2564
if (grammar.debuggingOutput)
2565            println("static const char* _semPredNames[];");
2566
2567        // Generate the bitsets used throughout the lexer
2568
genBitsetsHeader(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
2569
2570        tabs=0;
2571        println("};");
2572        println("");
2573        if (nameSpace != null)
2574            nameSpace.emitClosures(currentOutput);
2575
2576        // Generate a guard wrapper
2577
println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
2578
2579        // Close the lexer output stream
2580
currentOutput.close();
2581        currentOutput = null;
2582    }
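    /* Illustrative only: for a lexer named "MyLexer", the .hpp file produced above is
     * shaped roughly like this (superclass and token-types names depend on the grammar
     * and its options):
     *
     *   #ifndef INC_MyLexer_hpp_
     *   #define INC_MyLexer_hpp_
     *
     *   #include <antlr/config.hpp>
     *   #include <antlr/CommonToken.hpp>
     *   #include <antlr/InputBuffer.hpp>
     *   #include <antlr/BitSet.hpp>
     *   #include "MyLexerTokenTypes.hpp"
     *   #include <antlr/CharScanner.hpp>
     *
     *   class CUSTOM_API MyLexer : public antlr::CharScanner, public MyLexerTokenTypes
     *   {
     *   private:
     *       void initLiterals();
     *   public:
     *       bool getCaseSensitiveLiterals() const;
     *       MyLexer(std::istream& in);
     *       MyLexer(antlr::InputBuffer& ib);
     *       MyLexer(const antlr::LexerSharedInputState& state);
     *       antlr::RefToken nextToken();
     *       ... one member function per lexer rule ...
     *   };
     *
     *   ... closing #endif guard ...
     */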
2583    public void genInclude(ParserGrammar g) throws IOException JavaDoc
2584    {
2585        // Open the output stream for the parser and set the currentOutput
2586
outputFile = grammar.getClassName() + ".hpp";
2587        outputLine = 1;
2588        currentOutput = antlrTool.openOutputFile(outputFile);
2589        //SAS: changed for proper text file io
2590

2591        genAST = grammar.buildAST;
2592
2593        tabs = 0;
2594
2595        // Generate a guard wrapper
2596
println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
2597        println("#define INC_"+grammar.getClassName()+"_hpp_");
2598        println("");
2599        printHeaderAction(preIncludeHpp);
2600        println("#include <antlr/config.hpp>");
2601
2602        // Generate the header common to all output files.
2603
genHeader(outputFile);
2604
2605        // Generate header for the parser
2606
println("#include <antlr/TokenStream.hpp>");
2607        println("#include <antlr/TokenBuffer.hpp>");
2608        println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
2609
2610        // Generate parser class definition
2611
String JavaDoc sup=null;
2612        if ( grammar.superClass!=null ) {
2613            sup = grammar.superClass;
2614            println("\n// Include correct superclass header with a header statement for example:");
2615            println("// header \"post_include_hpp\" {");
2616            println("// #include \""+sup+".hpp\"");
2617            println("// }");
2618            println("// Or....");
2619            println("// header {");
2620            println("// #include \""+sup+".hpp\"");
2621            println("// }\n");
2622        }
2623        else {
2624            sup = grammar.getSuperClass();
2625            if (sup.lastIndexOf('.') != -1)
2626                sup = sup.substring(sup.lastIndexOf('.')+1);
2627            println("#include <antlr/"+sup+".hpp>");
2628            sup = namespaceAntlr + sup;
2629        }
2630        println("");
2631
2632        // Do not use printAction because we assume tabs==0
2633
printHeaderAction(postIncludeHpp);
2634
2635        if (nameSpace != null)
2636            nameSpace.emitDeclarations(currentOutput);
2637
2638        printHeaderAction("");
2639
2640        // print javadoc comment if any
2641
if ( grammar.comment!=null ) {
2642            _println(grammar.comment);
2643        }
2644
2645        // generate the actual class definition
2646
print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
2647        println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
2648
2649        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
2650        if ( tsuffix != null ) {
2651            String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
2652            if ( suffix != null )
2653                print(", "+suffix); // must be an interface name for Java
2654        }
2655        println("{");
2656
2657        // set up an array of all the rule names so the debugger can
2658
// keep track of them only by number -- less to store in tree...
2659
if (grammar.debuggingOutput) {
2660            println("public: static const char* _ruleNames[];");
2661        }
2662        // Generate user-defined parser class members
2663
if (grammar.classMemberAction != null) {
2664            genLineNo(grammar.classMemberAction.getLine());
2665            print(
2666                processActionForSpecialSymbols(grammar.classMemberAction.getText(),
2667                                                         grammar.classMemberAction.getLine(),
2668                                                         currentRule, null)
2669            );
2670            genLineNo2();
2671        }
2672        println("public:");
2673        tabs = 1;
2674        println("void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");
2675// println("// called from constructors");
2676
// println("void _initialize( void );");
2677

2678        // Generate parser class constructor from TokenBuffer
2679
tabs=0;
2680        if( noConstructors )
2681        {
2682            println("#if 0");
2683            println("// constructor creation turned off with 'noConstructor' option");
2684        }
2685        println("protected:");
2686        tabs=1;
2687        println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf, int k);");
2688        tabs=0;
2689        println("public:");
2690        tabs=1;
2691        println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf);");
2692
2693        // Generate parser class constructor from TokenStream
2694
tabs=0;
2695        println("protected:");
2696        tabs=1;
2697        println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer, int k);");
2698        tabs=0;
2699        println("public:");
2700        tabs=1;
2701        println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer);");
2702
2703        println(grammar.getClassName()+"(const "+namespaceAntlr+"ParserSharedInputState& state);");
2704        if( noConstructors )
2705        {
2706            tabs = 0;
2707            println("// constructor creation turned off with 'noConstructor' option");
2708            println("#endif");
2709            tabs = 1;
2710        }
2711
2712        println("int getNumTokens() const");
2713        println("{"); tabs++;
2714        println("return "+grammar.getClassName()+"::NUM_TOKENS;");
2715        tabs--; println("}");
2716        println("const char* getTokenName( int type ) const");
2717        println("{"); tabs++;
2718        println("if( type > getNumTokens() ) return 0;");
2719        println("return "+grammar.getClassName()+"::tokenNames[type];");
2720        tabs--; println("}");
2721        println("const char* const* getTokenNames() const");
2722        println("{"); tabs++;
2723        println("return "+grammar.getClassName()+"::tokenNames;");
2724        tabs--; println("}");
2725
2726        // Generate code for each rule in the grammar
2727
Enumeration JavaDoc ids = grammar.rules.elements();
2728        while ( ids.hasMoreElements() ) {
2729            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
2730            if ( sym instanceof RuleSymbol) {
2731                RuleSymbol rs = (RuleSymbol)sym;
2732                genRuleHeader(rs, rs.references.size()==0);
2733            }
2734            exitIfError();
2735        }
2736
2737        // RK: when we are using a custom ast override Parser::getAST to return
2738
// the custom AST type. Ok, this does not work anymore with newer
2739
// compilers gcc 3.2.x and up. The reference counter is probably
2740
// getting in the way.
2741
// So now we just patch the return type back to RefAST
2742
tabs = 0; println("public:"); tabs = 1;
2743        println(namespaceAntlr+"RefAST getAST()");
2744        println("{");
2745        if( usingCustomAST )
2746        {
2747            tabs++;
2748            println("return "+namespaceAntlr+"RefAST(returnAST);");
2749            tabs--;
2750        }
2751        else
2752        {
2753            tabs++;
2754            println("return returnAST;");
2755            tabs--;
2756        }
2757        println("}");
2758        println("");
2759
2760        tabs=0; println("protected:"); tabs=1;
2761        println(labeledElementASTType+" returnAST;");
2762
2763        // Make the rest private
2764
tabs=0;
2765        println("private:");
2766        tabs=1;
2767
2768        // Generate the token names
2769
println("static const char* tokenNames[];");
2770        // and how many there are of them
2771
_println("#ifndef NO_STATIC_CONSTS");
2772        println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
2773        _println("#else");
2774        println("enum {");
2775        println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
2776        println("};");
2777        _println("#endif");
2778
2779        // Generate the bitsets used throughout the grammar
2780
genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());
2781
2782        // Generate the semantic predicate map for debugging
2783
if (grammar.debuggingOutput)
2784            println("static const char* _semPredNames[];");
2785
2786        // Close class definition
2787
tabs=0;
2788        println("};");
2789        println("");
2790        if (nameSpace != null)
2791            nameSpace.emitClosures(currentOutput);
2792
2793        // Generate a guard wrapper
2794
println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
2795
2796        // Close the parser output stream
2797
currentOutput.close();
2798        currentOutput = null;
2799    }
2800    public void genInclude(TreeWalkerGrammar g) throws IOException JavaDoc
2801    {
2802        // Open the output stream for the parser and set the currentOutput
2803
outputFile = grammar.getClassName() + ".hpp";
2804        outputLine = 1;
2805        currentOutput = antlrTool.openOutputFile(outputFile);
2806        //SAS: changed for proper text file io
2807

2808        genAST = grammar.buildAST;
2809        tabs = 0;
2810
2811        // Generate a guard wrapper
2812
println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
2813        println("#define INC_"+grammar.getClassName()+"_hpp_");
2814        println("");
2815        printHeaderAction(preIncludeHpp);
2816        println("#include <antlr/config.hpp>");
2817        println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
2818
2819        // Generate the header common to all output files.
2820
genHeader(outputFile);
2821
2822        // Find the name of the super class
2823
String JavaDoc sup=null;
2824        if ( grammar.superClass!=null ) {
2825            sup = grammar.superClass;
2826            println("\n// Include correct superclass header with a header statement for example:");
2827            println("// header \"post_include_hpp\" {");
2828            println("// #include \""+sup+".hpp\"");
2829            println("// }");
2830            println("// Or....");
2831            println("// header {");
2832            println("// #include \""+sup+".hpp\"");
2833            println("// }\n");
2834        }
2835        else {
2836            sup = grammar.getSuperClass();
2837            if (sup.lastIndexOf('.') != -1)
2838                sup = sup.substring(sup.lastIndexOf('.')+1);
2839            println("#include <antlr/"+sup+".hpp>");
2840            sup = namespaceAntlr + sup;
2841        }
2842        println("");
2843
2844        // Generate header for the parser
2845
//
2846
// Do not use printAction because we assume tabs==0
2847
printHeaderAction(postIncludeHpp);
2848
2849        if (nameSpace != null)
2850            nameSpace.emitDeclarations(currentOutput);
2851
2852        printHeaderAction("");
2853
2854        // print javadoc comment if any
2855
if ( grammar.comment!=null ) {
2856            _println(grammar.comment);
2857        }
2858
2859        // Generate parser class definition
2860
print("class CUSTOM_API " + grammar.getClassName() + " : public "+sup);
2861        println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
2862
2863        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
2864        if ( tsuffix != null ) {
2865            String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
2866            if ( suffix != null ) {
2867                print(", "+suffix); // must be an interface name for Java
2868
}
2869        }
2870        println("{");
2871
2872        // Generate user-defined parser class members
2873
if (grammar.classMemberAction != null) {
2874            genLineNo(grammar.classMemberAction.getLine());
2875            print(
2876                    processActionForSpecialSymbols(grammar.classMemberAction.getText(),
2877                                                             grammar.classMemberAction.getLine(),
2878                                                             currentRule, null)
2879                    );
2880            genLineNo2();
2881        }
2882
2883        // Generate default parser class constructor
2884
tabs=0;
2885        println("public:");
2886
2887        if( noConstructors )
2888        {
2889            println("#if 0");
2890            println("// constructor creation turned off with 'noConstructors' option");
2891        }
2892        tabs=1;
2893        println(grammar.getClassName() + "();");
2894        if( noConstructors )
2895        {
2896            tabs = 0;
2897            println("#endif");
2898            tabs = 1;
2899        }
2900
2901        // Generate declaration for the initializeFactory method
2902
println("static void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");
2903
2904        println("int getNumTokens() const");
2905        println("{"); tabs++;
2906        println("return "+grammar.getClassName()+"::NUM_TOKENS;");
2907        tabs--; println("}");
2908        println("const char* getTokenName( int type ) const");
2909        println("{"); tabs++;
2910        println("if( type > getNumTokens() ) return 0;");
2911        println("return "+grammar.getClassName()+"::tokenNames[type];");
2912        tabs--; println("}");
2913        println("const char* const* getTokenNames() const");
2914        println("{"); tabs++;
2915        println("return "+grammar.getClassName()+"::tokenNames;");
2916        tabs--; println("}");
2917
2918        // Generate code for each rule in the grammar
2919
Enumeration JavaDoc ids = grammar.rules.elements();
2920        String JavaDoc ruleNameInits = "";
2921        while ( ids.hasMoreElements() ) {
2922            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
2923            if ( sym instanceof RuleSymbol) {
2924                RuleSymbol rs = (RuleSymbol)sym;
2925                genRuleHeader(rs, rs.references.size()==0);
2926            }
2927            exitIfError();
2928        }
2929        tabs = 0; println("public:"); tabs = 1;
2930        println(namespaceAntlr+"RefAST getAST()");
2931        println("{");
2932        if( usingCustomAST )
2933        {
2934            tabs++;
2935            println("return "+namespaceAntlr+"RefAST(returnAST);");
2936            tabs--;
2937        }
2938        else
2939        {
2940            tabs++;
2941            println("return returnAST;");
2942            tabs--;
2943        }
2944        println("}");
2945        println("");
2946
2947        tabs=0; println("protected:"); tabs=1;
2948        println(labeledElementASTType+" returnAST;");
2949        println(labeledElementASTType+" _retTree;");
2950
2951        // Make the rest private
2952
tabs=0;
2953        println("private:");
2954        tabs=1;
2955
2956        // Generate the token names
2957
println("static const char* tokenNames[];");
2958        // and how many there are of them
2959
_println("#ifndef NO_STATIC_CONSTS");
2960        println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
2961        _println("#else");
2962        println("enum {");
2963        println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
2964        println("};");
2965        _println("#endif");
2966
2967        // Generate the bitsets used throughout the grammar
2968
genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());
2969
2970        // Close class definition
2971
tabs=0;
2972        println("};");
2973        println("");
2974        if (nameSpace != null)
2975            nameSpace.emitClosures(currentOutput);
2976
2977        // Generate a guard wrapper
2978
println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
2979
2980        // Close the parser output stream
2981
currentOutput.close();
2982        currentOutput = null;
2983    }
2984    /// for convenience
2985
protected void genASTDeclaration( AlternativeElement el ) {
2986        genASTDeclaration( el, labeledElementASTType );
2987    }
2988    /// for convenience
2989
protected void genASTDeclaration( AlternativeElement el, String JavaDoc node_type ) {
2990        genASTDeclaration( el, el.getLabel(), node_type );
2991    }
2992    /// Generate (if not already done) a declaration for the AST for el.
2993
protected void genASTDeclaration( AlternativeElement el, String JavaDoc var_name, String JavaDoc node_type ) {
2994        // already declared?
2995
if( declaredASTVariables.contains(el) )
2996            return;
2997
2998        String JavaDoc init = labeledElementASTInit;
2999
3000        if (el instanceof GrammarAtom &&
3001             ((GrammarAtom)el).getASTNodeType() != null )
3002            init = "Ref"+((GrammarAtom)el).getASTNodeType()+"("+labeledElementASTInit+")";
3003
3004        // emit code
3005
println(node_type+" " + var_name + "_AST = "+init+";");
3006
3007        // mark as declared
3008
declaredASTVariables.put(el, el);
3009    }
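    /* Illustrative sketch: for a labeled element such as x:ID in a parser that is not
     * using a custom AST type (so labeledElementASTType/labeledElementASTInit hold the
     * antlr::RefAST defaults), the println() above emits roughly
     *
     *     ANTLR_USE_NAMESPACE(antlr)RefAST x_AST = ANTLR_USE_NAMESPACE(antlr)nullAST;
     *
     * and when the atom carries a heterogeneous AST node type such as "MyNode"
     * (hypothetical), the initializer is wrapped as RefMyNode(...) around that default.
     */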
3010    private void genLiteralsTest() {
3011        println("_ttype = testLiteralsTable(_ttype);");
3012    }
3013    private void genLiteralsTestForPartialToken() {
3014        println("_ttype = testLiteralsTable(text.substr(_begin, text.length()-_begin),_ttype);");
3015    }
3016    protected void genMatch(BitSet b) {
3017    }
3018    protected void genMatch(GrammarAtom atom) {
3019        if ( atom instanceof StringLiteralElement ) {
3020            if ( grammar instanceof LexerGrammar ) {
3021                genMatchUsingAtomText(atom);
3022            }
3023            else {
3024                genMatchUsingAtomTokenType(atom);
3025            }
3026        }
3027        else if ( atom instanceof CharLiteralElement ) {
3028            if ( grammar instanceof LexerGrammar ) {
3029                genMatchUsingAtomText(atom);
3030            }
3031            else {
3032                antlrTool.error("cannot ref character literals in grammar: "+atom);
3033            }
3034        }
3035        else if ( atom instanceof TokenRefElement ) {
3036            genMatchUsingAtomTokenType(atom);
3037        } else if (atom instanceof WildcardElement) {
3038            gen((WildcardElement)atom);
3039        }
3040    }
3041    protected void genMatchUsingAtomText(GrammarAtom atom) {
3042        // match() for trees needs the _t cursor
3043
String JavaDoc astArgs="";
3044        if (grammar instanceof TreeWalkerGrammar) {
3045            if( usingCustomAST )
3046                astArgs=namespaceAntlr+"RefAST"+"(_t),";
3047            else
3048                astArgs="_t,";
3049        }
3050
3051        // if in lexer and ! on element, save buffer index to kill later
3052
if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
3053            println("_saveIndex = text.length();");
3054        }
3055
3056        print(atom.not ? "matchNot(" : "match(");
3057        _print(astArgs);
3058
3059        // print out what to match
3060
if (atom.atomText.equals("EOF")) {
3061            // horrible hack to handle EOF case
3062
_print(namespaceAntlr+"Token::EOF_TYPE");
3063        }
3064        else
3065        {
3066            if( grammar instanceof LexerGrammar ) // lexer needs special handling
3067
{
3068                String JavaDoc cppstring = convertJavaToCppString( atom.atomText );
3069                _print(cppstring);
3070            }
3071            else
3072                _print(atom.atomText);
3073        }
3074
3075        _println(");");
3076
3077        if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
3078            println("text.erase(_saveIndex);"); // kill text atom put in buffer
3079
}
3080    }
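    /* Illustrative sketch: in a lexer rule, matching the string literal "begin" on an
     * element marked with '!' (or while saveText is off) comes out roughly as
     *
     *     _saveIndex = text.length();
     *     match("begin");                    // matchNot("begin") for a negated element
     *     text.erase(_saveIndex);
     *
     * while in a tree walker the call gains the _t cursor as its first argument, e.g.
     * match(ANTLR_USE_NAMESPACE(antlr)RefAST(_t), ...) when a custom AST is in use.
     */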
3081    protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
3082        // match() for trees needs the _t cursor
3083
String JavaDoc astArgs="";
3084        if (grammar instanceof TreeWalkerGrammar) {
3085            if( usingCustomAST )
3086                astArgs=namespaceAntlr+"RefAST"+"(_t),";
3087            else
3088                astArgs="_t,";
3089        }
3090
3091        // If the literal can be mangled, generate the symbolic constant instead
3092
String JavaDoc s = astArgs + getValueString(atom.getType());
3093
3094        // matching
3095
println( (atom.not ? "matchNot(" : "match(") + s + ");");
3096    }
3097    /** Generate the nextToken() rule.
3098     * nextToken() is a synthetic lexer rule that is the implicit OR of all
3099     * user-defined lexer rules.
3100     * @param RuleBlock
3101     */

3102    public void genNextToken() {
3103        // Are there any public rules? If not, then just generate a
3104
// fake nextToken().
3105
boolean hasPublicRules = false;
3106        for (int i = 0; i < grammar.rules.size(); i++) {
3107            RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
3108            if ( rs.isDefined() && rs.access.equals("public") ) {
3109                hasPublicRules = true;
3110                break;
3111            }
3112        }
3113        if (!hasPublicRules) {
3114            println("");
3115            println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken() { return "+namespaceAntlr+"RefToken(new "+namespaceAntlr+"CommonToken("+namespaceAntlr+"Token::EOF_TYPE, \"\")); }");
3116            println("");
3117            return;
3118        }
3119
3120        // Create the synthesized nextToken() rule
3121
RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
3122        // Define the nextToken rule symbol
3123
RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
3124        nextTokenRs.setDefined();
3125        nextTokenRs.setBlock(nextTokenBlk);
3126        nextTokenRs.access = "private";
3127        grammar.define(nextTokenRs);
3128        // Analyze the nextToken rule
3129
boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
3130
3131        // Generate the next token rule
3132
String JavaDoc filterRule=null;
3133        if ( ((LexerGrammar)grammar).filterMode ) {
3134            filterRule = ((LexerGrammar)grammar).filterRule;
3135        }
3136
3137        println("");
3138        println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken()");
3139        println("{");
3140        tabs++;
3141        println(namespaceAntlr+"RefToken theRetToken;");
3142        println("for (;;) {");
3143        tabs++;
3144        println(namespaceAntlr+"RefToken theRetToken;");
3145        println("int _ttype = "+namespaceAntlr+"Token::INVALID_TYPE;");
3146        if ( ((LexerGrammar)grammar).filterMode ) {
3147            println("setCommitToPath(false);");
3148            if ( filterRule!=null ) {
3149                // Here's a good place to ensure that the filter rule actually exists
3150
if ( !grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule)) ) {
3151                    grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer");
3152                }
3153                else {
3154                    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
3155                    if ( !rs.isDefined() ) {
3156                        grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer");
3157                    }
3158                    else if ( rs.access.equals("public") ) {
3159                        grammar.antlrTool.error("Filter rule "+filterRule+" must be protected");
3160                    }
3161                }
3162                println("int _m;");
3163                println("_m = mark();");
3164            }
3165        }
3166        println("resetText();");
3167
3168        // Generate try around whole thing to trap scanner errors
3169
println("try { // for lexical and char stream error handling");
3170        tabs++;
3171
3172        // Test for public lexical rules with empty paths
3173
for (int i=0; i<nextTokenBlk.getAlternatives().size(); i++) {
3174            Alternative a = nextTokenBlk.getAlternativeAt(i);
3175            if ( a.cache[1].containsEpsilon() ) {
3176                antlrTool.warning("found optional path in nextToken()");
3177            }
3178        }
3179
3180        // Generate the block
3181
String JavaDoc newline = System.getProperty("line.separator");
3182        CppBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
3183        String JavaDoc errFinish = "if (LA(1)==EOF_CHAR)"+newline+
3184            "\t\t\t\t{"+newline+"\t\t\t\t\tuponEOF();"+newline+
3185            "\t\t\t\t\t_returnToken = makeToken("+namespaceAntlr+"Token::EOF_TYPE);"+
3186            newline+"\t\t\t\t}";
3187        errFinish += newline+"\t\t\t\t";
3188        if ( ((LexerGrammar)grammar).filterMode ) {
3189            if ( filterRule==null ) {
3190                errFinish += "else {consume(); goto tryAgain;}";
3191            }
3192            else {
3193                errFinish += "else {"+newline+
3194                        "\t\t\t\t\tcommit();"+newline+
3195                        "\t\t\t\t\ttry {m"+filterRule+"(false);}"+newline+
3196                        "\t\t\t\t\tcatch("+namespaceAntlr+"RecognitionException& e) {"+newline+
3197                        "\t\t\t\t\t // catastrophic failure"+newline+
3198                        "\t\t\t\t\t reportError(e);"+newline+
3199                        "\t\t\t\t\t consume();"+newline+
3200                        "\t\t\t\t\t}"+newline+
3201                        "\t\t\t\t\tgoto tryAgain;"+newline+
3202                        "\t\t\t\t}";
3203            }
3204        }
3205        else {
3206            errFinish += "else {"+throwNoViable+"}";
3207        }
3208        genBlockFinish(howToFinish, errFinish);
3209
3210        // at this point a valid token has been matched, undo "mark" that was done
3211
if ( ((LexerGrammar)grammar).filterMode && filterRule!=null ) {
3212            println("commit();");
3213        }
3214
3215        // Generate literals test if desired
3216
// make sure _ttype is set first; note _returnToken must be
3217
// non-null as the rule was required to create it.
3218
println("if ( !_returnToken )"+newline+
3219                  "\t\t\t\tgoto tryAgain; // found SKIP token"+newline);
3220        println("_ttype = _returnToken->getType();");
3221        if ( ((LexerGrammar)grammar).getTestLiterals()) {
3222            genLiteralsTest();
3223        }
3224
3225        // return token created by rule reference in switch
3226
println("_returnToken->setType(_ttype);");
3227        println("return _returnToken;");
3228
3229        // Close try block
3230
tabs--;
3231        println("}");
3232        println("catch ("+namespaceAntlr+"RecognitionException& e) {");
3233        tabs++;
3234        if ( ((LexerGrammar)grammar).filterMode ) {
3235            if ( filterRule==null ) {
3236                println("if ( !getCommitToPath() ) {");
3237                tabs++;
3238                println("consume();");
3239                println("goto tryAgain;");
3240                tabs--;
3241                println("}");
3242            }
3243            else {
3244                println("if ( !getCommitToPath() ) {");
3245                tabs++;
3246                println("rewind(_m);");
3247                println("resetText();");
3248                println("try {m"+filterRule+"(false);}");
3249                println("catch("+namespaceAntlr+"RecognitionException& ee) {");
3250                println(" // horrendous failure: error in filter rule");
3251                println(" reportError(ee);");
3252                println(" consume();");
3253                println("}");
3254                // println("goto tryAgain;");
3255
tabs--;
3256                println("}");
3257                println("else");
3258            }
3259        }
3260        if ( nextTokenBlk.getDefaultErrorHandler() ) {
3261            println("{");
3262            tabs++;
3263            println("reportError(e);");
3264            println("consume();");
3265            tabs--;
3266            println("}");
3267        }
3268        else {
3269            // pass on to invoking routine
3270
tabs++;
3271            println("throw "+namespaceAntlr+"TokenStreamRecognitionException(e);");
3272             tabs--;
3273        }
3274
3275        // close CharStreamException try
3276
tabs--;
3277        println("}");
3278        println("catch ("+namespaceAntlr+"CharStreamIOException& csie) {");
3279        println("\tthrow "+namespaceAntlr+"TokenStreamIOException(csie.io);");
3280        println("}");
3281        println("catch ("+namespaceAntlr+"CharStreamException& cse) {");
3282        println("\tthrow "+namespaceAntlr+"TokenStreamException(cse.getMessage());");
3283        println("}");
3284
3285        // close for-loop
3286
_println("tryAgain:;");
3287        tabs--;
3288        println("}");
3289
3290        // close method nextToken
3291
tabs--;
3292        println("}");
3293        println("");
3294    }
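    /* Illustrative sketch (hypothetical non-filter lexer "MyLexer" using the default
     * error handler; antlr:: stands for ANTLR_USE_NAMESPACE(antlr)): the code above
     * produces a nextToken() of roughly this shape:
     *
     *     antlr::RefToken MyLexer::nextToken()
     *     {
     *         antlr::RefToken theRetToken;
     *         for (;;) {
     *             antlr::RefToken theRetToken;
     *             int _ttype = antlr::Token::INVALID_TYPE;
     *             resetText();
     *             try {   // for lexical and char stream error handling
     *                 // switch/if cascade over LA(1) produced by genCommonBlock() ...
     *                 if ( !_returnToken )
     *                     goto tryAgain; // found SKIP token
     *                 _ttype = _returnToken->getType();
     *                 _ttype = testLiteralsTable(_ttype);   // only when testLiterals is set
     *                 _returnToken->setType(_ttype);
     *                 return _returnToken;
     *             }
     *             catch (antlr::RecognitionException& e) {
     *                 reportError(e);
     *                 consume();
     *             }
     *             catch (antlr::CharStreamIOException& csie) {
     *                 throw antlr::TokenStreamIOException(csie.io);
     *             }
     *             catch (antlr::CharStreamException& cse) {
     *                 throw antlr::TokenStreamException(cse.getMessage());
     *             }
     *         tryAgain:;
     *         }
     *     }
     *
     * Filter mode additionally emits the mark()/rewind() bookkeeping and the call into
     * the filter rule shown in the branches above.
     */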
3295    /** Gen a named rule block.
3296     * ASTs are generated for each element of an alternative unless
3297     * the rule or the alternative have a '!' modifier.
3298     *
3299     * If an alternative defeats the default tree construction, it
3300     * must set <rule>_AST to the root of the returned AST.
3301     *
3302     * Each alternative that does automatic tree construction builds
3303     * up root and child list pointers in an ASTPair structure.
3304     *
3305     * A rule finishes by setting the returnAST variable from the
3306     * ASTPair.
3307     *
3308     * @param rule The name of the rule to generate
3309     * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
3310    */

3311    public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, String JavaDoc prefix) {
3312// tabs=1; // JavaCodeGenerator needs this
3313
if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
3314        if ( !s.isDefined() ) {
3315            antlrTool.error("undefined rule: "+ s.getId());
3316            return;
3317        }
3318
3319        // Generate rule return type, name, arguments
3320
RuleBlock rblk = s.getBlock();
3321
3322        currentRule = rblk;
3323        currentASTResult = s.getId();
3324
3325        // clear list of declared ast variables..
3326
declaredASTVariables.clear();
3327
3328        // Save the AST generation state, and set it to that of the rule
3329
boolean savegenAST = genAST;
3330        genAST = genAST && rblk.getAutoGen();
3331
3332        // boolean oldsaveTest = saveText;
3333
saveText = rblk.getAutoGen();
3334
3335        // print javadoc comment if any
3336
if ( s.comment!=null ) {
3337            _println(s.comment);
3338        }
3339
3340        // Gen method return type (note lexer return action set at rule creation)
3341
if (rblk.returnAction != null)
3342        {
3343            // Has specified return value
3344
_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
3345        } else {
3346            // No specified return value
3347
_print("void ");
3348        }
3349
3350        // Gen method name
3351
_print(prefix + s.getId() + "(");
3352
3353        // Additional rule parameters common to all rules for this grammar
3354
_print(commonExtraParams);
3355        if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
3356            _print(",");
3357        }
3358
3359        // Gen arguments
3360
if (rblk.argAction != null)
3361        {
3362            // Has specified arguments
3363
_println("");
3364// FIXME: make argAction also a token? Hmmmmm
3365
// genLineNo(rblk);
3366
tabs++;
3367
3368            // Process arguments for default arguments
3369
// newer gcc's don't accept these in two places (header/cpp)
3370
//
3371
// Old approach with StringBuffer gave trouble with gcj.
3372
//
3373
// RK: Actually this breaks with string default arguments containing
3374
// commas or equal signs. Then again the old StringBuffer method
3375
// suffered from the same.
3376
String JavaDoc oldarg = rblk.argAction;
3377            String JavaDoc newarg = "";
3378
3379            String JavaDoc comma = "";
3380            int eqpos = oldarg.indexOf( '=' );
3381            if( eqpos != -1 )
3382            {
3383                int cmpos = 0;
3384                while( cmpos != -1 )
3385                {
3386                    newarg = newarg + comma + oldarg.substring( 0, eqpos ).trim();
3387                    comma = ", ";
3388                    cmpos = oldarg.indexOf( ',', eqpos );
3389                    if( cmpos != -1 )
3390                    {
3391                        // cut off part we just handled
3392
oldarg = oldarg.substring( cmpos+1 ).trim();
3393                        eqpos = oldarg.indexOf( '=' );
3394                    }
3395                }
3396            }
3397            else
3398                newarg = oldarg;
3399
3400            println( newarg );
3401
3402// println(rblk.argAction);
3403
tabs--;
3404            print(") ");
3405// genLineNo2(); // gcc gives error on the brace... hope it works for the others too
3406
} else {
3407            // No specified arguments
3408
_print(") ");
3409        }
3410        _println("{");
3411        tabs++;
3412
3413        if (grammar.traceRules) {
3414            if ( grammar instanceof TreeWalkerGrammar ) {
3415                if ( usingCustomAST )
3416                    println("Tracer traceInOut(this,\""+ s.getId() +"\","+namespaceAntlr+"RefAST"+"(_t));");
3417                else
3418                    println("Tracer traceInOut(this,\""+ s.getId() +"\",_t);");
3419            }
3420            else {
3421                println("Tracer traceInOut(this, \""+ s.getId() +"\");");
3422            }
3423        }
3424
3425        // Convert return action to variable declaration
3426
if (rblk.returnAction != null)
3427        {
3428            genLineNo(rblk);
3429            println(rblk.returnAction + ";");
3430            genLineNo2();
3431        }
3432
3433        // print out definitions needed by rules for various grammar types
3434
if (!commonLocalVars.equals(""))
3435            println(commonLocalVars);
3436
3437        if ( grammar instanceof LexerGrammar ) {
3438            // RK: why is this here? It seems not supported in the rest of the
3439
// tool.
3440
// lexer rule default return value is the rule's token name
3441
// This is a horrible hack to support the built-in EOF lexer rule.
3442
if (s.getId().equals("mEOF"))
3443                println("_ttype = "+namespaceAntlr+"Token::EOF_TYPE;");
3444            else
3445                println("_ttype = "+ s.getId().substring(1)+";");
3446            println("int _saveIndex;"); // used for element! (so we can kill text matched for element)
3447
/*
3448            println("boolean old_saveConsumedInput=saveConsumedInput;");
3449            if ( !rblk.getAutoGen() ) { // turn off "save input" if ! on rule
3450                println("saveConsumedInput=false;");
3451            }
3452*/

3453        }
3454
3455        // if debugging, write code to mark entry to the rule
3456
if ( grammar.debuggingOutput)
3457            if (grammar instanceof ParserGrammar)
3458                println("fireEnterRule(" + ruleNum + ",0);");
3459            else if (grammar instanceof LexerGrammar)
3460                println("fireEnterRule(" + ruleNum + ",_ttype);");
3461
3462        // Generate trace code if desired
3463
// if ( grammar.debuggingOutput || grammar.traceRules) {
3464
// println("try { // debugging");
3465
// tabs++;
3466
// }
3467

3468        // Initialize AST variables
3469
if (grammar instanceof TreeWalkerGrammar) {
3470            // "Input" value for rule
3471
// println(labeledElementASTType+" " + s.getId() + "_AST_in = "+labeledElementASTType+"(_t);");
3472
println(labeledElementASTType+" " + s.getId() + "_AST_in = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;");
3473        }
3474        if (grammar.buildAST) {
3475            // Parser member used to pass AST returns from rule invocations
3476
println("returnAST = "+labeledElementASTInit+";");
3477            // Tracks AST construction
3478
println(namespaceAntlr+"ASTPair currentAST;"); // = new ASTPair();");
3479
// User-settable return value for rule.
3480
println(labeledElementASTType+" " + s.getId() + "_AST = "+labeledElementASTInit+";");
3481        }
3482
3483        genBlockPreamble(rblk);
3484        genBlockInitAction(rblk);
3485        println("");
3486
3487        // Search for an unlabeled exception specification attached to the rule
3488
ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
3489
3490        // Generate try block around the entire rule for error handling
3491
if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
3492            println("try { // for error handling");
3493            tabs++;
3494        }
3495
3496        // Generate the alternatives
3497
if ( rblk.alternatives.size()==1 )
3498        {
3499            // One alternative -- use simple form
3500
Alternative alt = rblk.getAlternativeAt(0);
3501            String JavaDoc pred = alt.semPred;
3502            if ( pred!=null )
3503                genSemPred(pred, currentRule.line);
3504            if (alt.synPred != null) {
3505                antlrTool.warning(
3506                    "Syntactic predicate ignored for single alternative",
3507                    grammar.getFilename(),
3508                    alt.synPred.getLine(),
3509                    alt.synPred.getColumn()
3510                );
3511            }
3512            genAlt(alt, rblk);
3513        }
3514        else
3515        {
3516            // Multiple alternatives -- generate complex form
3517
boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
3518
3519            CppBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
3520            genBlockFinish(howToFinish, throwNoViable);
3521        }
3522
3523        // Generate catch phrase for error handling
3524
if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
3525            // Close the try block
3526
tabs--;
3527            println("}");
3528        }
3529
3530        // Generate user-defined or default catch phrases
3531
if (unlabeledUserSpec != null)
3532        {
3533            genErrorHandler(unlabeledUserSpec);
3534        }
3535        else if (rblk.getDefaultErrorHandler())
3536        {
3537            // Generate default catch phrase
3538
println("catch (" + exceptionThrown + "& ex) {");
3539            tabs++;
3540            // Generate code to handle error if not guessing
3541
if (grammar.hasSyntacticPredicate) {
3542                println("if( inputState->guessing == 0 ) {");
3543                tabs++;
3544            }
3545            println("reportError(ex);");
3546            if ( !(grammar instanceof TreeWalkerGrammar) )
3547            {
3548                // Generate code to consume until token in k==1 follow set
3549
Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
3550                String JavaDoc followSetName = getBitsetName(markBitsetForGen(follow.fset));
3551                println("consume();");
3552                println("consumeUntil(" + followSetName + ");");
3553            }
3554            else
3555            {
3556                // Just consume one token
3557
println("if ( _t != "+labeledElementASTInit+" )");
3558                tabs++;
3559                println("_t = _t->getNextSibling();");
3560                tabs--;
3561            }
3562            if (grammar.hasSyntacticPredicate)
3563            {
3564                tabs--;
3565                // When guessing, rethrow exception
3566
println("} else {");
3567                tabs++;
3568                println("throw;");
3569                tabs--;
3570                println("}");
3571            }
3572            // Close catch phrase
3573
tabs--;
3574            println("}");
3575        }
3576
3577        // Squirrel away the AST "return" value
3578
if (grammar.buildAST) {
3579            println("returnAST = " + s.getId() + "_AST;");
3580        }
3581
3582        // Set return tree value for tree walkers
3583
if ( grammar instanceof TreeWalkerGrammar ) {
3584            println("_retTree = _t;");
3585        }
3586
3587        // Generate literals test for lexer rules so marked
3588
if (rblk.getTestLiterals()) {
3589            if ( s.access.equals("protected") ) {
3590                genLiteralsTestForPartialToken();
3591            }
3592            else {
3593                genLiteralsTest();
3594            }
3595        }
3596
3597        // if doing a lexer rule, dump code to create token if necessary
3598
if ( grammar instanceof LexerGrammar ) {
3599            println("if ( _createToken && _token=="+namespaceAntlr+"nullToken && _ttype!="+namespaceAntlr+"Token::SKIP ) {");
3600            println(" _token = makeToken(_ttype);");
3601            println(" _token->setText(text.substr(_begin, text.length()-_begin));");
3602            println("}");
3603            println("_returnToken = _token;");
3604            // It should be easy for an optimizing compiler to realize this does nothing
3605
// but it avoids the warning about the variable being unused.
3606
println("_saveIndex=0;");
3607        }
3608
3609        // Gen the return statement if there is one (lexer has hard-wired return action)
3610
if (rblk.returnAction != null) {
3611            println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
3612        }
3613
3614// if ( grammar.debuggingOutput || grammar.traceRules) {
3615
//// tabs--;
3616
//// println("} finally { // debugging");
3617
//// tabs++;
3618
//
3619
// // Generate trace code if desired
3620
// if ( grammar.debuggingOutput)
3621
// if (grammar instanceof ParserGrammar)
3622
// println("fireExitRule(" + ruleNum + ",0);");
3623
// else if (grammar instanceof LexerGrammar)
3624
// println("fireExitRule(" + ruleNum + ",_ttype);");
3625
//
3626
//// if (grammar.traceRules) {
3627
//// if ( grammar instanceof TreeWalkerGrammar ) {
3628
//// println("traceOut(\""+ s.getId() +"\",_t);");
3629
//// }
3630
//// else {
3631
//// println("traceOut(\""+ s.getId() +"\");");
3632
//// }
3633
//// }
3634
////
3635
//// tabs--;
3636
//// println("}");
3637
// }
3638

3639        tabs--;
3640        println("}");
3641        println("");
3642
3643        // Restore the AST generation state
3644
genAST = savegenAST;
3645
3646        // restore char save state
3647
// saveText = oldsaveTest;
3648
}
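    /* Illustrative sketch (hypothetical rule "statement" in a parser grammar "MyParser"
     * with buildAST on and the default error handler; antlr:: stands for
     * ANTLR_USE_NAMESPACE(antlr)): the method generated above looks roughly like
     *
     *     void MyParser::statement() {
     *         returnAST = antlr::nullAST;
     *         antlr::ASTPair currentAST;
     *         antlr::RefAST statement_AST = antlr::nullAST;
     *         try {      // for error handling
     *             // alternatives emitted by genAlt()/genCommonBlock() ...
     *         }
     *         catch (antlr::RecognitionException& ex) {
     *             reportError(ex);
     *             consume();
     *             consumeUntil(_tokenSet_0);   // k==1 FOLLOW set computed above
     *         }
     *         returnAST = statement_AST;
     *     }
     *
     * Lexer rules additionally get the _createToken/_returnToken machinery, and tree
     * walker rules get the _t/_retTree bookkeeping shown in the branches above.
     */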
3649    public void genRuleHeader(RuleSymbol s, boolean startSymbol) {
3650        tabs=1;
3651        if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleHeader("+ s.getId() +")");
3652        if ( !s.isDefined() ) {
3653            antlrTool.error("undefined rule: "+ s.getId());
3654            return;
3655        }
3656
3657        // Generate rule return type, name, arguments
3658
RuleBlock rblk = s.getBlock();
3659        currentRule = rblk;
3660        currentASTResult = s.getId();
3661
3662        // Save the AST generation state, and set it to that of the rule
3663
boolean savegenAST = genAST;
3664        genAST = genAST && rblk.getAutoGen();
3665
3666        // boolean oldsaveTest = saveText;
3667
saveText = rblk.getAutoGen();
3668
3669        // Gen method access
3670
print(s.access + ": ");
3671
3672        // Gen method return type (note lexer return action set at rule creation)
3673
if (rblk.returnAction != null)
3674        {
3675            // Has specified return value
3676
_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
3677        } else {
3678            // No specified return value
3679
_print("void ");
3680        }
3681
3682        // Gen method name
3683
_print(s.getId() + "(");
3684
3685        // Additional rule parameters common to all rules for this grammar
3686
_print(commonExtraParams);
3687        if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
3688            _print(",");
3689        }
3690
3691        // Gen arguments
3692
if (rblk.argAction != null)
3693        {
3694            // Has specified arguments
3695
_println("");
3696            tabs++;
3697            println(rblk.argAction);
3698            tabs--;
3699            print(")");
3700        } else {
3701            // No specified arguments
3702
_print(")");
3703        }
3704        _println(";");
3705
3706        tabs--;
3707
3708        // Restore the AST generation state
3709
genAST = savegenAST;
3710
3711        // restore char save state
3712
// saveText = oldsaveTest;
3713
}
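    /* Illustrative sketch: for a parser rule "expr" returning int and taking one
     * argument (hypothetical), the header declaration emitted above reads roughly
     *
     *     public: int expr(
     *         int precedence
     *     );
     *
     * and simply "public: void statement();" for a void rule without arguments.
     */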
3714    private void GenRuleInvocation(RuleRefElement rr) {
3715        // dump rule name
3716
_print(rr.targetRule + "(");
3717
3718        // lexers must tell rule if it should set _returnToken
3719
if ( grammar instanceof LexerGrammar ) {
3720            // if labeled, could access Token, so tell rule to create
3721
if ( rr.getLabel() != null ) {
3722                _print("true");
3723            }
3724            else {
3725                _print("false");
3726            }
3727            if (commonExtraArgs.length() != 0 || rr.args!=null ) {
3728                _print(",");
3729            }
3730        }
3731
3732        // Extra arguments common to all rules for this grammar
3733
_print(commonExtraArgs);
3734        if (commonExtraArgs.length() != 0 && rr.args!=null ) {
3735            _print(",");
3736        }
3737
3738        // Process arguments to method, if any
3739
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
3740        if (rr.args != null)
3741        {
3742            // When not guessing, execute user arg action
3743
ActionTransInfo tInfo = new ActionTransInfo();
3744            // FIXME: fix line number passed to processActionForTreeSpecifiers here..
3745
// this one might be a bit off..
3746
String JavaDoc args = processActionForSpecialSymbols(rr.args, rr.line,
3747                                                                        currentRule, tInfo);
3748            if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null )
3749            {
3750                antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #"+
3751                    currentRule.getRuleName()+" on line "+rr.getLine());
3752            }
3753            _print(args);
3754
3755            // Warn if the rule accepts no arguments
3756
if (rs.block.argAction == null)
3757            {
3758                antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments",
3759                    grammar.getFilename(),
3760                    rr.getLine(), rr.getColumn());
3761            }
3762        }
3763        else
3764        {
3765            // For C++, no warning if rule has parameters, because there may be default
3766
// values for all of the parameters
3767
//if (rs.block.argAction != null) {
3768
// tool.warning("Missing parameters on reference to rule "+rr.targetRule, rr.getLine());
3769
//}
3770
}
3771        _println(");");
3772
3773        // move down to the first child while parsing
3774
if ( grammar instanceof TreeWalkerGrammar ) {
3775            println("_t = _retTree;");
3776        }
3777    }
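    /* Illustrative sketch: a reference to a lexer rule DIGIT comes out as "mDIGIT(false);"
     * (or "mDIGIT(true);" when the reference is labeled, so the rule creates a Token),
     * a parser rule reference as "expr();" plus any user arguments, and in a tree walker
     * the call is followed by "_t = _retTree;" to advance the tree cursor.
     */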
3778    protected void genSemPred(String JavaDoc pred, int line) {
3779        // translate $ and # references
3780
ActionTransInfo tInfo = new ActionTransInfo();
3781        pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
3782        // ignore translation info...we don't need to do anything with it.
3783
String JavaDoc escapedPred = charFormatter.escapeString(pred);
3784
3785        // if debugging, wrap the semantic predicate evaluation in a method
3786
// that can tell SemanticPredicateListeners the result
3787
if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
3788              (grammar instanceof LexerGrammar)))
3789            pred = "fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.VALIDATING," //FIXME
3790
+ addSemPred(escapedPred) + "," + pred + ")";
3791        println("if (!(" + pred + "))");
3792        tabs++;
3793        println("throw "+namespaceAntlr+"SemanticException(\"" + escapedPred + "\");");
3794        tabs--;
3795    }
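    /* Illustrative sketch: a semantic predicate {n < 10}? (hypothetical) is rendered as
     *
     *     if (!(n < 10))
     *         throw ANTLR_USE_NAMESPACE(antlr)SemanticException("n < 10");
     *
     * with the predicate text escaped via charFormatter.escapeString() for the message.
     */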
3796    /** Write an array of Strings which are the semantic predicate
3797     * expressions. The debugger will reference them by number only
3798     */

3799    protected void genSemPredMap(String JavaDoc prefix) {
3800        Enumeration JavaDoc e = semPreds.elements();
3801        println("const char* " + prefix + "_semPredNames[] = {");
3802        tabs++;
3803        while(e.hasMoreElements())
3804            println("\""+e.nextElement()+"\",");
3805        println("0");
3806        tabs--;
3807        println("};");
3808    }
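    /* Illustrative sketch: with debugging output enabled and a hypothetical prefix of
     * "MyParser::", the table above comes out roughly as
     *
     *     const char* MyParser::_semPredNames[] = {
     *         "n < 10",
     *         0
     *     };
     */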
3809    protected void genSynPred(SynPredBlock blk, String JavaDoc lookaheadExpr) {
3810        if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");
3811
3812        // Dump synpred result variable
3813
println("bool synPredMatched" + blk.ID + " = false;");
3814        // Gen normal lookahead test
3815
println("if (" + lookaheadExpr + ") {");
3816        tabs++;
3817
3818        // Save input state
3819
if ( grammar instanceof TreeWalkerGrammar ) {
3820            println(labeledElementType + " __t" + blk.ID + " = _t;");
3821        }
3822        else {
3823            println("int _m" + blk.ID + " = mark();");
3824        }
3825
3826        // Once inside the try, assume synpred works unless exception caught
3827
println("synPredMatched" + blk.ID + " = true;");
3828        println("inputState->guessing++;");
3829
3830        // if debugging, tell listeners that a synpred has started
3831
if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
3832             (grammar instanceof LexerGrammar))) {
3833            println("fireSyntacticPredicateStarted();");
3834        }
3835
3836        syntacticPredLevel++;
3837        println("try {");
3838        tabs++;
3839        gen((AlternativeBlock)blk); // gen code to test predicate
3840
tabs--;
3841        //println("System.out.println(\"pred "+blk+" succeeded\");");
3842
println("}");
3843        println("catch (" + exceptionThrown + "& pe) {");
3844        tabs++;
3845        println("synPredMatched"+blk.ID+" = false;");
3846        //println("System.out.println(\"pred "+blk+" failed\");");
3847
tabs--;
3848        println("}");
3849
3850        // Restore input state
3851
if ( grammar instanceof TreeWalkerGrammar ) {
3852            println("_t = __t"+blk.ID+";");
3853        }
3854        else {
3855            println("rewind(_m"+blk.ID+");");
3856        }
3857
3858        println("inputState->guessing--;");
3859
3860        // if debugging, tell listeners how the synpred turned out
3861
if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
3862             (grammar instanceof LexerGrammar))) {
3863            println("if (synPredMatched" + blk.ID +")");
3864            println(" fireSyntacticPredicateSucceeded();");
3865            println("else");
3866            println(" fireSyntacticPredicateFailed();");
3867        }
3868
3869        syntacticPredLevel--;
3870        tabs--;
3871
3872        // Close lookahead test
3873
println("}");
3874
3875        // Test synpred result
3876
println("if ( synPredMatched"+blk.ID+" ) {");
3877    }
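    /* Illustrative sketch (parser case; block ID 12 and the lookahead test are
     * hypothetical):
     *
     *     bool synPredMatched12 = false;
     *     if ((LA(1) == LPAREN)) {
     *         int _m12 = mark();
     *         synPredMatched12 = true;
     *         inputState->guessing++;
     *         try {
     *             // code generated for the predicate block
     *         }
     *         catch (ANTLR_USE_NAMESPACE(antlr)RecognitionException& pe) {
     *             synPredMatched12 = false;
     *         }
     *         rewind(_m12);
     *         inputState->guessing--;
     *     }
     *     if ( synPredMatched12 ) {
     *
     * Tree walkers save and restore the _t cursor instead of calling mark()/rewind().
     */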
3878    /** Generate a static array containing the names of the tokens,
3879     * indexed by the token type values. This static array is used
3880     * to format error messages so that the token identifiers or literal
3881     * strings are displayed instead of the token numbers.
3882     *
3883     * If a lexical rule has a paraphrase, use it rather than the
3884     * token label.
3885     */

3886    public void genTokenStrings(String JavaDoc prefix) {
3887        // Generate a string for each token. This creates a static
3888
// array of Strings indexed by token type.
3889
// println("");
3890
println("const char* " + prefix + "tokenNames[] = {");
3891        tabs++;
3892
3893        // Walk the token vocabulary and generate a Vector of strings
3894
// from the tokens.
3895
Vector v = grammar.tokenManager.getVocabulary();
3896        for (int i = 0; i < v.size(); i++)
3897        {
3898            String JavaDoc s = (String JavaDoc)v.elementAt(i);
3899            if (s == null)
3900            {
3901                s = "<"+String.valueOf(i)+">";
3902            }
3903            if ( !s.startsWith("\"") && !s.startsWith("<") ) {
3904                TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
3905                if ( ts!=null && ts.getParaphrase()!=null ) {
3906                    s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
3907                }
3908            }
3909            print(charFormatter.literalString(s));
3910            _println(",");
3911        }
3912        println("0");
3913
3914        // Close the string array initializer
3915
tabs--;
3916        println("};");
3917    }
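    /* Illustrative sketch (hypothetical prefix "MyParser::" and a small vocabulary):
     *
     *     const char* MyParser::tokenNames[] = {
     *         "<0>",
     *         "EOF",
     *         "<2>",
     *         "NULL_TREE_LOOKAHEAD",
     *         "an identifier",        // paraphrase used instead of the rule name
     *         "\"begin\"",
     *         0
     *     };
     */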
3918    /** Generate the token types C++ file */
3919    protected void genTokenTypes(TokenManager tm) throws IOException JavaDoc {
3920        // Open the token output header file and set the currentOutput stream
3921
outputFile = tm.getName() + TokenTypesFileSuffix+".hpp";
3922        outputLine = 1;
3923        currentOutput = antlrTool.openOutputFile(outputFile);
3924        //SAS: changed for proper text file io
3925

3926        tabs = 0;
3927
3928        // Generate a guard wrapper
3929
println("#ifndef INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
3930        println("#define INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
3931        println("");
3932
3933        if (nameSpace != null)
3934            nameSpace.emitDeclarations(currentOutput);
3935
3936        // Generate the header common to all C++ files
3937
genHeader(outputFile);
3938
3939        // Encapsulate the definitions in an interface. This can be done
3940
// because they are all constants.
3941
println("");
3942        println("#ifndef CUSTOM_API");
3943        println("# define CUSTOM_API");
3944        println("#endif");
3945        println("");
3946        // In the case that the .hpp is included from C source (flexLexer!)
3947
// we just turn things into a plain enum
3948
println("#ifdef __cplusplus");
3949        println("struct CUSTOM_API " + tm.getName() + TokenTypesFileSuffix+" {");
3950        println("#endif");
3951        tabs++;
3952        println("enum {");
3953        tabs++;
3954
3955        // Generate a definition for each token type
3956
Vector v = tm.getVocabulary();
3957
3958        // Do special tokens manually
3959
println("EOF_ = " + Token.EOF_TYPE + ",");
3960
3961        // Move the other special token to the end, so we can solve
3962
// the superfluous comma problem easily
3963

3964        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
3965            String JavaDoc s = (String JavaDoc)v.elementAt(i);
3966            if (s != null) {
3967                if ( s.startsWith("\"") ) {
3968                    // a string literal
3969
StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
3970                    if ( sl==null ) {
3971                        antlrTool.panic("String literal "+s+" not in symbol table");
3972                    }
3973                    else if ( sl.label != null ) {
3974                        println(sl.label + " = " + i + ",");
3975                    }
3976                    else {
3977                        String JavaDoc mangledName = mangleLiteral(s);
3978                        if (mangledName != null) {
3979                            // We were able to create a meaningful mangled token name
3980
println(mangledName + " = " + i + ",");
3981                            // if no label specified, make the label equal to the mangled name
3982
sl.label = mangledName;
3983                        }
3984                        else {
3985                            println("// " + s + " = " + i);
3986                        }
3987                    }
3988                }
3989                else if ( !s.startsWith("<") ) {
3990                    println(s + " = " + i + ",");
3991                }
3992            }
3993        }
3994
3995        // Moved from above
3996
println("NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD);
3997
3998        // Close the enum
3999
tabs--;
4000        println("};");
4001
4002        // Close the interface
4003
tabs--;
4004        println("#ifdef __cplusplus");
4005        println("};");
4006        println("#endif");
4007
4008        if (nameSpace != null)
4009            nameSpace.emitClosures(currentOutput);
4010
4011        // Generate a guard wrapper
4012
println("#endif /*INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_*/");
4013
4014        // Close the tokens output file
4015
currentOutput.close();
4016        currentOutput = null;
4017        exitIfError();
4018    }
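    /* Illustrative sketch: for a token manager named "MyParser" (hypothetical), the
     * generated MyParserTokenTypes.hpp contains roughly
     *
     *     #ifndef CUSTOM_API
     *     # define CUSTOM_API
     *     #endif
     *
     *     #ifdef __cplusplus
     *     struct CUSTOM_API MyParserTokenTypes {
     *     #endif
     *         enum {
     *             EOF_ = 1,
     *             ID = 4,                  // user tokens start at Token.MIN_USER_TYPE
     *             LITERAL_begin = 5,       // mangled from the string literal "begin"
     *             NULL_TREE_LOOKAHEAD = 3
     *         };
     *     #ifdef __cplusplus
     *     };
     *     #endif
     *
     * wrapped in the usual include guard and namespace declarations; the token names and
     * values other than EOF_ and NULL_TREE_LOOKAHEAD are hypothetical here.
     */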
4019    /** Process a string for a simple expression for use in xx/action.g
4020     * it is used to cast simple tokens/references to the right type for
4021     * the generated language. Basically called for every element in
4022     * the vector passed to getASTCreateString(Vector v)
4023     * @param str A String.
4024     */

4025    public String JavaDoc processStringForASTConstructor( String JavaDoc str )
4026    {
4027        if( usingCustomAST &&
4028            ((grammar instanceof TreeWalkerGrammar) ||
4029             (grammar instanceof ParserGrammar)) &&
4030            !(grammar.tokenManager.tokenDefined(str) ) )
4031        {
4032// System.out.println("processStringForASTConstructor: "+str+" with cast");
4033
return namespaceAntlr+"RefAST("+str+")";
4034        }
4035        else
4036        {
4037// System.out.println("processStringForASTConstructor: "+str);
4038
return str;
4039        }
4040    }
4041    /** Get a string for an expression to generate creation of an AST subtree.
4042      * @param v A Vector of String, where each element is an expression
4043      * in the target language yielding an AST node.
4044      */

4045    public String JavaDoc getASTCreateString(Vector v) {
4046        if (v.size() == 0) {
4047            return "";
4048        }
4049        StringBuffer JavaDoc buf = new StringBuffer JavaDoc();
4050        // the labeledElementASTType here can probably be a cast or nothing
4051
// in the case of ! usingCustomAST
4052
buf.append(labeledElementASTType+
4053                    "(astFactory->make((new "+namespaceAntlr+
4054                      "ASTArray("+v.size()+"))");
4055        for (int i = 0; i < v.size(); i++) {
4056            buf.append("->add("+ v.elementAt(i) + ")");
4057        }
4058        buf.append("))");
4059        return buf.toString();
4060    }
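    /* Illustrative sketch: for a three-element vector of AST expressions (hypothetical
     * names; antlr:: stands for ANTLR_USE_NAMESPACE(antlr)), the returned string reads
     * roughly
     *
     *     antlr::RefAST(astFactory->make((new antlr::ASTArray(3))->add(op_AST)->add(lhs_AST)->add(rhs_AST)))
     *
     * where the outer cast is the labeledElementASTType of the grammar.
     */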
4061    /** Get a string for an expression to generate creation of an AST node
4062     * @param str The arguments to the AST constructor
4063     */

4064    public String JavaDoc getASTCreateString(GrammarAtom atom, String JavaDoc str) {
4065        if ( atom!=null && atom.getASTNodeType() != null ) {
4066
4067            // this atom is using a heterogeneous AST type.
4068
// make note of the factory needed to generate it..
4069
// later this is inserted into the initializeFactory method.
4070
astTypes.appendElement("factory.registerFactory("+
4071                                      atom.getType() + ", \""+atom.getASTNodeType()+
4072                                      "\", "+atom.getASTNodeType()+"::factory);");
4073
4074            // after above init the factory knows what to generate...
4075
return "astFactory->create("+str+")";
4076        }
4077        else
4078        {
4079            // FIXME: This is *SO* ugly! but it will have to do for now...
4080
// 2.7.2 will have better I hope
4081
// this is due to the usage of getASTCreateString from inside
4082
// actions/cpp/action.g
4083
boolean is_constructor = false;
4084            if( str.indexOf(',') != -1 )
4085                is_constructor = grammar.tokenManager.tokenDefined(str.substring(0,str.indexOf(',')));
4086
4087// System.out.println("getAstCreateString(as): "+str+" "+grammar.tokenManager.tokenDefined(str));
4088
if( usingCustomAST &&
4089               (grammar instanceof TreeWalkerGrammar) &&
4090                !(grammar.tokenManager.tokenDefined(str) ) &&
4091                ! is_constructor )
4092                return "astFactory->create("+namespaceAntlr+"RefAST("+str+"))";
4093            else
4094                return "astFactory->create("+str+")";
4095        }
4096    }
4097
4098    /** Get a string for an expression to generate creation of an AST node
4099     * @param str The arguments to the AST constructor
4100     */

4101    public String JavaDoc getASTCreateString(String JavaDoc str) {
4102// System.out.println("getAstCreateString(str): "+str+" "+grammar.tokenManager.tokenDefined(str));
4103
if( usingCustomAST )
4104            return labeledElementASTType+"(astFactory->create("+namespaceAntlr+"RefAST("+str+")))";
4105        else
4106            return "astFactory->create("+str+")";
4107    }
4108
4109    protected String JavaDoc getLookaheadTestExpression(Lookahead[] look, int k) {
4110        StringBuffer JavaDoc e = new StringBuffer JavaDoc(100);
4111        boolean first = true;
4112
4113        e.append("(");
4114        for (int i = 1; i <= k; i++) {
4115            BitSet p = look[i].fset;
4116            if (!first) {
4117                e.append(") && (");
4118            }
4119            first = false;
4120
4121            // Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
4122
// There is no way to predict what that token would be. Just
4123
// allow anything instead.
4124
if (look[i].containsEpsilon()) {
4125                e.append("true");
4126            } else {
4127                e.append(getLookaheadTestTerm(i, p));
4128            }
4129        }
4130        e.append(")");
4131
4132        return e.toString();
4133    }
4134    /** Generate a lookahead test expression for an alternate. This
4135     * will be a series of tests joined by '&&' and enclosed by '()',
4136     * the number of such tests being determined by the depth of the lookahead.
4137     */

4138    protected String JavaDoc getLookaheadTestExpression(Alternative alt, int maxDepth) {
4139        int depth = alt.lookaheadDepth;
4140        if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
4141            // if the decision is nondeterministic, do the best we can: LL(k)
4142
// any predicates that are around will be generated later.
4143
depth = grammar.maxk;
4144        }
4145
4146        if ( maxDepth==0 ) {
4147            // empty lookahead can result from alt with sem pred
4148
// that can see end of token. E.g., A : {pred}? ('a')? ;
4149
return "true";
4150        }
4151
4152/*
4153boolean first = true;
4154        for (int i=1; i<=depth && i<=maxDepth; i++) {
4155            BitSet p = alt.cache[i].fset;
4156            if (!first) {
4157                e.append(") && (");
4158            }
4159            first = false;
4160
4161            // Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
4162            // There is no way to predict what that token would be. Just
4163            // allow anything instead.
4164            if ( alt.cache[i].containsEpsilon() ) {
4165                e.append("true");
4166            }
4167            else {
4168                e.append(getLookaheadTestTerm(i, p));
4169            }
4170        }
4171
4172        e.append(")");
4173*/

4174
4175        return "(" + getLookaheadTestExpression(alt.cache,depth) + ")";
4176    }
4177    /**Generate a depth==1 lookahead test expression given the BitSet.
4178     * This may be one of:
4179     * 1) a series of 'x==X||' tests
4180     * 2) a range test using >= && <= where possible,
4181     * 3) a bitset membership test for complex comparisons
4182     * @param k The lookahead level
4183     * @param p The lookahead set for level k
4184     */

4185    protected String JavaDoc getLookaheadTestTerm(int k, BitSet p) {
4186        // Determine the name of the item to be compared
4187
String JavaDoc ts = lookaheadString(k);
4188
4189        // Generate a range expression if possible
4190
int[] elems = p.toArray();
4191        if (elementsAreRange(elems)) {
4192            return getRangeExpression(k, elems);
4193        }
4194
4195        // Generate a bitset membership test if possible
4196
StringBuffer JavaDoc e;
4197        int degree = p.degree();
4198        if ( degree == 0 ) {
4199            return "true";
4200        }
4201
4202        if (degree >= bitsetTestThreshold) {
4203            int bitsetIdx = markBitsetForGen(p);
4204            return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
4205        }
4206
4207        // Otherwise, generate the long-winded series of "x==X||" tests
4208
e = new StringBuffer JavaDoc();
4209        for (int i = 0; i < elems.length; i++) {
4210            // Get the compared-to item (token or character value)
4211
String JavaDoc cs = getValueString(elems[i]);
4212
4213            // Generate the element comparison
4214
if( i > 0 ) e.append(" || ");
4215            e.append(ts);
4216            e.append(" == ");
4217            e.append(cs);
4218        }
4219        return e.toString();
4220    }
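    /* Illustrative sketch of the three shapes this can return (hypothetical tokens):
     *
     *     (LA(1) >= '0' && LA(1) <= '9')           // contiguous range, via getRangeExpression()
     *     LA(1) == ID || LA(1) == LITERAL_begin    // small set: series of equality tests
     *     _tokenSet_3.member(LA(1))                // large set: bitset membership test
     *
     * In a tree walker the compared item is _t->getType() instead of LA(k).
     */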
4221    /** Return an expression for testing a contiguous range of elements
4222     * @param k The lookahead level
4223     * @param elems The elements representing the set, usually from BitSet.toArray().
4224     * @return String containing test expression.
4225     */

4226    public String JavaDoc getRangeExpression(int k, int[] elems) {
4227        if (!elementsAreRange(elems)) {
4228            antlrTool.panic("getRangeExpression called with non-range");
4229        }
4230        int begin = elems[0];
4231        int end = elems[elems.length-1];
4232        return
4233            "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
4234              lookaheadString(k) + " <= " + getValueString(end) + ")";
4235    }
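
As a hedged illustration of the contiguous-range form produced above, the following standalone sketch mirrors getRangeExpression() but uses raw int values in place of getValueString(); the lookahead expression "LA(1)" and the element set {4,5,6,7} are assumptions for the example only.

// Illustrative only: the range form of the lookahead test.
public class RangeExpressionSketch {
    static String rangeExpression(String la, int[] elems) {
        return "(" + la + " >= " + elems[0] + " && "
                   + la + " <= " + elems[elems.length - 1] + ")";
    }
    public static void main(String[] args) {
        // prints: (LA(1) >= 4 && LA(1) <= 7)
        System.out.println(rangeExpression("LA(1)", new int[]{4, 5, 6, 7}));
    }
}
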
4236    /** getValueString: get a string representation of a token or char value
4237     * @param value The token or char value
4238     */

4239    private String JavaDoc getValueString(int value) {
4240        String JavaDoc cs;
4241        if ( grammar instanceof LexerGrammar ) {
4242            cs = charFormatter.literalChar(value);
4243        }
4244        else
4245        {
4246            TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
4247            if ( ts == null ) {
4248                return ""+value; // return token type as string
4249                // tool.panic("vocabulary for token type " + value + " is null");
4250            }
4251            String JavaDoc tId = ts.getId();
4252            if ( ts instanceof StringLiteralSymbol ) {
4253                // if string literal, use predefined label if any
4254                // if no predefined, try to mangle into LITERAL_xxx.
4255                // if can't mangle, use int value as last resort
4256                StringLiteralSymbol sl = (StringLiteralSymbol)ts;
4257                String JavaDoc label = sl.getLabel();
4258                if ( label!=null ) {
4259                    cs = label;
4260                }
4261                else {
4262                    cs = mangleLiteral(tId);
4263                    if (cs == null) {
4264                        cs = String.valueOf(value);
4265                    }
4266                }
4267            }
4268            else {
4269                if ( tId.equals("EOF") )
4270                    cs = namespaceAntlr+"Token::EOF_TYPE";
4271                else
4272                    cs = tId;
4273            }
4274        }
4275        return cs;
4276    }
4277    /**Is the lookahead for this alt empty? */
4278    protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
4279        int depth = alt.lookaheadDepth;
4280        if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
4281            depth = grammar.maxk;
4282        }
4283        for (int i=1; i<=depth && i<=maxDepth; i++) {
4284            BitSet p = alt.cache[i].fset;
4285            if (p.degree() != 0) {
4286                return false;
4287            }
4288        }
4289        return true;
4290    }
4291    private String JavaDoc lookaheadString(int k) {
4292        if (grammar instanceof TreeWalkerGrammar) {
4293            return "_t->getType()";
4294        }
4295        return "LA(" + k + ")";
4296    }
4297    /** Mangle a string literal into a meaningful token name. This is
4298      * only possible for literals that are all characters. The resulting
4299      * mangled literal name is literalsPrefix with the text of the literal
4300      * appended.
4301      * @return A string representing the mangled literal, or null if not possible.
4302      */

4303    private String JavaDoc mangleLiteral(String JavaDoc s) {
4304        String JavaDoc mangled = antlrTool.literalsPrefix;
4305        for (int i = 1; i < s.length()-1; i++) {
4306            if (!Character.isLetter(s.charAt(i)) &&
4307                 s.charAt(i) != '_') {
4308                return null;
4309            }
4310            mangled += s.charAt(i);
4311        }
4312        if ( antlrTool.upperCaseMangledLiterals ) {
4313            mangled = mangled.toUpperCase();
4314        }
4315        return mangled;
4316    }
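
A minimal sketch of the mangling rule documented above, reduced to a standalone helper. The prefix "LITERAL_" is only an assumption for the example (the real prefix comes from antlrTool.literalsPrefix), and the upper-casing option is omitted.

// Illustrative only: keep literals whose inner characters are letters or '_',
// prepend the prefix; otherwise give up and return null, as mangleLiteral() does.
public class MangleLiteralSketch {
    static String mangle(String quoted, String prefix) {
        StringBuilder mangled = new StringBuilder(prefix);
        for (int i = 1; i < quoted.length() - 1; i++) {
            char c = quoted.charAt(i);
            if (!Character.isLetter(c) && c != '_') return null;
            mangled.append(c);
        }
        return mangled.toString();
    }
    public static void main(String[] args) {
        System.out.println(mangle("\"return\"", "LITERAL_")); // LITERAL_return
        System.out.println(mangle("\"+=\"", "LITERAL_"));     // null: not all letters
    }
}
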
4317    /** Map an identifier to its corresponding tree-node variable.
4318      * This is context-sensitive, depending on the rule and alternative
4319      * being generated
4320      * @param idParam The identifier name to map
4321      * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
4322      */

4323    public String JavaDoc mapTreeId(String JavaDoc idParam, ActionTransInfo transInfo) {
4324        // if not in an action of a rule, nothing to map.
4325        if ( currentRule==null ) return idParam;
4326// System.out.print("mapTreeId: "+idParam+" "+currentRule.getRuleName()+" ");
4327

4328        boolean in_var = false;
4329        String JavaDoc id = idParam;
4330        if (grammar instanceof TreeWalkerGrammar)
4331        {
4332// RK: hmmm this seems odd. If buildAST is false it translates
4333// #rulename_in to 'rulename_in' else to 'rulename_AST_in' which indeed
4334// exists. disabling for now.. and hope it doesn't blow up somewhere.
4335            if ( !grammar.buildAST )
4336            {
4337                in_var = true;
4338// System.out.println("in_var1");
4339            }
4340            // If the id ends with "_in", then map it to the input variable
4341            // else
4342            if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3)
4343            {
4344                // Strip off the "_in"
4345                id = id.substring(0, id.length()-3);
4346                in_var = true;
4347// System.out.println("in_var2");
4348            }
4349        }
4350// System.out.print(in_var+"\t");
4351

4352        // Check the rule labels. If id is a label, then the output
4353        // variable is label_AST, and the input variable is plain label.
4354        for (int i = 0; i < currentRule.labeledElements.size(); i++)
4355        {
4356            AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
4357            if (elt.getLabel().equals(id))
4358            {
4359// if( in_var )
4360// System.out.println("returning (vec) "+(in_var ? id : id + "_AST"));
4361                return in_var ? id : id + "_AST";
4362            }
4363        }
4364
4365        // Failing that, check the id-to-variable map for the alternative.
4366        // If the id is in the map, then output variable is the name in the
4367        // map, and input variable is name_in
4368        String JavaDoc s = (String JavaDoc)treeVariableMap.get(id);
4369        if (s != null)
4370        {
4371            if (s == NONUNIQUE)
4372            {
4373// if( in_var )
4374// System.out.println("returning null (nonunique)");
4375                // There is more than one element with this id
4376                antlrTool.error("Ambiguous reference to AST element "+id+
4377                                " in rule "+currentRule.getRuleName());
4378                return null;
4379            }
4380            else if (s.equals(currentRule.getRuleName()))
4381            {
4382                // a recursive call to the enclosing rule is
4383                // ambiguous with the rule itself.
4384// if( in_var )
4385// System.out.println("returning null (rulename)");
4386                antlrTool.error("Ambiguous reference to AST element "+id+
4387                                " in rule "+currentRule.getRuleName());
4388                return null;
4389            }
4390            else
4391            {
4392// if( in_var )
4393// System.out.println("returning "+(in_var?s+"_in":s));
4394                return in_var ? s + "_in" : s;
4395            }
4396        }
4397
4398// System.out.println("Last check: "+id+" == "+currentRule.getRuleName());
4399        // Failing that, check the rule name itself. Output variable
4400        // is rule_AST; input variable is rule_AST_in (treeparsers).
4401        if( id.equals(currentRule.getRuleName()) )
4402        {
4403            String JavaDoc r = in_var ? id + "_AST_in" : id + "_AST";
4404            if ( transInfo!=null ) {
4405                if ( !in_var ) {
4406                    transInfo.refRuleRoot = r;
4407                }
4408            }
4409// if( in_var )
4410// System.out.println("returning (r) "+r);
4411            return r;
4412        }
4413        else
4414        {
4415// if( in_var )
4416// System.out.println("returning (last) "+id);
4417            // id does not map to anything -- return itself.
4418            return id;
4419        }
4420    }
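
For orientation, the suffix conventions applied by mapTreeId can be reduced to the sketch below. It is a simplified illustration only: the treeVariableMap lookup (which returns the mapped variable or reports an ambiguity) is omitted, and "isLabel"/"isRuleName" merely stand in for the real checks against currentRule.labeledElements and getRuleName().

// Illustrative only: the label, rule-name and fall-through cases of mapTreeId().
public class TreeIdSuffixSketch {
    static String map(String id, boolean inVar, boolean isLabel, boolean isRuleName) {
        if (isLabel)    return inVar ? id : id + "_AST";              // labeled element
        if (isRuleName) return inVar ? id + "_AST_in" : id + "_AST";  // the rule itself
        return id;                                                    // unmapped id
    }
    public static void main(String[] args) {
        System.out.println(map("expr", false, false, true)); // expr_AST
        System.out.println(map("expr", true,  false, true)); // expr_AST_in
        System.out.println(map("e",    false, true,  false)); // e_AST
    }
}
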
4421    /** Given an element and the name of an associated AST variable,
4422      * create a mapping between the element "name" and the variable name.
4423      */

4424    private void mapTreeVariable(AlternativeElement e, String JavaDoc name)
4425    {
4426        // For tree elements, defer to the root
4427        if (e instanceof TreeElement) {
4428            mapTreeVariable( ((TreeElement)e).root, name);
4429            return;
4430        }
4431
4432        // Determine the name of the element, if any, for mapping purposes
4433        String JavaDoc elName = null;
4434
4435        // Don't map labeled items
4436        if (e.getLabel() == null) {
4437            if (e instanceof TokenRefElement) {
4438                // use the token id
4439                elName = ((TokenRefElement)e).atomText;
4440            }
4441            else if (e instanceof RuleRefElement) {
4442                // use the rule name
4443                elName = ((RuleRefElement)e).targetRule;
4444            }
4445        }
4446        // Add the element to the tree variable map if it has a name
4447        if (elName != null) {
4448            if (treeVariableMap.get(elName) != null) {
4449                // Name is already in the map -- mark it as duplicate
4450                treeVariableMap.remove(elName);
4451                treeVariableMap.put(elName, NONUNIQUE);
4452            }
4453            else {
4454                treeVariableMap.put(elName, name);
4455            }
4456        }
4457    }
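
The duplicate handling above relies on an identity-compared sentinel (NONUNIQUE). The following standalone sketch shows the same pattern under hypothetical names; the element names and AST variable names used in main() are invented for the example.

import java.util.Hashtable;

// Illustrative only: a second mapping of the same element name replaces the
// entry with a shared sentinel, so later lookups can detect the ambiguity.
public class TreeVariableMapSketch {
    static final String NONUNIQUE = new String();
    static final Hashtable map = new Hashtable();

    static void mapName(String elName, String varName) {
        if (map.get(elName) != null) {
            map.remove(elName);
            map.put(elName, NONUNIQUE);   // mark as ambiguous
        } else {
            map.put(elName, varName);
        }
    }
    public static void main(String[] args) {
        mapName("ID", "tmp1_AST");
        mapName("ID", "tmp2_AST");
        System.out.println(map.get("ID") == NONUNIQUE); // true: identity check on the sentinel
    }
}
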
4458
4459    /** Lexically process tree-specifiers in the action.
4460     * This will replace #id and #(...) with the appropriate
4461     * function calls and/or variables.
4462     */

4463    protected String JavaDoc processActionForSpecialSymbols(String JavaDoc actionStr,
4464                                                                    int line,
4465                                                                    RuleBlock currentRule,
4466                                                                    ActionTransInfo tInfo)
4467    {
4468        if ( actionStr==null || actionStr.length()==0 )
4469            return null;
4470
4471        // The action trans info tells us (at the moment) whether an
4472        // assignment was done to the rule's tree root.
4473        if (grammar==null)
4474            return actionStr;
4475
4476        if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
4477             grammar instanceof TreeWalkerGrammar ||
4478             ((grammar instanceof LexerGrammar ||
4479                grammar instanceof ParserGrammar)
4480                && actionStr.indexOf('$') != -1) )
4481        {
4482            // Create a lexer to read an action and return the translated version
4483            persistence.antlr.actions.cpp.ActionLexer lexer =
4484                new persistence.antlr.actions.cpp.ActionLexer(actionStr, currentRule, this, tInfo);
4485            lexer.setLineOffset(line);
4486            lexer.setFilename(grammar.getFilename());
4487            lexer.setTool(antlrTool);
4488
4489            try {
4490                lexer.mACTION(true);
4491                actionStr = lexer.getTokenObject().getText();
4492                // System.out.println("action translated: "+actionStr);
4493                // System.out.println("trans info is "+tInfo);
4494            }
4495            catch (RecognitionException ex) {
4496                lexer.reportError(ex);
4497                return actionStr;
4498            }
4499            catch (TokenStreamException tex) {
4500                antlrTool.panic("Error reading action:"+actionStr);
4501                return actionStr;
4502            }
4503            catch (CharStreamException io) {
4504                antlrTool.panic("Error reading action:"+actionStr);
4505                return actionStr;
4506            }
4507        }
4508        return actionStr;
4509    }
4510
4511    private String JavaDoc fixNameSpaceOption( String JavaDoc ns )
4512    {
4513        ns = StringUtils.stripFrontBack(ns,"\"","\"");
4514        if( ns.length() > 2 &&
4515             !ns.substring(ns.length()-2, ns.length()).equals("::") )
4516        ns += "::";
4517        return ns;
4518    }
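
A small sketch of what fixNameSpaceOption() produces: strip the surrounding quotes and make sure the namespace ends in "::". stripQuotes() below is a stand-in for the ANTLR-internal StringUtils.stripFrontBack(ns, "\"", "\""), and the namespace strings in main() are hypothetical.

// Illustrative only: quote stripping plus "::" normalisation.
public class NamespaceOptionSketch {
    static String stripQuotes(String s) {
        if (s.startsWith("\"")) s = s.substring(1);
        if (s.endsWith("\""))   s = s.substring(0, s.length() - 1);
        return s;
    }
    static String fixNamespace(String ns) {
        ns = stripQuotes(ns);
        if (ns.length() > 2 && !ns.endsWith("::")) ns += "::";
        return ns;
    }
    public static void main(String[] args) {
        System.out.println(fixNamespace("\"myproject\""));   // myproject::
        System.out.println(fixNamespace("\"myproject::\"")); // myproject:: (unchanged)
    }
}
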
4519
4520    private void setupGrammarParameters(Grammar g) {
4521        if (g instanceof ParserGrammar ||
4522             g instanceof LexerGrammar ||
4523             g instanceof TreeWalkerGrammar
4524            )
4525        {
4526            /* RK: options also have to be added to Grammar.java and for options
4527             * on the file level entries have to be defined in
4528             * DefineGrammarSymbols.java and passed around via 'globals' in
4529             * antlrTool.java
4530             */

4531            if( antlrTool.nameSpace != null )
4532                nameSpace = antlrTool.nameSpace;
4533
4534            if( antlrTool.namespaceStd != null )
4535                namespaceStd = fixNameSpaceOption(antlrTool.namespaceStd);
4536
4537            if( antlrTool.namespaceAntlr != null )
4538                namespaceAntlr = fixNameSpaceOption(antlrTool.namespaceAntlr);
4539
4540            genHashLines = antlrTool.genHashLines;
4541
4542            /* let grammar level options override filelevel ones...
4543             */

4544            if( g.hasOption("namespace") ) {
4545                Token t = g.getOption("namespace");
4546                if( t != null ) {
4547                    nameSpace = new NameSpace(t.getText());
4548                }
4549            }
4550            if( g.hasOption("namespaceAntlr") ) {
4551                Token t = g.getOption("namespaceAntlr");
4552                if( t != null ) {
4553                    String JavaDoc ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
4554                    if ( ns != null ) {
4555                        if( ns.length() > 2 &&
4556                             !ns.substring(ns.length()-2, ns.length()).equals("::") )
4557                            ns += "::";
4558                        namespaceAntlr = ns;
4559                    }
4560                }
4561            }
4562            if( g.hasOption("namespaceStd") ) {
4563                Token t = g.getOption("namespaceStd");
4564                if( t != null ) {
4565                    String JavaDoc ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
4566                    if ( ns != null ) {
4567                        if( ns.length() > 2 &&
4568                             !ns.substring(ns.length()-2, ns.length()).equals("::") )
4569                            ns += "::";
4570                        namespaceStd = ns;
4571                    }
4572                }
4573            }
4574            if( g.hasOption("genHashLines") ) {
4575                Token t = g.getOption("genHashLines");
4576                if( t != null ) {
4577                    String JavaDoc val = StringUtils.stripFrontBack(t.getText(),"\"","\"");
4578                    genHashLines = val.equals("true");
4579                }
4580            }
4581            noConstructors = antlrTool.noConstructors; // get the default
4582            if( g.hasOption("noConstructors") ) {
4583                Token t = g.getOption("noConstructors");
4584                if( (t != null) && !(t.getText().equals("true") || t.getText().equals("false")))
4585                    antlrTool.error("noConstructors option must be true or false", antlrTool.getGrammarFile(), t.getLine(), t.getColumn());
4586                noConstructors = t.getText().equals("true");
4587            }
4588        }
4589        if (g instanceof ParserGrammar) {
4590            labeledElementASTType = namespaceAntlr+"RefAST";
4591            labeledElementASTInit = namespaceAntlr+"nullAST";
4592            if ( g.hasOption("ASTLabelType") ) {
4593                Token tsuffix = g.getOption("ASTLabelType");
4594                if ( tsuffix != null ) {
4595                    String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
4596                    if ( suffix != null ) {
4597                        usingCustomAST = true;
4598                        labeledElementASTType = suffix;
4599                        labeledElementASTInit = suffix+"("+namespaceAntlr+"nullAST)";
4600                    }
4601                }
4602            }
4603            labeledElementType = namespaceAntlr+"RefToken ";
4604            labeledElementInit = namespaceAntlr+"nullToken";
4605            commonExtraArgs = "";
4606            commonExtraParams = "";
4607            commonLocalVars = "";
4608            lt1Value = "LT(1)";
4609            exceptionThrown = namespaceAntlr+"RecognitionException";
4610            throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(LT(1), getFilename());";
4611        }
4612        else if (g instanceof LexerGrammar) {
4613            labeledElementType = "char ";
4614            labeledElementInit = "'\\0'";
4615            commonExtraArgs = "";
4616            commonExtraParams = "bool _createToken";
4617            commonLocalVars = "int _ttype; "+namespaceAntlr+"RefToken _token; int _begin=text.length();";
4618            lt1Value = "LA(1)";
4619            exceptionThrown = namespaceAntlr+"RecognitionException";
4620            throwNoViable = "throw "+namespaceAntlr+"NoViableAltForCharException(LA(1), getFilename(), getLine(), getColumn());";
4621        }
4622        else if (g instanceof TreeWalkerGrammar) {
4623            labeledElementInit = namespaceAntlr+"nullAST";
4624            labeledElementASTInit = namespaceAntlr+"nullAST";
4625            labeledElementASTType = namespaceAntlr+"RefAST";
4626            labeledElementType = namespaceAntlr+"RefAST";
4627            commonExtraParams = namespaceAntlr+"RefAST _t";
4628            throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(_t);";
4629            lt1Value = "_t";
4630            if ( g.hasOption("ASTLabelType") ) {
4631                Token tsuffix = g.getOption("ASTLabelType");
4632                if ( tsuffix != null ) {
4633                    String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
4634                    if ( suffix != null ) {
4635                        usingCustomAST = true;
4636                        labeledElementASTType = suffix;
4637                        labeledElementType = suffix;
4638                        labeledElementInit = suffix+"("+namespaceAntlr+"nullAST)";
4639                        labeledElementASTInit = labeledElementInit;
4640                        commonExtraParams = suffix+" _t";
4641                        throwNoViable = "throw "+namespaceAntlr+"NoViableAltException("+namespaceAntlr+"RefAST(_t));";
4642                        lt1Value = "_t";
4643                    }
4644                }
4645            }
4646            if ( !g.hasOption("ASTLabelType") ) {
4647                g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,namespaceAntlr+"RefAST"));
4648            }
4649            commonExtraArgs = "_t";
4650            commonLocalVars = "";
4651            exceptionThrown = namespaceAntlr+"RecognitionException";
4652        }
4653        else {
4654            antlrTool.panic("Unknown grammar type");
4655        }
4656    }
4657    // Convert a char or string constant to something C++ likes and
4658    // check whether it's in range for the current charvocab size.
4659    private String JavaDoc normalizeStringOrChar(String JavaDoc text) {
4660        // check to see if the text is a single character
4661        if (text.startsWith("'")) {
4662            // assume it also ends with '
4663

4664            return charFormatter.literalChar(ANTLRLexer.tokenTypeForCharLiteral(text));
4665        }
4666        else
4667        {
4668            // must be a string literal; strip off the quotes so
4669            // they won't get quoted
4670            return "\""+charFormatter.escapeString(StringUtils.stripFrontBack(text,"\"","\""))+"\"";
4671        }
4672    }
4673}
4674