
1 package persistence.antlr;
2
3 /* ANTLR Translator Generator
4  * Project led by Terence Parr at http://www.jGuru.com
5  * Software rights: http://www.antlr.org/license.html
6  *
7  */
8
9 //
10 // ANTLR C# Code Generator by Micheal Jordan
11 //                            Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com
12 //                            Anthony Oguntimehin
13 //
14 // With many thanks to Eric V. Smith from the ANTLR list.
15 //
16
17 // HISTORY:
18 //
19 // 17-May-2002 kunle     Fixed bug in OctalToUnicode() - was processing non-Octal escape sequences
20 //                       Also added namespace support based on Cpp version.
21 // 07-Jun-2002 kunle     Added Scott Ellis's _saveIndex creation optimizations
22 // 09-Sep-2002 richardN  Richard Ney's bug-fix for literals table construction.
23 //                       [ Hashtable ctor needed instance of hash code provider not it's class name. ]
24 // 17-Sep-2002 kunle &   Added all Token ID definitions as data member of every Lexer/Parser/TreeParser
25 //             AOg       [ A by-product of problem-solving phase of the hetero-AST changes below
26 //                         but, it breaks nothing and restores "normal" ANTLR codegen behaviour. ]
27 // 19-Oct-2002 kunle &   Completed the work required to support heterogenous ASTs (many changes)
28 //             AOg &
29 //             michealj
30 // 14-Nov-2002 michealj  Added "initializeASTFactory()" to support flexible ASTFactory initialization.
31 //                       [ Thanks to Ric Klaren - for suggesting it and implementing it for Cpp. ]
32 // 18-Nov-2002 kunle     Added fix to make xx_tokenSet_xx names CLS compliant.
33 // 01-Dec-2002 richardN  Patch to reduce "unreachable code" warnings
34 // 01-Dec-2002 richardN  Fix to generate correct TreeParser token-type classnames.
35 // 12-Jan-2002 kunle &   Generated Lexers, Parsers and TreeParsers now support ANTLR's tracing option.
36 //             michealj
37 // 12-Jan-2003 kunle     Fixed issue where initializeASTFactory() was generated when "buildAST=false"
38 // 14-Jan-2003 AOg       initializeASTFactory(AST factory) method was modifying the Parser's "astFactory"
39 //                       member rather than it's own "factory" parameter. Fixed.
40 // 18-Jan-2003 kunle &   Fixed reported issues with ASTFactory create() calls for hetero ASTs
41 //             michealj  - code generated for LEXER token with hetero-AST option specified does not compile
42 //                       - code generated for imaginary tokens with hetero-AST option specified uses default AST type
43 //                       - code generated for per-TokenRef hetero-AST option specified does not compile
44 // 18-Jan-2003 kunle     initializeASTFactory(AST) method is now a static public member
45 // 18-May-2003 kunle     Changes to address outstanding reported issues::
46 //                       - Fixed reported issues with support for case-sensitive literals
47 //                       - persistence.antlr.SemanticException now imported for all Lexers.
48 //                         [ This exception is thrown on predicate failure. ]
49 // 12-Jan-2004 kunle     Added fix for reported issue with un-compileable generated lexers
50 //
51 //
52

53 import java.util.Enumeration;
54 import java.util.Hashtable;
55 import persistence.antlr.collections.impl.BitSet;
56 import persistence.antlr.collections.impl.Vector;
57 import java.io.PrintWriter; //SAS: changed for proper text file io
58 import java.io.IOException;
59 import java.io.FileWriter;
60
61 /** Generates MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs */
62 public class CSharpCodeGenerator extends CodeGenerator {
63     // non-zero if inside syntactic predicate generation
64     protected int syntacticPredLevel = 0;
65
66     // Are we generating ASTs (for parsers and tree parsers) right now?
67     protected boolean genAST = false;
68
69     // Are we saving the text consumed (for lexers) right now?
70     protected boolean saveText = false;
71
72     // Grammar parameters set up to handle different grammar classes.
73     // These are used to get instanceof tests out of code generation
74     boolean usingCustomAST = false;
75     String labeledElementType;
76     String labeledElementASTType;
77     String labeledElementInit;
78     String commonExtraArgs;
79     String commonExtraParams;
80     String commonLocalVars;
81     String lt1Value;
82     String exceptionThrown;
83     String throwNoViable;
84
85     // Tracks the rule being generated. Used for mapTreeId
86     RuleBlock currentRule;
87     // Tracks the rule or labeled subrule being generated. Used for AST generation.
88     String currentASTResult;
89
90     /** Mapping between the ids used in the current alt, and the
91      * names of variables used to represent their AST values.
92      */
93     Hashtable treeVariableMap = new Hashtable();
94
95     /** Used to keep track of which AST variables have been defined in a rule
96      * (except for the #rule_name and #rule_name_in var's
97      */
98     Hashtable declaredASTVariables = new Hashtable();
99
100    /* Count of unnamed generated variables */
101    int astVarNumber = 1;
102
103    /** Special value used to mark duplicate in treeVariableMap */
104    protected static final String NONUNIQUE = new String();
105
106    public static final int caseSizeThreshold = 127; // ascii is max
107
108    private Vector semPreds;
109    // Used to keep track of which (heterogeneous) AST types are used
110    // which need to be set in the ASTFactory of the generated parser
111    private java.util.Vector astTypes;
112
113    private static CSharpNameSpace nameSpace = null;
114
115    // _saveIndex creation optimization -- don't create it unless we need to use it
116    boolean bSaveIndexCreated = false;
117
118
119     /** Create a CSharp code-generator using the given Grammar.
120      * The caller must still call setTool, setBehavior, and setAnalyzer
121      * before generating code.
122      */

123     public CSharpCodeGenerator() {
124         super();
125         charFormatter = new CSharpCharFormatter();
126     }
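
[Editor's note] As the constructor's comment above says, building the generator is not enough on its own: the Tool, the Behavior and the grammar analyzer must be wired in before gen() runs. A rough, illustrative sketch of that wiring is shown below; the method names setTool/setBehavior/setAnalyzer come from the Javadoc above, but the surrounding variables and their exact types are assumptions inferred from how this file uses antlrTool, behavior and analyzer, not a documented API.

    // Illustrative only -- not part of this file.
    CSharpCodeGenerator codeGen = new CSharpCodeGenerator();
    codeGen.setTool(antlrTool);     // the persistence.antlr Tool instance driving generation (assumed)
    codeGen.setBehavior(behavior);  // supplies behavior.grammars and behavior.tokenManagers (assumed)
    codeGen.setAnalyzer(analyzer);  // the LLk analyzer gen() hands each grammar via analyzer.setGrammar(g)
    codeGen.gen();                  // emits MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs
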
127
128     /** Adds a semantic predicate string to the sem pred vector
129         These strings will be used to build an array of sem pred names
130         when building a debugging parser. This method should only be
131         called when the debug option is specified
132      */

133     protected int addSemPred(String JavaDoc predicate) {
134         semPreds.appendElement(predicate);
135         return semPreds.size()-1;
136     }
137
138     public void exitIfError()
139     {
140         if (antlrTool.hasError())
141         {
142             antlrTool.fatalError("Exiting due to errors.");
143         }
144     }
145
146     /**Generate the parser, lexer, treeparser, and token types in CSharp */
147     public void gen() {
148        // Do the code generation
149        try {
150            // Loop over all grammars
151            Enumeration grammarIter = behavior.grammars.elements();
152            while (grammarIter.hasMoreElements()) {
153                Grammar g = (Grammar)grammarIter.nextElement();
154                // Connect all the components to each other
155                g.setGrammarAnalyzer(analyzer);
156                g.setCodeGenerator(this);
157                analyzer.setGrammar(g);
158                // To get right overloading behavior across heterogeneous grammars
159                setupGrammarParameters(g);
160                g.generate();
161                exitIfError();
162            }
163
164            // Loop over all token managers (some of which are lexers)
165            Enumeration tmIter = behavior.tokenManagers.elements();
166            while (tmIter.hasMoreElements()) {
167                TokenManager tm = (TokenManager)tmIter.nextElement();
168                if (!tm.isReadOnly()) {
169                    // Write the token manager tokens as CSharp
170                    // this must appear before genTokenInterchange so that
171                    // labels are set on string literals
172                    genTokenTypes(tm);
173                    // Write the token manager tokens as plain text
174                    genTokenInterchange(tm);
175                }
176                exitIfError();
177            }
178        }
179        catch (IOException e) {
180            antlrTool.reportException(e, null);
181        }
182    }
183
184     /** Generate code for the given grammar element.
185      * @param blk The {...} action to generate
186      */

187     public void gen(ActionElement action) {
188         if ( DEBUG_CODE_GENERATOR ) System.out.println("genAction("+action+")");
189         if ( action.isSemPred ) {
190             genSemPred(action.actionText, action.line);
191         }
192         else {
193             if ( grammar.hasSyntacticPredicate ) {
194                 println("if (0==inputState.guessing)");
195                 println("{");
196                 tabs++;
197             }
198
199             ActionTransInfo tInfo = new ActionTransInfo();
200            String actionStr = processActionForSpecialSymbols(action.actionText,
201                                                              action.getLine(),
202                                                              currentRule, tInfo);
203
204            if ( tInfo.refRuleRoot!=null ) {
205                // Somebody referenced "#rule", make sure translated var is valid
206                // assignment to #rule is left as a ref also, meaning that assignments
207                // with no other refs like "#rule = foo();" still forces this code to be
208                // generated (unnecessarily).
209                println(tInfo.refRuleRoot + " = ("+labeledElementASTType+")currentAST.root;");
210            }
211
212            // dump the translated action
213            printAction(actionStr);
214
215            if ( tInfo.assignToRoot ) {
216                // Somebody did a "#rule=", reset internal currentAST.root
217                println("currentAST.root = "+tInfo.refRuleRoot+";");
218                // reset the child pointer too to be last sibling in sibling list
219                println("if ( (null != "+tInfo.refRuleRoot+") && (null != "+tInfo.refRuleRoot+".getFirstChild()) )");
220                 tabs++;
221                 println("currentAST.child = "+tInfo.refRuleRoot+".getFirstChild();");
222                 tabs--;
223                 println("else");
224                 tabs++;
225                 println("currentAST.child = "+tInfo.refRuleRoot+";");
226                 tabs--;
227                 println("currentAST.advanceChildToEnd();");
228             }
229
230             if ( grammar.hasSyntacticPredicate ) {
231                 tabs--;
232                 println("}");
233             }
234         }
235     }
236
237     /** Generate code for the given grammar element.
238      * @param blk The "x|y|z|..." block to generate
239      */

240     public void gen(AlternativeBlock blk) {
241         if ( DEBUG_CODE_GENERATOR ) System.out.println("gen("+blk+")");
242         println("{");
243         tabs++;
244
245         genBlockPreamble(blk);
246         genBlockInitAction(blk);
247
248         // Tell AST generation to build subrule result
249        String saveCurrentASTResult = currentASTResult;
250         if (blk.getLabel() != null) {
251             currentASTResult = blk.getLabel();
252         }
253
254         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
255
256         CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
257         genBlockFinish(howToFinish, throwNoViable);
258
259         tabs--;
260         println("}");
261
262         // Restore previous AST generation
263        currentASTResult = saveCurrentASTResult;
264     }
265     /** Generate code for the given grammar element.
266      * @param blk The block-end element to generate. Block-end
267      * elements are synthesized by the grammar parser to represent
268      * the end of a block.
269      */

270     public void gen(BlockEndElement end) {
271         if ( DEBUG_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")");
272     }
273     /** Generate code for the given grammar element.
274      * @param blk The character literal reference to generate
275      */

276     public void gen(CharLiteralElement atom) {
277         if ( DEBUG_CODE_GENERATOR ) System.out.println("genChar("+atom+")");
278
279         if ( atom.getLabel()!=null ) {
280             println(atom.getLabel() + " = " + lt1Value + ";");
281         }
282
283         boolean oldsaveText = saveText;
284         saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
285         genMatch(atom);
286         saveText = oldsaveText;
287     }
288     /** Generate code for the given grammar element.
289      * @param blk The character-range reference to generate
290      */

291     public void gen(CharRangeElement r) {
292         if ( r.getLabel()!=null && syntacticPredLevel == 0) {
293             println(r.getLabel() + " = " + lt1Value + ";");
294         }
295       boolean flag = ( grammar instanceof LexerGrammar &&
296             (!saveText || (r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) );
297       if (flag)
298           println("_saveIndex = text.Length;");
299
300       println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");");
301
302       if (flag)
303           println("text.Length = _saveIndex;");
304     }
305     /** Generate the lexer CSharp file */
306     public void gen(LexerGrammar g) throws IOException JavaDoc {
307         // If debugging, create a new sempred vector for this grammar
308        if (g.debuggingOutput)
309             semPreds = new Vector();
310
311         setGrammar(g);
312         if (!(grammar instanceof LexerGrammar)) {
313             antlrTool.panic("Internal error generating lexer");
314         }
315         genBody(g);
316     }
317     /** Generate code for the given grammar element.
318      * @param blk The (...)+ block to generate
319      */

320     public void gen(OneOrMoreBlock blk) {
321         if ( DEBUG_CODE_GENERATOR ) System.out.println("gen+("+blk+")");
322         String JavaDoc label;
323         String JavaDoc cnt;
324         println("{ // ( ... )+");
325         genBlockPreamble(blk);
326         if ( blk.getLabel() != null ) {
327             cnt = "_cnt_"+blk.getLabel();
328         }
329         else {
330             cnt = "_cnt" + blk.ID;
331         }
332         println("int "+cnt+"=0;");
333         if ( blk.getLabel() != null ) {
334             label = blk.getLabel();
335         }
336         else {
337             label = "_loop" + blk.ID;
338         }
339
340         println("for (;;)");
341         println("{");
342         tabs++;
343        // generate the init action for ()+ ()* inside the loop
344        // this allows us to do useful EOF checking...
345        genBlockInitAction(blk);
346
347        // Tell AST generation to build subrule result
348        String saveCurrentASTResult = currentASTResult;
349         if (blk.getLabel() != null) {
350             currentASTResult = blk.getLabel();
351         }
352
353         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
354
355        // generate exit test if greedy set to false
356        // and an alt is ambiguous with exit branch
357        // or when lookahead derived purely from end-of-file
358        // Lookahead analysis stops when end-of-file is hit,
359        // returning set {epsilon}. Since {epsilon} is not
360        // ambig with any real tokens, no error is reported
361        // by deterministic() routines and we have to check
362        // for the case where the lookahead depth didn't get
363        // set to NONDETERMINISTIC (this only happens when the
364        // FOLLOW contains real atoms + epsilon).
365        boolean generateNonGreedyExitPath = false;
366         int nonGreedyExitDepth = grammar.maxk;
367
368         if ( !blk.greedy &&
369             blk.exitLookaheadDepth<=grammar.maxk &&
370             blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
371         {
372             generateNonGreedyExitPath = true;
373             nonGreedyExitDepth = blk.exitLookaheadDepth;
374         }
375         else if ( !blk.greedy &&
376             blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
377         {
378             generateNonGreedyExitPath = true;
379         }
380
381        // generate exit test if greedy set to false
382        // and an alt is ambiguous with exit branch
383        if ( generateNonGreedyExitPath ) {
384             if ( DEBUG_CODE_GENERATOR ) {
385                 System.out.println("nongreedy (...)+ loop; exit depth is "+
386                     blk.exitLookaheadDepth);
387             }
388            String predictExit =
389                 getLookaheadTestExpression(blk.exitCache,
390                 nonGreedyExitDepth);
391             println("// nongreedy exit test");
392             println("if (("+cnt+" >= 1) && "+predictExit+") goto "+label+"_breakloop;");
393         }
394
395         CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
396         genBlockFinish(
397             howToFinish,
398             "if ("+cnt+" >= 1) { goto "+label+"_breakloop; } else { " + throwNoViable + "; }"
399             );
400
401         println(cnt+"++;");
402         tabs--;
403         println("}");
404         _print(label + "_breakloop:");
405         println(";");
406         println("} // ( ... )+");
407
408         // Restore previous AST generation
409        currentASTResult = saveCurrentASTResult;
410     }
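
[Editor's note] Pieced together from the println() calls above, the C# emitted for a (...)+ subrule has roughly the shape sketched below. This is schematic only: "3" stands in for the block's ID, the loop body comes from genCommonBlock(), and the nongreedy exit test appears only when generateNonGreedyExitPath is true.

    // schematic of the generated C# -- assembled from the strings printed above
    { // ( ... )+
        int _cnt3=0;
        for (;;)
        {
            // nongreedy exit test
            // if ((_cnt3 >= 1) && <exit prediction>) goto _loop3_breakloop;
            // ...alternatives emitted by genCommonBlock()...
            // if (_cnt3 >= 1) { goto _loop3_breakloop; } else { <throwNoViable>; }
            _cnt3++;
        }
    _loop3_breakloop:    ;
    } // ( ... )+
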
411     /** Generate the parser CSharp file */
412     public void gen(ParserGrammar g) throws IOException JavaDoc {
413
414         // if debugging, set up a new vector to keep track of sempred
415        // strings for this grammar
416        if (g.debuggingOutput)
417             semPreds = new Vector();
418
419         setGrammar(g);
420         if (!(grammar instanceof ParserGrammar)) {
421             antlrTool.panic("Internal error generating parser");
422         }
423         genBody(g);
424     }
425     /** Generate code for the given grammar element.
426      * @param blk The rule-reference to generate
427      */

428     public void gen(RuleRefElement rr)
429     {
430         if ( DEBUG_CODE_GENERATOR ) System.out.println("genRR("+rr+")");
431         RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
432         if (rs == null || !rs.isDefined())
433         {
434            // Is this redundant???
435            antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
436            return;
437        }
438        if (!(rs instanceof RuleSymbol))
439        {
440            // Is this redundant???
441            antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
442             return;
443         }
444
445         genErrorTryForElement(rr);
446
447         // AST value for labeled rule refs in tree walker.
448        // This is not AST construction; it is just the input tree node value.
449        if ( grammar instanceof TreeWalkerGrammar &&
450             rr.getLabel() != null &&
451             syntacticPredLevel == 0 )
452         {
453             println(rr.getLabel() + " = _t==ASTNULL ? null : "+lt1Value+";");
454         }
455
456         // if in lexer and ! on rule ref or alt or rule, save buffer index to kill later
457        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG))
458         {
459             declareSaveIndexVariableIfNeeded();
460             println("_saveIndex = text.Length;");
461         }
462
463         // Process return value assignment if any
464        printTabs();
465        if (rr.idAssign != null)
466        {
467            // Warn if the rule has no return type
468            if (rs.block.returnAction == null)
469             {
470                 antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
471             }
472             _print(rr.idAssign + "=");
473         } else {
474             // Warn about return value if any, but not inside syntactic predicate
475            if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
476             {
477                 antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
478             }
479         }
480
481        // Call the rule
482        GenRuleInvocation(rr);
483
484        // if in lexer and ! on element or alt or rule, save buffer index to kill later
485        if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
486            declareSaveIndexVariableIfNeeded();
487            println("text.Length = _saveIndex;");
488        }
489
490        // if not in a syntactic predicate
491        if (syntacticPredLevel == 0)
492         {
493             boolean doNoGuessTest = (
494                 grammar.hasSyntacticPredicate &&
495                 (
496                 grammar.buildAST && rr.getLabel() != null ||
497                 (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
498                 )
499                 );
500             if (doNoGuessTest) {
501                 println("if (0 == inputState.guessing)");
502                 println("{");
503                 tabs++;
504             }
505
506             if (grammar.buildAST && rr.getLabel() != null)
507             {
508                 // always gen variable for rule return on labeled rules
509                println(rr.getLabel() + "_AST = ("+labeledElementASTType+")returnAST;");
510             }
511             if (genAST)
512             {
513                 switch (rr.getAutoGenType())
514                 {
515                 case GrammarElement.AUTO_GEN_NONE:
516                     if( usingCustomAST )
517                         println("astFactory.addASTChild(currentAST, (AST)returnAST);");
518                     else
519                         println("astFactory.addASTChild(currentAST, returnAST);");
520                     break;
521                 case GrammarElement.AUTO_GEN_CARET:
522                     antlrTool.error("Internal: encountered ^ after rule reference");
523                     break;
524                 default:
525                     break;
526                 }
527             }
528
529             // if a lexer and labeled, Token label defined at rule level, just set it here
530
if ( grammar instanceof LexerGrammar && rr.getLabel() != null )
531             {
532                 println(rr.getLabel()+" = returnToken_;");
533             }
534
535             if (doNoGuessTest)
536             {
537                 tabs--;
538                 println("}");
539             }
540         }
541         genErrorCatchForElement(rr);
542     }
543     /** Generate code for the given grammar element.
544      * @param blk The string-literal reference to generate
545      */

546     public void gen(StringLiteralElement atom) {
547         if ( DEBUG_CODE_GENERATOR ) System.out.println("genString("+atom+")");
548
549        // Variable declarations for labeled elements
550        if (atom.getLabel()!=null && syntacticPredLevel == 0) {
551            println(atom.getLabel() + " = " + lt1Value + ";");
552        }
553
554        // AST
555        genElementAST(atom);
556
557        // is there a bang on the literal?
558        boolean oldsaveText = saveText;
559        saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
560
561        // matching
562        genMatch(atom);
563
564        saveText = oldsaveText;
565
566        // tack on tree cursor motion if doing a tree walker
567        if (grammar instanceof TreeWalkerGrammar) {
568             println("_t = _t.getNextSibling();");
569         }
570     }
571
572     /** Generate code for the given grammar element.
573      * @param blk The token-range reference to generate
574      */

575     public void gen(TokenRangeElement r) {
576         genErrorTryForElement(r);
577         if ( r.getLabel()!=null && syntacticPredLevel == 0) {
578             println(r.getLabel() + " = " + lt1Value + ";");
579         }
580
581         // AST
582        genElementAST(r);
583
584        // match
585        println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");");
586         genErrorCatchForElement(r);
587     }
588
589     /** Generate code for the given grammar element.
590      * @param blk The token-reference to generate
591      */

592     public void gen(TokenRefElement atom) {
593         if ( DEBUG_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")");
594         if ( grammar instanceof LexerGrammar ) {
595             antlrTool.panic("Token reference found in lexer");
596         }
597         genErrorTryForElement(atom);
598         // Assign Token value to token label variable
599        if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
600            println(atom.getLabel() + " = " + lt1Value + ";");
601        }
602
603        // AST
604        genElementAST(atom);
605        // matching
606        genMatch(atom);
607        genErrorCatchForElement(atom);
608
609        // tack on tree cursor motion if doing a tree walker
610        if (grammar instanceof TreeWalkerGrammar) {
611             println("_t = _t.getNextSibling();");
612         }
613     }
614
615     public void gen(TreeElement t) {
616        // save AST cursor
617        println("AST __t" + t.ID + " = _t;");
618
619        // If there is a label on the root, then assign that to the variable
620        if (t.root.getLabel() != null) {
621            println(t.root.getLabel() + " = (ASTNULL == _t) ? null : ("+labeledElementASTType +")_t;");
622        }
623
624        // check for invalid modifiers ! and ^ on tree element roots
625        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
626            antlrTool.error("Suffixing a root node with '!' is not implemented",
627                         grammar.getFilename(), t.getLine(), t.getColumn());
628            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
629        }
630        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
631            antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
632                         grammar.getFilename(), t.getLine(), t.getColumn());
633            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
634        }
635
636        // Generate AST variables
637        genElementAST(t.root);
638        if (grammar.buildAST) {
639            // Save the AST construction state
640            println("ASTPair __currentAST" + t.ID + " = currentAST.copy();");
641            // Make the next item added a child of the TreeElement root
642            println("currentAST.root = currentAST.child;");
643            println("currentAST.child = null;");
644        }
645
646        // match root
647        if ( t.root instanceof WildcardElement ) {
648            println("if (null == _t) throw new MismatchedTokenException();");
649        }
650        else {
651            genMatch(t.root);
652        }
653        // move to list of children
654        println("_t = _t.getFirstChild();");
655
656        // walk list of children, generating code for each
657        for (int i=0; i<t.getAlternatives().size(); i++) {
658            Alternative a = t.getAlternativeAt(i);
659            AlternativeElement e = a.head;
660            while ( e != null ) {
661                e.generate();
662                e = e.next;
663            }
664        }
665
666        if (grammar.buildAST) {
667            // restore the AST construction state to that just after the
668            // tree root was added
669            println("currentAST = __currentAST" + t.ID + ";");
670        }
671        // restore AST cursor
672        println("_t = __t" + t.ID + ";");
673        // move cursor to sibling of tree just parsed
674        println("_t = _t.getNextSibling();");
675     }
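
[Editor's note] For a tree element such as #( A B C ), the println() calls above produce walker code of roughly this shape (schematic only; "4" stands in for the element's ID, and the buildAST lines appear only when AST construction is on):

    // schematic of the generated C# tree-matching code
    AST __t4 = _t;                              // save AST cursor
    ASTPair __currentAST4 = currentAST.copy();  // buildAST only: save construction state
    currentAST.root = currentAST.child;
    currentAST.child = null;
    // ...match the root node...
    _t = _t.getFirstChild();                    // descend to the list of children
    // ...generated code for each child alternative...
    currentAST = __currentAST4;                 // buildAST only: restore construction state
    _t = __t4;                                  // restore AST cursor
    _t = _t.getNextSibling();                   // move past the tree just parsed
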
676     /** Generate the tree-parser CSharp file */
677     public void gen(TreeWalkerGrammar g) throws IOException JavaDoc {
678         // SAS: debugging stuff removed for now...
679        setGrammar(g);
680         if (!(grammar instanceof TreeWalkerGrammar)) {
681             antlrTool.panic("Internal error generating tree-walker");
682         }
683         genBody(g);
684     }
685
686     /** Generate code for the given grammar element.
687      * @param wc The wildcard element to generate
688      */

689     public void gen(WildcardElement wc) {
690         // Variable assignment for labeled elements
691        if (wc.getLabel()!=null && syntacticPredLevel == 0) {
692            println(wc.getLabel() + " = " + lt1Value + ";");
693        }
694
695        // AST
696        genElementAST(wc);
697        // Match anything but EOF
698        if (grammar instanceof TreeWalkerGrammar) {
699             println("if (null == _t) throw new MismatchedTokenException();");
700         }
701         else if (grammar instanceof LexerGrammar) {
702             if ( grammar instanceof LexerGrammar &&
703                 (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
704                 declareSaveIndexVariableIfNeeded();
705                 println("_saveIndex = text.Length;");
706             }
707             println("matchNot(EOF/*_CHAR*/);");
708             if ( grammar instanceof LexerGrammar &&
709                 (!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
710                 declareSaveIndexVariableIfNeeded();
711                 println("text.Length = _saveIndex;"); // kill text atom put in buffer
712            }
713         }
714         else {
715             println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
716         }
717
718         // tack on tree cursor motion if doing a tree walker
719        if (grammar instanceof TreeWalkerGrammar) {
720             println("_t = _t.getNextSibling();");
721         }
722     }
723
724     /** Generate code for the given grammar element.
725      * @param blk The (...)* block to generate
726      */

727     public void gen(ZeroOrMoreBlock blk) {
728         if ( DEBUG_CODE_GENERATOR ) System.out.println("gen*("+blk+")");
729         println("{ // ( ... )*");
730         tabs++;
731         genBlockPreamble(blk);
732         String JavaDoc label;
733         if ( blk.getLabel() != null ) {
734             label = blk.getLabel();
735         }
736         else {
737             label = "_loop" + blk.ID;
738         }
739         println("for (;;)");
740         println("{");
741         tabs++;
742         // generate the init action for ()+ ()* inside the loop
743        // this allows us to do useful EOF checking...
744        genBlockInitAction(blk);
745
746        // Tell AST generation to build subrule result
747        String saveCurrentASTResult = currentASTResult;
748         if (blk.getLabel() != null) {
749             currentASTResult = blk.getLabel();
750         }
751
752         boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
753
754        // generate exit test if greedy set to false
755        // and an alt is ambiguous with exit branch
756        // or when lookahead derived purely from end-of-file
757        // Lookahead analysis stops when end-of-file is hit,
758        // returning set {epsilon}. Since {epsilon} is not
759        // ambig with any real tokens, no error is reported
760        // by deterministic() routines and we have to check
761        // for the case where the lookahead depth didn't get
762        // set to NONDETERMINISTIC (this only happens when the
763        // FOLLOW contains real atoms + epsilon).
764        boolean generateNonGreedyExitPath = false;
765         int nonGreedyExitDepth = grammar.maxk;
766
767         if ( !blk.greedy &&
768             blk.exitLookaheadDepth<=grammar.maxk &&
769             blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
770         {
771             generateNonGreedyExitPath = true;
772             nonGreedyExitDepth = blk.exitLookaheadDepth;
773         }
774         else if ( !blk.greedy &&
775             blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
776         {
777             generateNonGreedyExitPath = true;
778         }
779         if ( generateNonGreedyExitPath ) {
780             if ( DEBUG_CODE_GENERATOR ) {
781                 System.out.println("nongreedy (...)* loop; exit depth is "+
782                     blk.exitLookaheadDepth);
783             }
784            String predictExit =
785                 getLookaheadTestExpression(blk.exitCache,
786                 nonGreedyExitDepth);
787             println("// nongreedy exit test");
788             println("if ("+predictExit+") goto "+label+"_breakloop;");
789         }
790
791         CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
792         genBlockFinish(howToFinish, "goto " + label + "_breakloop;");
793
794             tabs--;
795         println("}");
796         _print(label+"_breakloop:");
797         println(";");
798         tabs--;
799         println("} // ( ... )*");
800
801         // Restore previous AST generation
802        currentASTResult = saveCurrentASTResult;
803     }
804
805     /** Generate an alternative.
806       * @param alt The alternative to generate
807       * @param blk The block to which the alternative belongs
808       */

809     protected void genAlt(Alternative alt, AlternativeBlock blk)
810     {
811         // Save the AST generation state, and set it to that of the alt
812        boolean savegenAST = genAST;
813         genAST = genAST && alt.getAutoGen();
814
815         boolean oldsaveTest = saveText;
816         saveText = saveText && alt.getAutoGen();
817
818         // Reset the variable name map for the alternative
819        Hashtable saveMap = treeVariableMap;
820        treeVariableMap = new Hashtable();
821
822         // Generate try block around the alt for error handling
823        if (alt.exceptionSpec != null) {
824             println("try // for error handling");
825             println("{");
826             tabs++;
827         }
828
829         AlternativeElement elem = alt.head;
830         while ( !(elem instanceof BlockEndElement) ) {
831             elem.generate(); // alt can begin with anything. Ask target to gen.
832            elem = elem.next;
833         }
834
835         if ( genAST)
836         {
837             if (blk instanceof RuleBlock)
838             {
839                 // Set the AST return value for the rule
840                RuleBlock rblk = (RuleBlock)blk;
841                 if( usingCustomAST )
842                 {
843                     println(rblk.getRuleName() + "_AST = ("+labeledElementASTType+")currentAST.root;");
844                 }
845                 else
846                 {
847                     println(rblk.getRuleName() + "_AST = currentAST.root;");
848                 }
849             }
850             else if (blk.getLabel() != null) {
851                 // ### future: also set AST value for labeled subrules.
852                // println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;");
853                antlrTool.warning("Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn());
854             }
855         }
856
857         if (alt.exceptionSpec != null)
858         {
859             // close try block
860            tabs--;
861             println("}");
862             genErrorHandler(alt.exceptionSpec);
863         }
864
865         genAST = savegenAST;
866         saveText = oldsaveTest;
867
868         treeVariableMap = saveMap;
869     }
870
871     /** Generate all the bitsets to be used in the parser or lexer
872      * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
873      * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
874      * Note that most languages do not support object initialization inside a
875      * class definition, so other code-generators may have to separate the
876      * bitset declarations from the initializations (e.g., put the initializations
877      * in the generated constructor instead).
878      * @param bitsetList The list of bitsets to generate.
879      * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
880      */

881     protected void genBitsets( Vector bitsetList, int maxVocabulary ) {
882         println("");
883         for (int i = 0; i < bitsetList.size(); i++)
884         {
885             BitSet p = (BitSet)bitsetList.elementAt(i);
886             // Ensure that generated BitSet is large enough for vocabulary
887            p.growToInclude(maxVocabulary);
888             genBitSet(p, i);
889         }
890     }
891
892     /** Do something simple like:
893      * private static final long[] mk_tokenSet_0() {
894      * long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L };
895      * return data;
896      * }
897      * public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
898      *
899      * Or, for large bitsets, optimize init so ranges are collapsed into loops.
900      * This is most useful for lexers using unicode.
901      */

902     private void genBitSet(BitSet p, int id) {
903         // initialization data
904        println("private static long[] mk_" + getBitsetName(id) + "()");
905         println("{");
906         tabs++;
907         int n = p.lengthInLongWords();
908         if ( n<BITSET_OPTIMIZE_INIT_THRESHOLD ) {
909             println("long[] data = { " + p.toStringOfWords() + "};");
910         }
911         else {
912            // will init manually, allocate space then set values
913            println("long[] data = new long["+n+"];");
914            long[] elems = p.toPackedArray();
915            for (int i = 0; i < elems.length;) {
916                if ( (i+1)==elems.length || elems[i]!=elems[i+1] ) {
917                    // last number or no run of numbers, just dump assignment
918                    println("data["+i+"]="+elems[i]+"L;");
919                    i++;
920                }
921                else
922                {
923                    // scan to find end of run
924                    int j;
925                    for (j = i + 1; j < elems.length && elems[j]==elems[i]; j++)
926                    {
927                        ;
928                    }
929                    // j-1 is last member of run
930                    println("for (int i = "+i+"; i<="+(j-1)+"; i++) { data[i]="+
931                            elems[i]+"L; }");
932                     i = j;
933                 }
934             }
935         }
936
937         println("return data;");
938         tabs--;
939         println("}");
940         // BitSet object
941        println("public static readonly BitSet " + getBitsetName(id) + " = new BitSet(" +
942             "mk_" + getBitsetName(id) + "()" + ");");
943     }
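
[Editor's note] The run-collapsing idea used above can be seen in isolation in the short, self-contained Java sketch below (no ANTLR classes; the array contents are made up for illustration). Instead of emitting one assignment per bitset word, neighbouring identical words are folded into a single generated for-loop, which keeps large unicode bitset initializers compact.

    public class RunCollapseDemo {
        public static void main(String[] args) {
            // made-up bitset words; long runs of equal values are common in wide bitsets
            long[] elems = { -1L, -1L, -1L, 0L, 0L, 0L, 0L, 63L };
            for (int i = 0; i < elems.length;) {
                if ((i + 1) == elems.length || elems[i] != elems[i + 1]) {
                    // isolated value: emit a single assignment
                    System.out.println("data[" + i + "]=" + elems[i] + "L;");
                    i++;
                }
                else {
                    // scan to find the end of the run of identical values
                    int j;
                    for (j = i + 1; j < elems.length && elems[j] == elems[i]; j++) {
                        ;
                    }
                    // j-1 is the last member of the run; emit one loop for the whole run
                    System.out.println("for (int i = " + i + "; i<=" + (j - 1)
                            + "; i++) { data[i]=" + elems[i] + "L; }");
                    i = j;
                }
            }
        }
    }
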
944
945     /** Given the index of a bitset in the bitset list, generate a unique name.
946      * Specific code-generators may want to override this
947      * if the language does not allow '_' or numerals in identifiers.
948      * @param index The index of the bitset in the bitset list.
949      */

950     protected String JavaDoc getBitsetName(int index) {
951         return "tokenSet_" + index + "_";
952     }
953
954     /** Generate the finish of a block, using a combination of the info
955     * returned from genCommonBlock() and the action to perform when
956     * no alts were taken
957     * @param howToFinish The return of genCommonBlock()
958     * @param noViableAction What to generate when no alt is taken
959     */

960     private void genBlockFinish(CSharpBlockFinishingInfo howToFinish, String JavaDoc noViableAction)
961     {
962
963         if (howToFinish.needAnErrorClause &&
964             (howToFinish.generatedAnIf || howToFinish.generatedSwitch))
965         {
966             if ( howToFinish.generatedAnIf ) {
967                 println("else");
968                 println("{");
969             }
970             else {
971                 println("{");
972             }
973             tabs++;
974             println(noViableAction);
975             tabs--;
976             println("}");
977         }
978
979         if ( howToFinish.postscript!=null ) {
980             if (howToFinish.needAnErrorClause && howToFinish.generatedSwitch &&
981                 !howToFinish.generatedAnIf && noViableAction != null)
982             {
983                 // Check to make sure that noViableAction is only a throw statement
984                if (noViableAction.indexOf("throw") == 0 || noViableAction.indexOf("goto") == 0) {
985                     // Remove the break statement since it isn't reachable with a throw exception
986                    int endOfBreak = howToFinish.postscript.indexOf("break;") + 6;
987                    String newPostScript = howToFinish.postscript.substring(endOfBreak);
988                     println(newPostScript);
989                 }
990                 else {
991                     println(howToFinish.postscript);
992                 }
993             }
994             else {
995                 println(howToFinish.postscript);
996             }
997         }
998     }
999
1000    /** Generate the init action for a block, which may be a RuleBlock or a
1001     * plain AlternativeBLock.
1002     * @blk The block for which the preamble is to be generated.
1003     */

1004    protected void genBlockInitAction(AlternativeBlock blk)
1005    {
1006        // dump out init action
1007       if (blk.initAction != null) {
1008            printAction(processActionForSpecialSymbols(blk.initAction, blk.getLine(), currentRule, null));
1009        }
1010    }
1011
1012    /** Generate the header for a block, which may be a RuleBlock or a
1013     * plain AlternativeBLock. This generates any variable declarations
1014     * and syntactic-predicate-testing variables.
1015     * @blk The block for which the preamble is to be generated.
1016     */

1017    protected void genBlockPreamble(AlternativeBlock blk) {
1018       // define labels for rule blocks.
1019       if ( blk instanceof RuleBlock ) {
1020           RuleBlock rblk = (RuleBlock)blk;
1021           if ( rblk.labeledElements!=null ) {
1022               for (int i=0; i<rblk.labeledElements.size(); i++) {
1023
1024                   AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
1025                   //System.out.println("looking at labeled element: "+a);
1026                   //Variables for labeled rule refs and
1027                   //subrules are different than variables for
1028                   //grammar atoms. This test is a little tricky
1029                   //because we want to get all rule refs and ebnf,
1030                   //but not rule blocks or syntactic predicates
1031                   if (
1032                       a instanceof RuleRefElement ||
1033                       a instanceof AlternativeBlock &&
1034                       !(a instanceof RuleBlock) &&
1035                       !(a instanceof SynPredBlock)
1036                       ) {
1037
1038                       if (
1039                           !(a instanceof RuleRefElement) &&
1040                           ((AlternativeBlock)a).not &&
1041                           analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
1042                           ) {
1043                           // Special case for inverted subrules that
1044                           // will be inlined. Treat these like
1045                           // token or char literal references
1046                           println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1047                           if (grammar.buildAST) {
1048                               genASTDeclaration(a);
1049                           }
1050                       }
1051                       else {
1052                           if (grammar.buildAST) {
1053                               // Always gen AST variables for
1054                               // labeled elements, even if the
1055                               // element itself is marked with !
1056                               genASTDeclaration(a);
1057                           }
1058                           if ( grammar instanceof LexerGrammar ) {
1059                               println("Token "+a.getLabel()+" = null;");
1060                           }
1061                           if (grammar instanceof TreeWalkerGrammar) {
1062                               // always generate rule-ref variables
1063                               // for tree walker
1064                               println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1065                           }
1066                       }
1067                   }
1068                   else {
1069                       // It is a token or literal reference. Generate the
1070                       // correct variable type for this grammar
1071                       println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1072                       // In addition, generate *_AST variables if building ASTs
1073                       if (grammar.buildAST) {
1074                           //println(labeledElementASTType+" " + a.getLabel() + "_AST = null;");
1075                           if (a instanceof GrammarAtom &&
1076                                ((GrammarAtom)a).getASTNodeType()!=null ) {
1077                                GrammarAtom ga = (GrammarAtom)a;
1078                                genASTDeclaration(a, ga.getASTNodeType());
1079                            }
1080                            else {
1081                                genASTDeclaration(a);
1082                            }
1083                        }
1084                    }
1085                }
1086            }
1087        }
1088    }
1089
1090    public void genBody(LexerGrammar g) throws IOException JavaDoc
1091    {
1092       // SAS: moved output creation to method so a subclass can change
1093       // how the output is generated (for VAJ interface)
1094       setupOutput(grammar.getClassName());
1095
1096       genAST = false;    // no way to gen trees.
1097       saveText = true;    // save consumed characters.
1098

1099        tabs=0;
1100
1101       // Generate header common to all CSharp output files
1102       genHeader();
1103       // Do not use printAction because we assume tabs==0
1104       println(behavior.getHeaderAction(""));
1105
1106       // Generate the CSharp namespace declaration (if specified)
1107       if (nameSpace != null)
1108           nameSpace.emitDeclarations(currentOutput);
1109        tabs++;
1110
1111       // Generate header specific to lexer CSharp file
1112       // println("import java.io.FileInputStream;");
1113       println("// Generate header specific to lexer CSharp file");
1114        println("using System;");
1115        println("using Stream = System.IO.Stream;");
1116        println("using TextReader = System.IO.TextReader;");
1117        println("using Hashtable = System.Collections.Hashtable;");
1118        println("using Comparer = System.Collections.Comparer;");
1119        if ( !(g.caseSensitiveLiterals) )
1120        {
1121            println("using CaseInsensitiveHashCodeProvider = System.Collections.CaseInsensitiveHashCodeProvider;");
1122            println("using CaseInsensitiveComparer = System.Collections.CaseInsensitiveComparer;");
1123        }
1124        println("");
1125        println("using TokenStreamException = persistence.antlr.TokenStreamException;");
1126        println("using TokenStreamIOException = persistence.antlr.TokenStreamIOException;");
1127        println("using TokenStreamRecognitionException = persistence.antlr.TokenStreamRecognitionException;");
1128        println("using CharStreamException = persistence.antlr.CharStreamException;");
1129        println("using CharStreamIOException = persistence.antlr.CharStreamIOException;");
1130        println("using ANTLRException = persistence.antlr.ANTLRException;");
1131        println("using CharScanner = persistence.antlr.CharScanner;");
1132        println("using InputBuffer = persistence.antlr.InputBuffer;");
1133        println("using ByteBuffer = persistence.antlr.ByteBuffer;");
1134        println("using CharBuffer = persistence.antlr.CharBuffer;");
1135        println("using Token = persistence.antlr.Token;");
1136        println("using CommonToken = persistence.antlr.CommonToken;");
1137        println("using SemanticException = persistence.antlr.SemanticException;");
1138        println("using RecognitionException = persistence.antlr.RecognitionException;");
1139        println("using NoViableAltForCharException = persistence.antlr.NoViableAltForCharException;");
1140        println("using MismatchedCharException = persistence.antlr.MismatchedCharException;");
1141        println("using TokenStream = persistence.antlr.TokenStream;");
1142        println("using LexerSharedInputState = persistence.antlr.LexerSharedInputState;");
1143        println("using BitSet = persistence.antlr.collections.impl.BitSet;");
1144
1145       // Generate user-defined lexer file preamble
1146       println(grammar.preambleAction.getText());
1147
1148       // Generate lexer class definition
1149       String sup=null;
1150        if ( grammar.superClass!=null ) {
1151            sup = grammar.superClass;
1152        }
1153        else {
1154            sup = "persistence.antlr." + grammar.getSuperClass();
1155        }
1156
1157        // print javadoc comment if any
1158       if ( grammar.comment!=null )
1159        {
1160            _println(grammar.comment);
1161        }
1162
1163        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
1164        if (tprefix == null) {
1165            print("public ");
1166        }
1167        else {
1168           String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
1169            if (p == null) {
1170                print("public ");
1171            }
1172            else {
1173                print(p+" ");
1174            }
1175        }
1176
1177        print("class " + grammar.getClassName() + " : "+sup);
1178        println(", TokenStream");
1179        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
1180        if ( tsuffix != null )
1181        {
1182           String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
1183            if ( suffix != null )
1184            {
1185               print(", "+suffix); // must be an interface name for CSharp
1186           }
1187        }
1188        println(" {");
1189        tabs++;
1190
1191       // Generate 'const' definitions for Token IDs
1192       genTokenDefinitions(grammar.tokenManager);
1193
1194       // Generate user-defined lexer class members
1195       print(
1196           processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
1197           );
1198
1199       //
1200       // Generate the constructor from InputStream, which in turn
1201       // calls the ByteBuffer constructor
1202       //
1203       println("public " + grammar.getClassName() + "(Stream ins) : this(new ByteBuffer(ins))");
1204        println("{");
1205        println("}");
1206        println("");
1207
1208       //
1209       // Generate the constructor from Reader, which in turn
1210       // calls the CharBuffer constructor
1211       //
1212       println("public " + grammar.getClassName() + "(TextReader r) : this(new CharBuffer(r))");
1213        println("{");
1214        println("}");
1215        println("");
1216
1217        print("public " + grammar.getClassName() + "(InputBuffer ib)");
1218        // if debugging, wrap the input buffer in a debugger
1219       if (grammar.debuggingOutput)
1220            println(" : this(new LexerSharedInputState(new persistence.antlr.debug.DebuggingInputBuffer(ib)))");
1221        else
1222            println(" : this(new LexerSharedInputState(ib))");
1223        println("{");
1224        println("}");
1225        println("");
1226
1227       //
1228       // Generate the constructor from InputBuffer (char or byte)
1229       //
1230       println("public " + grammar.getClassName() + "(LexerSharedInputState state) : base(state)");
1231        println("{");
1232        tabs++;
1233        println("initialize();");
1234        tabs--;
1235        println("}");
1236
1237        // Generate the initialize function
1238       println("private void initialize()");
1239        println("{");
1240        tabs++;
1241
1242       // if debugging, set up array variables and call user-overridable
1243       // debugging setup method
1244       if ( grammar.debuggingOutput ) {
1245            println("ruleNames = _ruleNames;");
1246            println("semPredNames = _semPredNames;");
1247            println("setupDebugging();");
1248        }
1249
1250       // Generate the setting of various generated options.
1251       // These need to be before the literals since ANTLRHashString depends on
1252       // the casesensitive stuff.
1253       println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";");
1254          println("setCaseSensitive(" + g.caseSensitive + ");");
1255
1256       // Generate the initialization of a hashtable
1257       // containing the string literals used in the lexer
1258       // The literals variable itself is in CharScanner
1259       if (g.caseSensitiveLiterals)
1260            println("literals = new Hashtable(null, Comparer.Default);");
1261        else
1262            println("literals = new Hashtable(CaseInsensitiveHashCodeProvider.Default, CaseInsensitiveComparer.Default);");
1263       Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
1264        while ( keys.hasMoreElements() ) {
1265           String key = (String)keys.nextElement();
1266            if ( key.charAt(0) != '"' ) {
1267                continue;
1268            }
1269            TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
1270            if ( sym instanceof StringLiteralSymbol ) {
1271                StringLiteralSymbol s = (StringLiteralSymbol)sym;
1272                println("literals.Add(" + s.getId() + ", " + s.getTokenType() + ");");
1273            }
1274        }
1275
1276       Enumeration ids;
1277        tabs--;
1278        println("}");
1279
1280       // generate the rule name array for debugging
1281       if (grammar.debuggingOutput) {
1282            println("private const string[] _ruleNames = {");
1283
1284            ids = grammar.rules.elements();
1285            int ruleNum=0;
1286            while ( ids.hasMoreElements() ) {
1287                GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1288                if ( sym instanceof RuleSymbol)
1289                    println(" \""+((RuleSymbol)sym).getId()+"\",");
1290            }
1291            println("};");
1292        }
1293
1294       // Generate nextToken() rule.
1295       // nextToken() is a synthetic lexer rule that is the implicit OR of all
1296       // user-defined lexer rules.
1297       genNextToken();
1298
1299       // Generate code for each rule in the lexer
1300       ids = grammar.rules.elements();
1301        int ruleNum=0;
1302        while ( ids.hasMoreElements() ) {
1303            RuleSymbol sym = (RuleSymbol) ids.nextElement();
1304            // Don't generate the synthetic rules
1305           if (!sym.getId().equals("mnextToken")) {
1306                genRule(sym, false, ruleNum++, grammar.tokenManager);
1307            }
1308            exitIfError();
1309        }
1310
1311       // Generate the semantic predicate map for debugging
1312       if (grammar.debuggingOutput)
1313           genSemPredMap();
1314
1315       // Generate the bitsets used throughout the lexer
1316       genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
1317
1318        println("");
1319        tabs--;
1320        println("}");
1321
1322        tabs--;
1323       // Generate the CSharp namespace closures (if required)
1324       if (nameSpace != null)
1325           nameSpace.emitClosures(currentOutput);
1326
1327       // Close the lexer output stream
1328       currentOutput.close();
1329        currentOutput = null;
1330    }
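
[Editor's note] Taken together, the println() calls in genBody(LexerGrammar) above emit a lexer source file whose skeleton looks roughly like this. Schematic only: "MyLexer" is a placeholder class name, the superclass is typically persistence.antlr.CharScanner, and the token-type constants, nextToken(), the per-rule methods and the tokenSet_N_ bitsets are filled in by genTokenDefinitions(), genNextToken(), genRule() and genBitsets().

    // schematic of the generated MyLexer.cs
    using System;
    using CharScanner = persistence.antlr.CharScanner;
    // ...remaining using directives printed above...

    public class MyLexer : persistence.antlr.CharScanner, TokenStream
    {
        // token ID constants emitted by genTokenDefinitions()

        public MyLexer(Stream ins) : this(new ByteBuffer(ins)) { }
        public MyLexer(TextReader r) : this(new CharBuffer(r)) { }
        public MyLexer(InputBuffer ib) : this(new LexerSharedInputState(ib)) { }
        public MyLexer(LexerSharedInputState state) : base(state)
        {
            initialize();
        }
        private void initialize()
        {
            // caseSensitiveLiterals / setCaseSensitive(...) / literals hashtable,
            // plus one literals.Add(...) per string literal in the grammar
        }

        // nextToken() plus one method per lexer rule, then the tokenSet_N_ bitsets
    }
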
1331
1332    public void genInitFactory( Grammar g ) {
1333        if( g.buildAST )
1334        {
1335            // Generate the method to initialize an ASTFactory when we're
1336           // building AST's
1337           println("static public void initializeASTFactory( ASTFactory factory )");
1338            println("{");
1339            tabs++;
1340
1341            println("factory.setMaxNodeType("+g.tokenManager.maxTokenType()+");");
1342
1343            // Walk the token vocabulary and generate code to register every TokenID->ASTNodeType
1344           // mapping specified in the tokens {...} section with the ASTFactory.
1345           Vector v = g.tokenManager.getVocabulary();
1346            for (int i = 0; i < v.size(); i++) {
1347               String s = (String)v.elementAt(i);
1348                if (s != null) {
1349                    TokenSymbol ts = g.tokenManager.getTokenSymbol(s);
1350                    if (ts != null && ts.getASTNodeType() != null) {
1351                        println("factory.setTokenTypeASTNodeType(" + s + ", \"" + ts.getASTNodeType() + "\");");
1352                    }
1353                }
1354            }
1355
1356            tabs--;
1357            println("}");
1358        }
1359    }
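    // Illustrative sketch (not part of the ANTLR source): for a buildAST grammar with a
    // hypothetical max token type of 57 and a tokens-section entry ID<AST=MyIdNode>,
    // genInitFactory() emits C# along these lines:
    //
    //     static public void initializeASTFactory( ASTFactory factory )
    //     {
    //         factory.setMaxNodeType(57);
    //         factory.setTokenTypeASTNodeType(ID, "MyIdNode");
    //     }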
1360
1361    public void genBody(ParserGrammar g) throws IOException JavaDoc
1362    {
1363        // Open the output stream for the parser and set the currentOutput
1364        // SAS: moved file setup so subclass could do it (for VAJ interface)
1365        setupOutput(grammar.getClassName());
1366
1367        genAST = grammar.buildAST;
1368
1369        tabs = 0;
1370
1371        // Generate the header common to all output files.
1372        genHeader();
1373        // Do not use printAction because we assume tabs==0
1374        println(behavior.getHeaderAction(""));
1375
1376            // Generate the CSharp namespace declaration (if specified)
1377        if (nameSpace != null)
1378            nameSpace.emitDeclarations(currentOutput);
1379        tabs++;
1380
1381        // Generate header for the parser
1382        println("// Generate the header common to all output files.");
1383        println("using System;");
1384        println("");
1385        println("using TokenBuffer = persistence.antlr.TokenBuffer;");
1386        println("using TokenStreamException = persistence.antlr.TokenStreamException;");
1387        println("using TokenStreamIOException = persistence.antlr.TokenStreamIOException;");
1388        println("using ANTLRException = persistence.antlr.ANTLRException;");
1389        println("using " + grammar.getSuperClass() + " = persistence.antlr." + grammar.getSuperClass() + ";");
1390        println("using Token = persistence.antlr.Token;");
1391        println("using TokenStream = persistence.antlr.TokenStream;");
1392        println("using RecognitionException = persistence.antlr.RecognitionException;");
1393        println("using NoViableAltException = persistence.antlr.NoViableAltException;");
1394        println("using MismatchedTokenException = persistence.antlr.MismatchedTokenException;");
1395        println("using SemanticException = persistence.antlr.SemanticException;");
1396        println("using ParserSharedInputState = persistence.antlr.ParserSharedInputState;");
1397        println("using BitSet = persistence.antlr.collections.impl.BitSet;");
1398        if ( genAST ) {
1399            println("using AST = persistence.antlr.collections.AST;");
1400            println("using ASTPair = persistence.antlr.ASTPair;");
1401            println("using ASTFactory = persistence.antlr.ASTFactory;");
1402            println("using ASTArray = persistence.antlr.collections.impl.ASTArray;");
1403        }
1404
1405        // Output the user-defined parser preamble
1406        println(grammar.preambleAction.getText());
1407
1408        // Generate parser class definition
1409        String sup=null;
1410        if ( grammar.superClass != null )
1411            sup = grammar.superClass;
1412        else
1413            sup = "persistence.antlr." + grammar.getSuperClass();
1414
1415        // print javadoc comment if any
1416        if ( grammar.comment!=null ) {
1417            _println(grammar.comment);
1418        }
1419
1420        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
1421        if (tprefix == null) {
1422            print("public ");
1423        }
1424        else {
1425            String JavaDoc p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
1426            if (p == null) {
1427                print("public ");
1428            }
1429            else {
1430                print(p+" ");
1431            }
1432        }
1433
1434        println("class " + grammar.getClassName() + " : "+sup);
1435
1436        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
1437        if ( tsuffix != null ) {
1438            String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
1439            if ( suffix != null )
1440                print(" , "+suffix); // must be an interface name for CSharp
1441
}
1442        println("{");
1443        tabs++;
1444
1445        // Generate 'const' definitions for Token IDs
1446        genTokenDefinitions(grammar.tokenManager);
1447
1448        // set up an array of all the rule names so the debugger can
1449        // keep track of them only by number -- less to store in tree...
1450        if (grammar.debuggingOutput) {
1451            println("private const string[] _ruleNames = {");
1452            tabs++;
1453
1454            Enumeration JavaDoc ids = grammar.rules.elements();
1455            int ruleNum=0;
1456            while ( ids.hasMoreElements() ) {
1457                GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1458                if ( sym instanceof RuleSymbol)
1459                    println(" \""+((RuleSymbol)sym).getId()+"\",");
1460            }
1461            tabs--;
1462            println("};");
1463        }
1464
1465        // Generate user-defined parser class members
1466        print(
1467            processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
1468            );
1469
1470        // Generate parser class constructor from TokenBuffer
1471        println("");
1472        println("protected void initialize()");
1473        println("{");
1474        tabs++;
1475        println("tokenNames = tokenNames_;");
1476
1477        if( grammar.buildAST )
1478            println("initializeFactory();");
1479
1480        // if debugging, set up arrays and call the user-overridable
1481        // debugging setup method
1482        if ( grammar.debuggingOutput ) {
1483            println("ruleNames = _ruleNames;");
1484            println("semPredNames = _semPredNames;");
1485            println("setupDebugging(tokenBuf);");
1486        }
1487        tabs--;
1488        println("}");
1489        println("");
1490
1491        println("");
1492        println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) : base(tokenBuf, k)");
1493        println("{");
1494        tabs++;
1495        println("initialize();");
1496        tabs--;
1497        println("}");
1498        println("");
1499
1500        println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) : this(tokenBuf," + grammar.maxk + ")");
1501        println("{");
1502        println("}");
1503        println("");
1504
1505        // Generate parser class constructor from TokenStream
1506        println("protected " + grammar.getClassName()+"(TokenStream lexer, int k) : base(lexer,k)");
1507        println("{");
1508        tabs++;
1509        println("initialize();");
1510        tabs--;
1511        println("}");
1512        println("");
1513
1514        println("public " + grammar.getClassName()+"(TokenStream lexer) : this(lexer," + grammar.maxk + ")");
1515        println("{");
1516        println("}");
1517        println("");
1518
1519        println("public " + grammar.getClassName()+"(ParserSharedInputState state) : base(state," + grammar.maxk + ")");
1520        println("{");
1521        tabs++;
1522        println("initialize();");
1523        tabs--;
1524        println("}");
1525        println("");
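        // Illustrative sketch (not part of the ANTLR source): for a hypothetical grammar
        // class "MyParser" with k=2, the constructor boilerplate emitted above looks roughly like:
        //
        //     protected MyParser(TokenBuffer tokenBuf, int k) : base(tokenBuf, k) { initialize(); }
        //     public MyParser(TokenBuffer tokenBuf) : this(tokenBuf, 2) { }
        //     protected MyParser(TokenStream lexer, int k) : base(lexer, k) { initialize(); }
        //     public MyParser(TokenStream lexer) : this(lexer, 2) { }
        //     public MyParser(ParserSharedInputState state) : base(state, 2) { initialize(); }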
1526
1527        astTypes = new java.util.Vector JavaDoc(100);
1528
1529        // Generate code for each rule in the grammar
1530        Enumeration ids = grammar.rules.elements();
1531        int ruleNum=0;
1532        while ( ids.hasMoreElements() ) {
1533            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1534            if ( sym instanceof RuleSymbol) {
1535                RuleSymbol rs = (RuleSymbol)sym;
1536                genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager);
1537            }
1538            exitIfError();
1539        }
1540        if ( usingCustomAST )
1541        {
1542            // when we are using a custom AST, overload Parser.getAST() to return the
1543            // custom AST type
1544            println("public new " + labeledElementASTType + " getAST()");
1545            println("{");
1546            tabs++;
1547            println("return (" + labeledElementASTType + ") returnAST;");
1548            tabs--;
1549            println("}");
1550            println("");
1551        }
1552
1553        // Generate the method that initializes the ASTFactory when we're
1554        // building AST's
1555        println("private void initializeFactory()");
1556        println("{");
1557        tabs++;
1558        if( grammar.buildAST ) {
1559            println("if (astFactory == null)");
1560            println("{");
1561            tabs++;
1562            if( usingCustomAST )
1563            {
1564                println("astFactory = new ASTFactory(\"" + labeledElementASTType + "\");");
1565            }
1566            else
1567                println("astFactory = new ASTFactory();");
1568            tabs--;
1569            println("}");
1570            println("initializeASTFactory( astFactory );");
1571        }
1572        tabs--;
1573        println("}");
1574        genInitFactory( g );
1575
1576        // Generate the token names
1577        genTokenStrings();
1578
1579        // Generate the bitsets used throughout the grammar
1580        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
1581
1582        // Generate the semantic predicate map for debugging
1583        if (grammar.debuggingOutput)
1584            genSemPredMap();
1585
1586        // Close class definition
1587
println("");
1588        tabs--;
1589        println("}");
1590
1591        tabs--;
1592        // Generate the CSharp namespace closures (if required)
1593        if (nameSpace != null)
1594            nameSpace.emitClosures(currentOutput);
1595
1596        // Close the parser output stream
1597        currentOutput.close();
1598        currentOutput = null;
1599    }
1600    public void genBody(TreeWalkerGrammar g) throws IOException JavaDoc
1601    {
1602        // Open the output stream for the parser and set the currentOutput
1603        // SAS: move file open to method so subclass can override it
1604        // (mainly for VAJ interface)
1605        setupOutput(grammar.getClassName());
1606
1607        genAST = grammar.buildAST;
1608        tabs = 0;
1609
1610        // Generate the header common to all output files.
1611        genHeader();
1612        // Do not use printAction because we assume tabs==0
1613        println(behavior.getHeaderAction(""));
1614
1615      // Generate the CSharp namespace declaration (if specified)
1616        if (nameSpace != null)
1617            nameSpace.emitDeclarations(currentOutput);
1618        tabs++;
1619
1620        // Generate header specific to the tree-parser CSharp file
1621        println("// Generate header specific to the tree-parser CSharp file");
1622        println("using System;");
1623        println("");
1624        println("using " + grammar.getSuperClass() + " = persistence.antlr." + grammar.getSuperClass() + ";");
1625        println("using Token = persistence.antlr.Token;");
1626        println("using AST = persistence.antlr.collections.AST;");
1627        println("using RecognitionException = persistence.antlr.RecognitionException;");
1628        println("using ANTLRException = persistence.antlr.ANTLRException;");
1629        println("using NoViableAltException = persistence.antlr.NoViableAltException;");
1630        println("using MismatchedTokenException = persistence.antlr.MismatchedTokenException;");
1631        println("using SemanticException = persistence.antlr.SemanticException;");
1632        println("using BitSet = persistence.antlr.collections.impl.BitSet;");
1633        println("using ASTPair = persistence.antlr.ASTPair;");
1634        println("using ASTFactory = persistence.antlr.ASTFactory;");
1635        println("using ASTArray = persistence.antlr.collections.impl.ASTArray;");
1636
1637        // Output the user-defined parser preamble
1638        println(grammar.preambleAction.getText());
1639
1640        // Generate parser class definition
1641        String sup=null;
1642        if ( grammar.superClass!=null ) {
1643            sup = grammar.superClass;
1644        }
1645        else {
1646            sup = "persistence.antlr." + grammar.getSuperClass();
1647        }
1648        println("");
1649
1650        // print javadoc comment if any
1651        if ( grammar.comment!=null ) {
1652            _println(grammar.comment);
1653        }
1654
1655        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
1656        if (tprefix == null) {
1657            print("public ");
1658        }
1659        else {
1660            String JavaDoc p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
1661            if (p == null) {
1662                print("public ");
1663            }
1664            else {
1665                print(p+" ");
1666            }
1667        }
1668
1669        println("class " + grammar.getClassName() + " : "+sup);
1670        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
1671        if ( tsuffix != null ) {
1672            String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
1673            if ( suffix != null ) {
1674                print(" , "+suffix); // must be an interface name for CSharp
1675
}
1676        }
1677        println("{");
1678        tabs++;
1679
1680        // Generate 'const' definitions for Token IDs
1681        genTokenDefinitions(grammar.tokenManager);
1682
1683        // Generate user-defined parser class members
1684        print(
1685            processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
1686            );
1687
1688        // Generate default parser class constructor
1689        println("public " + grammar.getClassName() + "()");
1690        println("{");
1691        tabs++;
1692        println("tokenNames = tokenNames_;");
1693        tabs--;
1694        println("}");
1695        println("");
1696
1697        astTypes = new java.util.Vector JavaDoc();
1698        // Generate code for each rule in the grammar
1699        Enumeration ids = grammar.rules.elements();
1700        int ruleNum=0;
1701        String JavaDoc ruleNameInits = "";
1702        while ( ids.hasMoreElements() ) {
1703            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
1704            if ( sym instanceof RuleSymbol) {
1705                RuleSymbol rs = (RuleSymbol)sym;
1706                genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager);
1707            }
1708            exitIfError();
1709        }
1710
1711        if ( usingCustomAST )
1712        {
1713            // when we are using a custom ast override Parser.getAST to return the
1714            // custom AST type
1715            println("public new " + labeledElementASTType + " getAST()");
1716            println("{");
1717            tabs++;
1718            println("return (" + labeledElementASTType + ") returnAST;");
1719            tabs--;
1720            println("}");
1721            println("");
1722        }
1723
1724        // Generate the ASTFactory initialization function
1725        genInitFactory( grammar );
1726
1727        // Generate the token names
1728        genTokenStrings();
1729
1730        // Generate the bitsets used throughout the grammar
1731        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
1732
1733        // Close class definition
1734
tabs--;
1735        println("}");
1736        println("");
1737
1738        tabs--;
1739        // Generate the CSharp namespace closures (if required)
1740        if (nameSpace != null)
1741            nameSpace.emitClosures(currentOutput);
1742
1743        // Close the parser output stream
1744        currentOutput.close();
1745        currentOutput = null;
1746    }
1747
1748    /** Generate a series of case statements that implement a BitSet test.
1749     * @param p The Bitset for which cases are to be generated
1750     */

1751    protected void genCases(BitSet p) {
1752        if ( DEBUG_CODE_GENERATOR ) System.out.println("genCases("+p+")");
1753        int[] elems;
1754
1755        elems = p.toArray();
1756        // Wrap cases four-per-line for lexer, one-per-line for parser
1757        int wrap = (grammar instanceof LexerGrammar) ? 4 : 1;
1758        int j=1;
1759        boolean startOfLine = true;
1760        for (int i = 0; i < elems.length; i++) {
1761            if (j==1) {
1762                print("");
1763            } else {
1764                _print(" ");
1765            }
1766            _print("case " + getValueString(elems[i]) + ":");
1767            if (j==wrap) {
1768                _println("");
1769                startOfLine = true;
1770                j=1;
1771            }
1772            else {
1773                j++;
1774                startOfLine = false;
1775            }
1776        }
1777        if (!startOfLine) {
1778            _println("");
1779        }
1780    }
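    // Illustrative sketch (not part of the ANTLR source): for a lexer BitSet holding
    // 'a'..'e', genCases() emits case labels wrapped four per line,
    //
    //     case 'a': case 'b': case 'c': case 'd':
    //     case 'e':
    //
    // while a parser BitSet yields one label per line, e.g. "case ID:".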
1781
1782    /**Generate common code for a block of alternatives; return a
1783    * postscript that needs to be generated at the end of the
1784    * block. Other routines may append else-clauses and such for
1785    * error checking before the postfix is generated. If the
1786    * grammar is a lexer, then generate alternatives in an order
1787    * where alternatives requiring deeper lookahead are generated
1788    * first, and EOF in the lookahead set reduces the depth of
1789    * the lookahead. @param blk The block to generate @param
1790    * noTestForSingle If true, then it does not generate a test
1791    * for a single alternative.
1792    */

1793    public CSharpBlockFinishingInfo genCommonBlock(AlternativeBlock blk,
1794        boolean noTestForSingle)
1795    {
1796        int nIF=0;
1797        boolean createdLL1Switch = false;
1798        int closingBracesOfIFSequence = 0;
1799        CSharpBlockFinishingInfo finishingInfo = new CSharpBlockFinishingInfo();
1800        if ( DEBUG_CODE_GENERATOR ) System.out.println("genCommonBlock("+blk+")");
1801
1802        // Save the AST generation state, and set it to that of the block
1803        boolean savegenAST = genAST;
1804        genAST = genAST && blk.getAutoGen();
1805
1806            boolean oldsaveTest = saveText;
1807        saveText = saveText && blk.getAutoGen();
1808
1809        // Is this block inverted? If so, generate special-case code
1810        if ( blk.not &&
1811            analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) )
1812        {
1813            if ( DEBUG_CODE_GENERATOR ) System.out.println("special case: ~(subrule)");
1814            Lookahead p = analyzer.look(1, blk);
1815            // Variable assignment for labeled elements
1816
if (blk.getLabel() != null && syntacticPredLevel == 0) {
1817                println(blk.getLabel() + " = " + lt1Value + ";");
1818            }
1819
1820            // AST
1821
genElementAST(blk);
1822
1823            String JavaDoc astArgs="";
1824            if (grammar instanceof TreeWalkerGrammar) {
1825                if ( usingCustomAST )
1826                    astArgs = "(AST)_t,";
1827                else
1828                    astArgs = "_t,";
1829            }
1830
1831            // match the bitset for the alternative
1832
println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
1833
1834            // tack on tree cursor motion if doing a tree walker
1835
if (grammar instanceof TreeWalkerGrammar)
1836            {
1837                println("_t = _t.getNextSibling();");
1838            }
1839            return finishingInfo;
1840        }
1841
1842        // Special handling for single alt
1843        if (blk.getAlternatives().size() == 1)
1844        {
1845            Alternative alt = blk.getAlternativeAt(0);
1846            // Generate a warning if there is a synPred for single alt.
1847
if (alt.synPred != null)
1848            {
1849                antlrTool.warning(
1850                    "Syntactic predicate superfluous for single alternative",
1851                    grammar.getFilename(),
1852               blk.getAlternativeAt(0).synPred.getLine(),
1853               blk.getAlternativeAt(0).synPred.getColumn()
1854                    );
1855            }
1856            if (noTestForSingle)
1857            {
1858                if (alt.semPred != null)
1859                {
1860                    // Generate validating predicate
1861
genSemPred(alt.semPred, blk.line);
1862                }
1863                genAlt(alt, blk);
1864                return finishingInfo;
1865            }
1866        }
1867
1868        // count number of simple LL(1) cases; only do switch for
1869        // many LL(1) cases (no preds, no end of token refs)
1870        // We don't care about exit paths for (...)*, (...)+
1871        // because we don't explicitly have a test for them
1872        // as an alt in the loop.
1873        //
1874        // Also, we now count how many unicode lookahead sets
1875        // there are--they must be moved to DEFAULT or ELSE
1876        // clause.
1877        int nLL1 = 0;
1878        for (int i=0; i<blk.getAlternatives().size(); i++)
1879        {
1880            Alternative a = blk.getAlternativeAt(i);
1881            if ( suitableForCaseExpression(a) ) {
1882                nLL1++;
1883            }
1884        }
1885
1886        // do LL(1) cases
1887        if ( nLL1 >= makeSwitchThreshold)
1888        {
1889            // Determine the name of the item to be compared
1890            String testExpr = lookaheadString(1);
1891            createdLL1Switch = true;
1892            // when parsing trees, convert null to valid tree node with NULL lookahead
1893
if ( grammar instanceof TreeWalkerGrammar )
1894            {
1895                println("if (null == _t)");
1896                tabs++;
1897                println("_t = ASTNULL;");
1898                tabs--;
1899            }
1900            println("switch ( " + testExpr+" )");
1901            println("{");
1902            //tabs++;
1903
for (int i=0; i<blk.alternatives.size(); i++)
1904            {
1905                Alternative alt = blk.getAlternativeAt(i);
1906                // ignore any non-LL(1) alts, predicated alts,
1907                // or end-of-token alts for case expressions
1908                bSaveIndexCreated = false;
1909                if ( !suitableForCaseExpression(alt) )
1910                {
1911                    continue;
1912                }
1913                Lookahead p = alt.cache[1];
1914                if (p.fset.degree() == 0 && !p.containsEpsilon())
1915                {
1916                    antlrTool.warning("Alternate omitted due to empty prediction set",
1917                        grammar.getFilename(),
1918                        alt.head.getLine(), alt.head.getColumn());
1919                }
1920                else
1921                {
1922                    genCases(p.fset);
1923                    println("{");
1924                    tabs++;
1925                    genAlt(alt, blk);
1926                    println("break;");
1927                    tabs--;
1928                    println("}");
1929                }
1930            }
1931            println("default:");
1932            tabs++;
1933        }
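        // Illustrative sketch (not part of the ANTLR source): at this point the generated C#
        // has roughly the shape
        //
        //     switch ( LA(1) )
        //     {
        //     case ID:
        //     case INT:
        //     {
        //         ...code for the predicted alternative...
        //         break;
        //     }
        //     default:
        //
        // (ID/INT are placeholders); the "default:" branch is filled in below with the
        // non-LL(1), predicated and error-handling alternatives.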
1934
1935        // do non-LL(1) and nondeterministic cases. This is tricky in
1936        // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR
1937        // : "*="; Since nextToken is generated without a loop, then
1938        // the STAR will have end-of-token as its lookahead set for
1939        // LA(2). So, we must generate the alternatives containing
1940        // trailing end-of-token in their lookahead sets *after* the
1941        // alternatives without end-of-token. This implements the
1942        // usual lexer convention that longer matches come before
1943        // shorter ones, e.g. "*=" matches ASSIGN_STAR not STAR.
1944        //
1945        // For non-lexer grammars, this does not sort the alternates
1946        // by depth. Note that alts whose lookahead is purely
1947        // end-of-token at k=1 end up as default or else clauses.
1948        int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
1949        for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
1950            if ( DEBUG_CODE_GENERATOR ) System.out.println("checking depth "+altDepth);
1951            for (int i=0; i<blk.alternatives.size(); i++) {
1952                Alternative alt = blk.getAlternativeAt(i);
1953                if ( DEBUG_CODE_GENERATOR ) System.out.println("genAlt: "+i);
1954                // if we made a switch above, ignore what we already took care
1955                // of. Specifically, LL(1) alts with no preds
1956                // that do not have end-of-token in their prediction set
1957                // and that are not giant unicode sets.
1958                if ( createdLL1Switch && suitableForCaseExpression(alt) )
1959                {
1960                    if ( DEBUG_CODE_GENERATOR ) System.out.println("ignoring alt because it was in the switch");
1961                    continue;
1962                }
1963                String JavaDoc e;
1964
1965                boolean unpredicted = false;
1966
1967                if (grammar instanceof LexerGrammar) {
1968                    // Calculate the "effective depth" of the alt,
1969                    // which is the max depth at which
1970                    // cache[depth]!=end-of-token
1971                    int effectiveDepth = alt.lookaheadDepth;
1972                    if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC)
1973                    {
1974                        // use maximum lookahead
1975
effectiveDepth = grammar.maxk;
1976                    }
1977                    while ( effectiveDepth >= 1 &&
1978                        alt.cache[effectiveDepth].containsEpsilon() )
1979                    {
1980                        effectiveDepth--;
1981                    }
1982                    // Ignore alts whose effective depth is other than
1983                    // the ones we are generating for this iteration.
1984                    if (effectiveDepth != altDepth)
1985                    {
1986                        if ( DEBUG_CODE_GENERATOR )
1987                            System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
1988                        continue;
1989                    }
1990                    unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
1991                    e = getLookaheadTestExpression(alt, effectiveDepth);
1992                }
1993                else
1994                {
1995                    unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
1996                    e = getLookaheadTestExpression(alt, grammar.maxk);
1997                }
1998
1999                // Was it a big unicode range that forced unsuitability
2000                // for a case expression?
2001                if (alt.cache[1].fset.degree() > caseSizeThreshold &&
2002                suitableForCaseExpression(alt))
2003                {
2004                    if ( nIF==0 )
2005                    {
2006                        println("if " + e);
2007                        println("{");
2008                    }
2009                    else {
2010                        println("else if " + e);
2011                        println("{");
2012                    }
2013                }
2014                else if (unpredicted &&
2015                    alt.semPred==null &&
2016                    alt.synPred==null)
2017                {
2018                    // The alt has empty prediction set and no
2019                    // predicate to help out. if we have not
2020                    // generated a previous if, just put {...} around
2021                    // the end-of-token clause
2022                    if ( nIF==0 ) {
2023                        println("{");
2024                    }
2025                    else {
2026                        println("else {");
2027                    }
2028                    finishingInfo.needAnErrorClause = false;
2029                }
2030                else
2031                {
2032                    // check for sem and syn preds
2033                    // Add any semantic predicate expression to the lookahead test
2034                    if ( alt.semPred != null ) {
2035                        // if debugging, wrap the evaluation of the predicate in a method
2036                        //
2037                        // translate $ and # references
2038                        ActionTransInfo tInfo = new ActionTransInfo();
2039                        String JavaDoc actionStr = processActionForSpecialSymbols(alt.semPred,
2040                            blk.line,
2041                            currentRule,
2042                            tInfo);
2043                        // ignore translation info...we don't need to
2044                        // do anything with it. call that will inform
2045                        // SemanticPredicateListeners of the result
2046                        if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) &&
2047                                grammar.debuggingOutput) {
2048                            e = "("+e+"&& fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.PREDICTING,"+ //FIXME
2049                                addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
2050                        }
2051                        else {
2052                            e = "("+e+"&&("+actionStr +"))";
2053                        }
2054                    }
2055
2056                    // Generate any syntactic predicates
2057
if ( nIF>0 ) {
2058                        if ( alt.synPred != null ) {
2059                            println("else {");
2060                            tabs++;
2061                            genSynPred( alt.synPred, e );
2062                            closingBracesOfIFSequence++;
2063                        }
2064                        else {
2065                            println("else if " + e + " {");
2066                        }
2067                    }
2068                    else {
2069                        if ( alt.synPred != null ) {
2070                            genSynPred( alt.synPred, e );
2071                        }
2072                        else {
2073                            // when parsing trees, convert null to valid tree node
2074                            // with NULL lookahead.
2075                            if ( grammar instanceof TreeWalkerGrammar ) {
2076                                println("if (_t == null)");
2077                                tabs++;
2078                                println("_t = ASTNULL;");
2079                                tabs--;
2080                            }
2081                            println("if " + e);
2082                            println("{");
2083                        }
2084                    }
2085
2086                }
2087
2088                nIF++;
2089                tabs++;
2090                genAlt(alt, blk);
2091                tabs--;
2092                println("}");
2093            }
2094        }
2095
2096        String JavaDoc ps = "";
2097        for (int i=1; i<=closingBracesOfIFSequence; i++) {
2098            ps+="}";
2099        }
2100
2101        // Restore the AST generation state
2102        genAST = savegenAST;
2103
2104        // restore save text state
2105        saveText=oldsaveTest;
2106
2107        // Return the finishing info.
2108        if ( createdLL1Switch ) {
2109            tabs--;
2110            finishingInfo.postscript = ps+"break; }";
2111            finishingInfo.generatedSwitch = true;
2112            finishingInfo.generatedAnIf = nIF>0;
2113            //return new CSharpBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
2114

2115        }
2116        else {
2117            finishingInfo.postscript = ps;
2118            finishingInfo.generatedSwitch = false;
2119            finishingInfo.generatedAnIf = nIF>0;
2120            // return new CSharpBlockFinishingInfo(ps, false,nIF>0);
2121
}
2122        return finishingInfo;
2123    }
2124
2125    private static boolean suitableForCaseExpression(Alternative a) {
2126        return a.lookaheadDepth == 1 &&
2127            a.semPred == null &&
2128            !a.cache[1].containsEpsilon() &&
2129            a.cache[1].fset.degree()<=caseSizeThreshold;
2130    }
2131
2132    /** Generate code to link an element reference into the AST */
2133    private void genElementAST(AlternativeElement el) {
2134        // handle case where you're not building trees, but are in tree walker.
2135        // Just need to get labels set up.
2136        if ( grammar instanceof TreeWalkerGrammar && !grammar.buildAST )
2137        {
2138            String JavaDoc elementRef;
2139            String JavaDoc astName;
2140
2141            // Generate names and declarations of the AST variable(s)
2142
if (el.getLabel() == null)
2143            {
2144                elementRef = lt1Value;
2145                // Generate AST variables for unlabeled stuff
2146
astName = "tmp" + astVarNumber + "_AST";
2147                astVarNumber++;
2148                // Map the generated AST variable in the alternate
2149
mapTreeVariable(el, astName);
2150                // Generate an "input" AST variable also
2151
println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
2152            }
2153            return;
2154        }
2155
2156        if (grammar.buildAST && syntacticPredLevel == 0)
2157        {
2158            boolean needASTDecl =
2159                (genAST &&
2160                (el.getLabel() != null || (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG)));
2161
2162            // RK: if we have a grammar element always generate the decl
2163            // since some guy can access it from an action and we can't
2164            // peek ahead (well not without making a mess).
2165            // I'd prefer taking this out.
2166            if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
2167                (el instanceof TokenRefElement))
2168                needASTDecl = true;
2169
2170            boolean doNoGuessTest = (grammar.hasSyntacticPredicate && needASTDecl);
2171
2172            String JavaDoc elementRef;
2173            String JavaDoc astNameBase;
2174
2175            // Generate names and declarations of the AST variable(s)
2176
if (el.getLabel() != null)
2177            {
2178                // if the element is labeled use that name...
2179
elementRef = el.getLabel();
2180                astNameBase = el.getLabel();
2181            }
2182            else
2183            {
2184                // else generate a temporary name...
2185                elementRef = lt1Value;
2186                // Generate AST variables for unlabeled stuff
2187                astNameBase = "tmp" + astVarNumber;
2188                astVarNumber++;
2189            }
2190
2191            // Generate the declaration if required.
2192
if (needASTDecl)
2193            {
2194                // Generate the declaration
2195
if ( el instanceof GrammarAtom )
2196                {
2197                    GrammarAtom ga = (GrammarAtom)el;
2198                    if ( ga.getASTNodeType()!=null )
2199                    {
2200                        genASTDeclaration(el, astNameBase, ga.getASTNodeType());
2201                        //println(ga.getASTNodeType()+" " + astName+" = null;");
2202
}
2203                    else
2204                    {
2205                        genASTDeclaration(el, astNameBase, labeledElementASTType);
2206                        //println(labeledElementASTType+" " + astName + " = null;");
2207
}
2208                }
2209                else
2210                {
2211                    genASTDeclaration(el, astNameBase, labeledElementASTType);
2212                    //println(labeledElementASTType+" " + astName + " = null;");
2213
}
2214            }
2215
2216            // for convenience..
2217
String JavaDoc astName = astNameBase + "_AST";
2218
2219            // Map the generated AST variable in the alternate
2220
mapTreeVariable(el, astName);
2221            if (grammar instanceof TreeWalkerGrammar)
2222            {
2223                // Generate an "input" AST variable also
2224
println(labeledElementASTType+" " + astName + "_in = null;");
2225            }
2226
2227
2228            // Enclose actions with !guessing
2229            if (doNoGuessTest) {
2230                //println("if (0 == inputState.guessing)");
2231                //println("{");
2232                //tabs++;
2233            }
2234
2235            // if something has a label assume it will be used
2236            // so we must initialize the RefAST
2237            if (el.getLabel() != null)
2238            {
2239                if ( el instanceof GrammarAtom )
2240                {
2241                    println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
2242                }
2243                else
2244                {
2245                    println(astName + " = "+ getASTCreateString(elementRef) + ";");
2246                }
2247            }
2248
2249            // if it has no label but a declaration exists initialize it.
2250
if (el.getLabel() == null && needASTDecl)
2251            {
2252                elementRef = lt1Value;
2253                if ( el instanceof GrammarAtom )
2254                {
2255                    println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
2256                }
2257                else
2258                {
2259                    println(astName + " = "+ getASTCreateString(elementRef) + ";");
2260                }
2261                // Map the generated AST variable in the alternate
2262
if (grammar instanceof TreeWalkerGrammar)
2263                {
2264                    // set "input" AST variable also
2265
println(astName + "_in = " + elementRef + ";");
2266                }
2267            }
2268
2269            if (genAST)
2270            {
2271                switch (el.getAutoGenType())
2272                {
2273                case GrammarElement.AUTO_GEN_NONE:
2274                    if ( usingCustomAST ||
2275                         ( (el instanceof GrammarAtom) &&
2276                           (((GrammarAtom)el).getASTNodeType() != null) ) )
2277                        println("astFactory.addASTChild(currentAST, (AST)" + astName + ");");
2278                    else
2279                        println("astFactory.addASTChild(currentAST, " + astName + ");");
2280                    break;
2281                case GrammarElement.AUTO_GEN_CARET:
2282                    if ( usingCustomAST ||
2283                         ( (el instanceof GrammarAtom) &&
2284                           (((GrammarAtom)el).getASTNodeType() != null) ) )
2285                        println("astFactory.makeASTRoot(currentAST, (AST)" + astName + ");");
2286                    else
2287                        println("astFactory.makeASTRoot(currentAST, " + astName + ");");
2288                    break;
2289                default:
2290                    break;
2291                }
2292            }
2293            if (doNoGuessTest)
2294            {
2295                //tabs--;
2296                //println("}");
2297            }
2298        }
2299    }
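    // Illustrative sketch (not part of the ANTLR source): for a labeled token reference
    // such as id:IDENT in a buildAST parser, the method above emits roughly this C#:
    //
    //     AST id_AST = null;
    //     id_AST = astFactory.create(id);
    //     astFactory.addASTChild(currentAST, id_AST);
    //
    // The declaration itself comes from genASTDeclaration(), and makeASTRoot() is used
    // instead of addASTChild() when the element carries the '^' suffix.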
2300
2301
2302    /** Close the try block and generate catch phrases
2303     * if the element has a labeled handler in the rule
2304     */

2305    private void genErrorCatchForElement(AlternativeElement el) {
2306        if (el.getLabel() == null) return;
2307        String JavaDoc r = el.enclosingRuleName;
2308        if ( grammar instanceof LexerGrammar ) {
2309            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
2310        }
2311        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
2312        if (rs == null) {
2313            antlrTool.panic("Enclosing rule not found!");
2314        }
2315        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
2316        if (ex != null) {
2317            tabs--;
2318            println("}");
2319            genErrorHandler(ex);
2320        }
2321    }
2322
2323    /** Generate the catch phrases for a user-specified error handler */
2324    private void genErrorHandler(ExceptionSpec ex)
2325    {
2326        // Each ExceptionHandler in the ExceptionSpec is a separate catch
2327
for (int i = 0; i < ex.handlers.size(); i++)
2328        {
2329            ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
2330            // Generate catch phrase
2331
println("catch (" + handler.exceptionTypeAndName.getText() + ")");
2332            println("{");
2333            tabs++;
2334            if (grammar.hasSyntacticPredicate) {
2335                println("if (0 == inputState.guessing)");
2336                println("{");
2337                tabs++;
2338            }
2339
2340        // When not guessing, execute user handler action
2341
ActionTransInfo tInfo = new ActionTransInfo();
2342        printAction(processActionForSpecialSymbols(handler.action.getText(),
2343                            handler.action.getLine(), currentRule, tInfo));
2344
2345            if (grammar.hasSyntacticPredicate)
2346            {
2347                tabs--;
2348                println("}");
2349                println("else");
2350                println("{");
2351                tabs++;
2352                // When guessing, rethrow exception
2353                //println("throw " + extractIdOfAction(handler.exceptionTypeAndName) + ";");
2354                println("throw;");
2355                tabs--;
2356                println("}");
2357            }
2358            // Close catch phrase
2359
tabs--;
2360            println("}");
2361        }
2362    }
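    // Illustrative sketch (not part of the ANTLR source): a grammar handler written as
    // "exception catch [RecognitionException ex] { ... }" comes out, when syntactic
    // predicates are in use, as C# of roughly this shape:
    //
    //     catch (RecognitionException ex)
    //     {
    //         if (0 == inputState.guessing)
    //         {
    //             ...user handler action...
    //         }
    //         else
    //         {
    //             throw;
    //         }
    //     }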
2363    /** Generate a try { opening if the element has a labeled handler in the rule */
2364    private void genErrorTryForElement(AlternativeElement el) {
2365        if (el.getLabel() == null) return;
2366        String JavaDoc r = el.enclosingRuleName;
2367        if ( grammar instanceof LexerGrammar ) {
2368            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
2369        }
2370        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
2371        if (rs == null) {
2372            antlrTool.panic("Enclosing rule not found!");
2373        }
2374        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
2375        if (ex != null) {
2376            println("try // for error handling");
2377            println("{");
2378            tabs++;
2379        }
2380    }
2381
2382    protected void genASTDeclaration(AlternativeElement el)
2383    {
2384        genASTDeclaration(el, labeledElementASTType);
2385    }
2386
2387    protected void genASTDeclaration(AlternativeElement el, String JavaDoc node_type)
2388    {
2389        genASTDeclaration(el, el.getLabel(), node_type);
2390    }
2391
2392    protected void genASTDeclaration(AlternativeElement el, String JavaDoc var_name, String JavaDoc node_type)
2393    {
2394        // already declared?
2395        if (declaredASTVariables.contains(el))
2396            return;
2397
2398        // emit code
2399        //String s = StringUtils.stripFrontBack(node_type, "\"", "\"");
2400        //println(s + " " + var_name + "_AST = null;");
2401        println(node_type + " " + var_name + "_AST = null;");
2402
2403        // mark as declared
2404        declaredASTVariables.put(el,el);
2405    }
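    // Illustrative sketch (not part of the ANTLR source): for a label "e" and a
    // hypothetical node type MyNode, the method above emits
    //
    //     MyNode e_AST = null;
    //
    // and records the element so the declaration is not emitted twice.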
2406
2407    /** Generate a header that is common to all CSharp files */
2408    protected void genHeader()
2409    {
2410        println("// $ANTLR "+Tool.version+": "+
2411            "\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" +
2412            " -> "+
2413            "\""+grammar.getClassName()+".cs\"$");
2414    }
2415
2416    private void genLiteralsTest() {
2417        println("_ttype = testLiteralsTable(_ttype);");
2418    }
2419
2420    private void genLiteralsTestForPartialToken() {
2421        println("_ttype = testLiteralsTable(text.ToString(_begin, text.Length-_begin), _ttype);");
2422    }
2423
2424    protected void genMatch(BitSet b) {
2425    }
2426
2427    protected void genMatch(GrammarAtom atom) {
2428        if ( atom instanceof StringLiteralElement ) {
2429            if ( grammar instanceof LexerGrammar ) {
2430                genMatchUsingAtomText(atom);
2431            }
2432            else {
2433                genMatchUsingAtomTokenType(atom);
2434            }
2435        }
2436        else if ( atom instanceof CharLiteralElement ) {
2437            if ( grammar instanceof LexerGrammar ) {
2438                genMatchUsingAtomText(atom);
2439            }
2440            else {
2441                antlrTool.error("cannot ref character literals in grammar: "+atom);
2442            }
2443        }
2444        else if ( atom instanceof TokenRefElement ) {
2445            genMatchUsingAtomText(atom);
2446        } else if (atom instanceof WildcardElement) {
2447          gen((WildcardElement)atom);
2448      }
2449    }
2450    protected void genMatchUsingAtomText(GrammarAtom atom) {
2451        // match() for trees needs the _t cursor
2452
String JavaDoc astArgs="";
2453        if (grammar instanceof TreeWalkerGrammar) {
2454            if ( usingCustomAST )
2455                astArgs="(AST)_t,";
2456            else
2457                astArgs="_t,";
2458        }
2459
2460        // if in lexer and ! on element, save buffer index to kill later
2461        if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
2462            declareSaveIndexVariableIfNeeded();
2463            println("_saveIndex = text.Length;");
2464        }
2465
2466        print(atom.not ? "matchNot(" : "match(");
2467        _print(astArgs);
2468
2469        // print out what to match
2470
if (atom.atomText.equals("EOF")) {
2471            // horrible hack to handle EOF case
2472
_print("Token.EOF_TYPE");
2473        }
2474        else {
2475                _print(atom.atomText);
2476        }
2477        _println(");");
2478
2479        if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
2480            declareSaveIndexVariableIfNeeded();
2481            println("text.Length = _saveIndex;"); // kill text atom put in buffer
2482
}
2483    }
2484
2485    protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
2486        // match() for trees needs the _t cursor
2487
String JavaDoc astArgs="";
2488        if (grammar instanceof TreeWalkerGrammar) {
2489            if( usingCustomAST )
2490                astArgs="(AST)_t,";
2491            else
2492                astArgs="_t,";
2493        }
2494
2495        // If the literal can be mangled, generate the symbolic constant instead
2496
String JavaDoc mangledName = null;
2497        String JavaDoc s = astArgs + getValueString(atom.getType());
2498
2499        // matching
2500
println( (atom.not ? "matchNot(" : "match(") + s + ");");
2501    }
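    // Illustrative sketch (not part of the ANTLR source): genMatchUsingAtomText() and
    // genMatchUsingAtomTokenType() produce calls such as
    //
    //     match(ID);                    // parser token reference
    //     match((AST)_t,ID);            // tree walker with a custom AST type
    //     _saveIndex = text.Length;     // lexer element marked with '!'
    //     match("while");
    //     text.Length = _saveIndex;
    //
    // where ID and "while" stand in for whatever the grammar actually references.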
2502
2503    /** Generate the nextToken() rule. nextToken() is a synthetic
2504    * lexer rule that is the implicit OR of all user-defined
2505    * lexer rules.
2506    */

2507    public void genNextToken() {
2508        // Are there any public rules? If not, then just generate a
2509        // fake nextToken().
2510        boolean hasPublicRules = false;
2511        for (int i = 0; i < grammar.rules.size(); i++) {
2512            RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
2513            if ( rs.isDefined() && rs.access.equals("public") ) {
2514                hasPublicRules = true;
2515                break;
2516            }
2517        }
2518        if (!hasPublicRules) {
2519            println("");
2520            println("override public Token nextToken()\t\t\t//throws TokenStreamException");
2521            println("{");
2522            tabs++;
2523            println("try");
2524            println("{");
2525            tabs++;
2526            println("uponEOF();");
2527            tabs--;
2528            println("}");
2529            println("catch(CharStreamIOException csioe)");
2530            println("{");
2531            tabs++;
2532            println("throw new TokenStreamIOException(csioe.io);");
2533            tabs--;
2534            println("}");
2535            println("catch(CharStreamException cse)");
2536            println("{");
2537            tabs++;
2538            println("throw new TokenStreamException(cse.Message);");
2539            tabs--;
2540            println("}");
2541            println("return new CommonToken(Token.EOF_TYPE, \"\");");
2542            tabs--;
2543            println("}");
2544            println("");
2545            return;
2546        }
2547
2548        // Create the synthesized nextToken() rule
2549        RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
2550        // Define the nextToken rule symbol
2551        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
2552        nextTokenRs.setDefined();
2553        nextTokenRs.setBlock(nextTokenBlk);
2554        nextTokenRs.access = "private";
2555        grammar.define(nextTokenRs);
2556        // Analyze the nextToken rule
2557
boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
2558
2559        // Generate the next token rule
2560
String JavaDoc filterRule=null;
2561        if ( ((LexerGrammar)grammar).filterMode ) {
2562            filterRule = ((LexerGrammar)grammar).filterRule;
2563        }
2564
2565        println("");
2566        println("override public Token nextToken()\t\t\t//throws TokenStreamException");
2567        println("{");
2568        tabs++;
2569        println("Token theRetToken = null;");
2570        _println("tryAgain:");
2571        println("for (;;)");
2572        println("{");
2573        tabs++;
2574        println("Token _token = null;");
2575        println("int _ttype = Token.INVALID_TYPE;");
2576        if ( ((LexerGrammar)grammar).filterMode ) {
2577            println("setCommitToPath(false);");
2578            if ( filterRule!=null ) {
2579                // Here's a good place to ensure that the filter rule actually exists
2580
if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) {
2581                grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
2582                }
2583                else {
2584                    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
2585                    if ( !rs.isDefined() ) {
2586                        grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
2587                    }
2588                    else if ( rs.access.equals("public") ) {
2589                        grammar.antlrTool.error("Filter rule " + filterRule + " must be protected");
2590                    }
2591                }
2592                println("int _m;");
2593                println("_m = mark();");
2594            }
2595        }
2596        println("resetText();");
2597
2598        println("try // for char stream error handling");
2599        println("{");
2600        tabs++;
2601
2602        // Generate try around whole thing to trap scanner errors
2603
println("try // for lexical error handling");
2604        println("{");
2605        tabs++;
2606
2607        // Test for public lexical rules with empty paths
2608
for (int i=0; i<nextTokenBlk.getAlternatives().size(); i++) {
2609            Alternative a = nextTokenBlk.getAlternativeAt(i);
2610            if ( a.cache[1].containsEpsilon() ) {
2611                //String r = a.head.toString();
2612
RuleRefElement rr = (RuleRefElement)a.head;
2613            String JavaDoc r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
2614            antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")");
2615            }
2616        }
2617
2618        // Generate the block
2619
String JavaDoc newline = System.getProperty("line.separator");
2620        CSharpBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
2621        String JavaDoc errFinish = "if (LA(1)==EOF_CHAR) { uponEOF(); returnToken_ = makeToken(Token.EOF_TYPE); }";
2622        errFinish += newline+"\t\t\t\t";
2623        if ( ((LexerGrammar)grammar).filterMode ) {
2624            if ( filterRule==null ) {
2625            //kunle: errFinish += "else { consume(); continue tryAgain; }";
2626
errFinish += "\t\t\t\telse";
2627            errFinish += "\t\t\t\t{";
2628            errFinish += "\t\t\t\t\tconsume();";
2629            errFinish += "\t\t\t\t\tgoto tryAgain;";
2630            errFinish += "\t\t\t\t}";
2631            }
2632            else {
2633                errFinish += "\t\t\t\t\telse"+newline+
2634                    "\t\t\t\t\t{"+newline+
2635                    "\t\t\t\t\tcommit();"+newline+
2636                    "\t\t\t\t\ttry {m"+filterRule+"(false);}"+newline+
2637                    "\t\t\t\t\tcatch(RecognitionException e)"+newline+
2638                    "\t\t\t\t\t{"+newline+
2639                    "\t\t\t\t\t // catastrophic failure"+newline+
2640                    "\t\t\t\t\t reportError(e);"+newline+
2641                    "\t\t\t\t\t consume();"+newline+
2642                    "\t\t\t\t\t}"+newline+
2643                    "\t\t\t\t\tgoto tryAgain;"+newline+
2644                    "\t\t\t\t}";
2645            }
2646        }
2647        else {
2648            errFinish += "else {"+throwNoViable+"}";
2649        }
2650        genBlockFinish(howToFinish, errFinish);
2651
2652        // at this point a valid token has been matched, undo "mark" that was done
2653
if ( ((LexerGrammar)grammar).filterMode && filterRule!=null ) {
2654            println("commit();");
2655        }
2656
2657        // Generate literals test if desired
2658        // make sure _ttype is set first; note returnToken_ must be
2659        // non-null as the rule was required to create it.
2660        println("if ( null==returnToken_ ) goto tryAgain; // found SKIP token");
2661        println("_ttype = returnToken_.Type;");
2662        if ( ((LexerGrammar)grammar).getTestLiterals()) {
2663            genLiteralsTest();
2664        }
2665
2666        // return token created by rule reference in switch
2667
println("returnToken_.Type = _ttype;");
2668        println("return returnToken_;");
2669
2670        // Close try block
2671
tabs--;
2672        println("}");
2673        println("catch (RecognitionException e) {");
2674        tabs++;
2675        if ( ((LexerGrammar)grammar).filterMode ) {
2676            if ( filterRule==null ) {
2677                println("if (!getCommitToPath())");
2678                println("{");
2679                tabs++;
2680                println("consume();");
2681                println("goto tryAgain;");
2682                tabs--;
2683                println("}");
2684            }
2685            else {
2686                println("if (!getCommitToPath())");
2687                println("{");
2688                tabs++;
2689                println("rewind(_m);");
2690                println("resetText();");
2691                println("try {m"+filterRule+"(false);}");
2692                println("catch(RecognitionException ee) {");
2693                println(" // horrendous failure: error in filter rule");
2694                println(" reportError(ee);");
2695                println(" consume();");
2696                println("}");
2697                //println("goto tryAgain;");
2698
tabs--;
2699                println("}");
2700                println("else");
2701            }
2702        }
2703        if ( nextTokenBlk.getDefaultErrorHandler() ) {
2704            println("{");
2705            tabs++;
2706            println("reportError(e);");
2707            println("consume();");
2708            tabs--;
2709            println("}");
2710        }
2711        else {
2712            // pass on to invoking routine
2713
tabs++;
2714            println("throw new TokenStreamRecognitionException(e);");
2715            tabs--;
2716        }
2717        tabs--;
2718        println("}");
2719
2720        // close CharStreamException try
2721
tabs--;
2722        println("}");
2723        println("catch (CharStreamException cse) {");
2724        println(" if ( cse is CharStreamIOException ) {");
2725        println(" throw new TokenStreamIOException(((CharStreamIOException)cse).io);");
2726        println(" }");
2727        println(" else {");
2728        println(" throw new TokenStreamException(cse.Message);");
2729        println(" }");
2730        println("}");
2731
2732        // close for-loop
2733
tabs--;
2734        println("}");
2735
2736        // close method nextToken
2737
tabs--;
2738        println("}");
2739        println("");
2740    }
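    // Illustrative sketch (not part of the ANTLR source): the generated nextToken()
    // has roughly this C# shape (abridged):
    //
    //     override public Token nextToken()
    //     {
    //         Token theRetToken = null;
    //     tryAgain:
    //         for (;;)
    //         {
    //             int _ttype = Token.INVALID_TYPE;
    //             resetText();
    //             try {      // for char stream error handling
    //                 try {  // for lexical error handling
    //                     switch ( LA(1) ) { ... }   // produced by genCommonBlock()
    //                     if ( null==returnToken_ ) goto tryAgain; // found SKIP token
    //                     _ttype = returnToken_.Type;
    //                     returnToken_.Type = _ttype;
    //                     return returnToken_;
    //                 }
    //                 catch (RecognitionException e) { reportError(e); consume(); }
    //             }
    //             catch (CharStreamException cse) { ... }
    //         }
    //     }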
2741    /** Gen a named rule block.
2742     * ASTs are generated for each element of an alternative unless
2743     * the rule or the alternative have a '!' modifier.
2744     *
2745     * If an alternative defeats the default tree construction, it
2746     * must set <rule>_AST to the root of the returned AST.
2747     *
2748     * Each alternative that does automatic tree construction, builds
2749     * up root and child list pointers in an ASTPair structure.
2750     *
2751     * A rule finishes by setting the returnAST variable from the
2752     * ASTPair.
2753     *
2754     * @param rule The name of the rule to generate
2755     * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
2756    */

2757    public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, TokenManager tm) {
2758        tabs=1;
2759        if ( DEBUG_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
2760        if ( !s.isDefined() ) {
2761            antlrTool.error("undefined rule: "+ s.getId());
2762            return;
2763        }
2764
2765        // Generate rule return type, name, arguments
2766        RuleBlock rblk = s.getBlock();
2767        currentRule = rblk;
2768        currentASTResult = s.getId();
2769
2770      // clear list of declared ast variables..
2771        declaredASTVariables.clear();
2772
2773        // Save the AST generation state, and set it to that of the rule
2774        boolean savegenAST = genAST;
2775        genAST = genAST && rblk.getAutoGen();
2776
2777        // boolean oldsaveTest = saveText;
2778        saveText = rblk.getAutoGen();
2779
2780        // print javadoc comment if any
2781        if ( s.comment!=null ) {
2782            _println(s.comment);
2783        }
2784
2785        // Gen method access and final qualifier
2786        //print(s.access + " final ");
2787        print(s.access + " ");
2788
2789        // Gen method return type (note lexer return action set at rule creation)
2790        if (rblk.returnAction != null)
2791        {
2792            // Has specified return value
2793            _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
2794        } else {
2795            // No specified return value
2796            _print("void ");
2797        }
2798
2799        // Gen method name
2800        _print(s.getId() + "(");
2801
2802        // Additional rule parameters common to all rules for this grammar
2803        _print(commonExtraParams);
2804        if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
2805            _print(",");
2806        }
2807
2808        // Gen arguments
2809        if (rblk.argAction != null)
2810        {
2811            // Has specified arguments
2812            _println("");
2813            tabs++;
2814            println(rblk.argAction);
2815            tabs--;
2816            print(")");
2817        }
2818        else {
2819            // No specified arguments
2820            _print(")");
2821        }
2822
2823        // Gen throws clause and open curly
2824        _print(" //throws " + exceptionThrown);
2825        if ( grammar instanceof ParserGrammar ) {
2826            _print(", TokenStreamException");
2827        }
2828        else if ( grammar instanceof LexerGrammar ) {
2829            _print(", CharStreamException, TokenStreamException");
2830        }
2831        // Add user-defined exceptions unless lexer (for now)
2832        if ( rblk.throwsSpec!=null ) {
2833            if ( grammar instanceof LexerGrammar ) {
2834                antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule "+rblk.ruleName);
2835            }
2836            else {
2837                _print(", "+rblk.throwsSpec);
2838            }
2839        }
2840
2841        _println("");
2842        _println("{");
2843        tabs++;
2844
2845        // Convert return action to variable declaration
2846        if (rblk.returnAction != null)
2847            println(rblk.returnAction + ";");
2848
2849        // print out definitions needed by rules for various grammar types
2850        println(commonLocalVars);
2851
2852        if (grammar.traceRules) {
2853            if ( grammar instanceof TreeWalkerGrammar ) {
2854                if ( usingCustomAST )
2855                    println("traceIn(\""+ s.getId() +"\",(AST)_t);");
2856                else
2857                    println("traceIn(\""+ s.getId() +"\",_t);");
2858            }
2859            else {
2860                println("traceIn(\""+ s.getId() +"\");");
2861            }
2862        }
2863
2864        if ( grammar instanceof LexerGrammar ) {
2865            // lexer rule default return value is the rule's token name
2866            // This is a horrible hack to support the built-in EOF lexer rule.
2867            if (s.getId().equals("mEOF"))
2868                println("_ttype = Token.EOF_TYPE;");
2869            else
2870                println("_ttype = " + s.getId().substring(1)+";");
2871
2872            // delay creation of _saveIndex until we need it OK?
2873            bSaveIndexCreated = false;
2874
2875            /*
2876                  println("boolean old_saveConsumedInput=saveConsumedInput;");
2877                  if ( !rblk.getAutoGen() ) { // turn off "save input" if ! on rule
2878                  println("saveConsumedInput=false;");
2879                  }
2880                */

2881        }
2882
2883        // if debugging, write code to mark entry to the rule
2884        if ( grammar.debuggingOutput)
2885            if (grammar instanceof ParserGrammar)
2886                println("fireEnterRule(" + ruleNum + ",0);");
2887            else if (grammar instanceof LexerGrammar)
2888            println("fireEnterRule(" + ruleNum + ",_ttype);");
2889
2890
2891        // Generate trace code if desired
2892        if ( grammar.debuggingOutput || grammar.traceRules) {
2893            println("try { // debugging");
2894            tabs++;
2895        }
2896
2897        // Initialize AST variables
2898        if (grammar instanceof TreeWalkerGrammar) {
2899            // "Input" value for rule
2900            println(labeledElementASTType+" " + s.getId() + "_AST_in = ("+labeledElementASTType+")_t;");
2901        }
2902        if (grammar.buildAST) {
2903            // Parser member used to pass AST returns from rule invocations
2904            println("returnAST = null;");
2905            // Tracks AST construction
2906            // println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
2907            println("ASTPair currentAST = new ASTPair();");
2908            // User-settable return value for rule.
2909            println(labeledElementASTType+" " + s.getId() + "_AST = null;");
2910        }
2911
2912        genBlockPreamble(rblk);
2913        genBlockInitAction(rblk);
2914        println("");
2915
2916        // Search for an unlabeled exception specification attached to the rule
2917        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
2918
2919        // Generate try block around the entire rule for error handling
2920        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
2921            println("try { // for error handling");
2922            tabs++;
2923        }
2924
2925        // Generate the alternatives
2926        if ( rblk.alternatives.size()==1 )
2927        {
2928            // One alternative -- use simple form
2929            Alternative alt = rblk.getAlternativeAt(0);
2930            String JavaDoc pred = alt.semPred;
2931            if ( pred!=null )
2932                genSemPred(pred, currentRule.line);
2933            if (alt.synPred != null) {
2934                antlrTool.warning(
2935                    "Syntactic predicate ignored for single alternative",
2936                    grammar.getFilename(), alt.synPred.getLine(), alt.synPred.getColumn()
2937                    );
2938            }
2939            genAlt(alt, rblk);
2940        }
2941        else
2942        {
2943            // Multiple alternatives -- generate complex form
2944            boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
2945
2946            CSharpBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
2947            genBlockFinish(howToFinish, throwNoViable);
2948        }
2949
2950        // Generate catch phrase for error handling
2951        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
2952            // Close the try block
2953            tabs--;
2954            println("}");
2955        }
2956
2957        // Generate user-defined or default catch phrases
2958        if (unlabeledUserSpec != null)
2959        {
2960            genErrorHandler(unlabeledUserSpec);
2961        }
2962        else if (rblk.getDefaultErrorHandler())
2963        {
2964            // Generate default catch phrase
2965            println("catch (" + exceptionThrown + " ex)");
2966            println("{");
2967            tabs++;
2968            // Generate code to handle error if not guessing
2969            if (grammar.hasSyntacticPredicate) {
2970                println("if (0 == inputState.guessing)");
2971                println("{");
2972                tabs++;
2973            }
2974            println("reportError(ex);");
2975            if ( !(grammar instanceof TreeWalkerGrammar) )
2976            {
2977                // Generate code to consume until token in k==1 follow set
2978                Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
2979                String JavaDoc followSetName = getBitsetName(markBitsetForGen(follow.fset));
2980                println("consume();");
2981                println("consumeUntil(" + followSetName + ");");
2982            }
2983            else
2984            {
2985                // Just consume one token
2986            println("if (null != _t)");
2987            println("{");
2988            tabs++;
2989            println("_t = _t.getNextSibling();");
2990            tabs--;
2991            println("}");
2992            }
2993            if (grammar.hasSyntacticPredicate)
2994            {
2995                tabs--;
2996                // When guessing, rethrow exception
2997                println("}");
2998                println("else");
2999                println("{");
3000                tabs++;
3001                //println("throw ex;");
3002                println("throw;");
3003                tabs--;
3004                println("}");
3005            }
3006            // Close catch phrase
3007            tabs--;
3008            println("}");
3009        }
3010
3011        // Squirrel away the AST "return" value
3012        if (grammar.buildAST) {
3013            println("returnAST = " + s.getId() + "_AST;");
3014        }
3015
3016        // Set return tree value for tree walkers
3017        if ( grammar instanceof TreeWalkerGrammar ) {
3018            println("retTree_ = _t;");
3019        }
3020
3021        // Generate literals test for lexer rules so marked
3022        if (rblk.getTestLiterals()) {
3023            if ( s.access.equals("protected") ) {
3024                genLiteralsTestForPartialToken();
3025            }
3026            else {
3027                genLiteralsTest();
3028            }
3029        }
3030
3031        // if doing a lexer rule, dump code to create token if necessary
3032        if ( grammar instanceof LexerGrammar ) {
3033            println("if (_createToken && (null == _token) && (_ttype != Token.SKIP))");
3034            println("{");
3035            tabs++;
3036            println("_token = makeToken(_ttype);");
3037            println("_token.setText(text.ToString(_begin, text.Length-_begin));");
3038            tabs--;
3039            println("}");
3040            println("returnToken_ = _token;");
3041        }
3042
3043        // Gen the return statement if there is one (lexer has hard-wired return action)
3044        if (rblk.returnAction != null) {
3045            println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
3046        }
3047
3048        if ( grammar.debuggingOutput || grammar.traceRules) {
3049            tabs--;
3050            println("}");
3051            println("finally");
3052            println("{ // debugging");
3053            tabs++;
3054
3055            // If debugging, generate calls to mark exit of rule
3056            if ( grammar.debuggingOutput)
3057                if (grammar instanceof ParserGrammar)
3058                    println("fireExitRule(" + ruleNum + ",0);");
3059                else if (grammar instanceof LexerGrammar)
3060                println("fireExitRule(" + ruleNum + ",_ttype);");
3061
3062            if (grammar.traceRules) {
3063                if ( grammar instanceof TreeWalkerGrammar ) {
3064                    println("traceOut(\""+ s.getId() +"\",_t);");
3065                }
3066                else {
3067                    println("traceOut(\""+ s.getId() +"\");");
3068                }
3069            }
3070
3071            tabs--;
3072            println("}");
3073        }
3074
3075        tabs--;
3076        println("}");
3077        println("");
3078
3079        // Restore the AST generation state
3080        genAST = savegenAST;
3081
3082        // restore char save state
3083        // saveText = oldsaveTest;
3084    }
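    // Illustrative sketch (shape only; details vary with options such as buildAST,
    // tracing and guessing): for a simple parser rule "decl", genRule() above emits
    // C# roughly like:
    //
    //   public void decl() //throws RecognitionException, TokenStreamException
    //   {
    //       returnAST = null;
    //       ASTPair currentAST = new ASTPair();
    //       AST decl_AST = null;
    //       try { // for error handling
    //           ... code for the rule's alternatives ...
    //       }
    //       catch (RecognitionException ex)
    //       {
    //           reportError(ex);
    //           consume();
    //           consumeUntil(tokenSet_0_);   // hypothetical follow-set name
    //       }
    //       returnAST = decl_AST;
    //   }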
3085    private void GenRuleInvocation(RuleRefElement rr) {
3086        // dump rule name
3087        _print(rr.targetRule + "(");
3088
3089        // lexers must tell rule if it should set returnToken_
3090        if ( grammar instanceof LexerGrammar ) {
3091            // if labeled, could access Token, so tell rule to create
3092            if ( rr.getLabel() != null ) {
3093                _print("true");
3094            }
3095            else {
3096                _print("false");
3097            }
3098            if (commonExtraArgs.length() != 0 || rr.args!=null ) {
3099                _print(",");
3100            }
3101        }
3102
3103        // Extra arguments common to all rules for this grammar
3104        _print(commonExtraArgs);
3105        if (commonExtraArgs.length() != 0 && rr.args!=null ) {
3106            _print(",");
3107        }
3108
3109        // Process arguments to method, if any
3110        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
3111        if (rr.args != null)
3112        {
3113            // When not guessing, execute user arg action
3114            ActionTransInfo tInfo = new ActionTransInfo();
3115            String JavaDoc args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo);
3116            if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null )
3117            {
3118            antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" +
3119                 currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
3120            }
3121            _print(args);
3122
3123            // Warn if the rule accepts no arguments
3124            if (rs.block.argAction == null)
3125            {
3126                antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn());
3127            }
3128        }
3129        else
3130        {
3131            // For C++, no warning if rule has parameters, because there may be default
3132            // values for all of the parameters
3133            if (rs.block.argAction != null)
3134            {
3135                antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
3136            }
3137        }
3138        _println(");");
3139
3140        // move down to the first child while parsing
3141        if ( grammar instanceof TreeWalkerGrammar ) {
3142            println("_t = retTree_;");
3143        }
3144    }
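    // Illustrative sketch: a reference to a parser rule "expr" with arguments (x,y)
    // is emitted as "expr(x,y);". A labeled lexer rule reference gets an extra
    // leading boolean argument, e.g. "mDIGIT(true);", telling the called rule to
    // create a Token object; unlabeled references pass "false" (rule and argument
    // names here are hypothetical).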
3145    protected void genSemPred(String JavaDoc pred, int line) {
3146        // translate $ and # references
3147        ActionTransInfo tInfo = new ActionTransInfo();
3148        pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
3149        // ignore translation info...we don't need to do anything with it.
3150        String escapedPred = charFormatter.escapeString(pred);
3151
3152        // if debugging, wrap the semantic predicate evaluation in a method
3153        // that can tell SemanticPredicateListeners the result
3154        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
3155            pred = "fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.VALIDATING,"
3156            + addSemPred(escapedPred) + "," + pred + ")";
3157        println("if (!(" + pred + "))");
3158        println(" throw new SemanticException(\"" + escapedPred + "\");");
3159    }
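    // Illustrative sketch: for a semantic predicate written as {n < 10}? in the
    // grammar (names hypothetical), the two println calls above emit C# like:
    //   if (!(n < 10))
    //     throw new SemanticException("n < 10");
    // With debugging output enabled, the condition is first wrapped in
    // fireSemanticPredicateEvaluated(...), as coded above.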
3160    /** Write an array of Strings which are the semantic predicate
3161     * expressions. The debugger will reference them by number only
3162     */

3163    protected void genSemPredMap() {
3164        Enumeration JavaDoc e = semPreds.elements();
3165        println("private string[] _semPredNames = {");
3166        tabs++;
3167        while(e.hasMoreElements())
3168            println("\""+e.nextElement()+"\",");
3169        tabs--;
3170        println("};");
3171    }
3172    protected void genSynPred(SynPredBlock blk, String JavaDoc lookaheadExpr) {
3173        if ( DEBUG_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");
3174
3175        // Dump synpred result variable
3176        println("bool synPredMatched" + blk.ID + " = false;");
3177        // Gen normal lookahead test
3178        println("if (" + lookaheadExpr + ")");
3179        println("{");
3180        tabs++;
3181
3182        // Save input state
3183        if ( grammar instanceof TreeWalkerGrammar ) {
3184            println("AST __t" + blk.ID + " = _t;");
3185        }
3186        else {
3187            println("int _m" + blk.ID + " = mark();");
3188        }
3189
3190        // Once inside the try, assume synpred works unless exception caught
3191        println("synPredMatched" + blk.ID + " = true;");
3192        println("inputState.guessing++;");
3193
3194        // if debugging, tell listeners that a synpred has started
3195        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
3196            (grammar instanceof LexerGrammar))) {
3197            println("fireSyntacticPredicateStarted();");
3198        }
3199
3200        syntacticPredLevel++;
3201        println("try {");
3202        tabs++;
3203        gen((AlternativeBlock)blk); // gen code to test predicate
3204        tabs--;
3205        //println("System.out.println(\"pred "+blk+" succeeded\");");
3206        println("}");
3207        //kunle: lose a few warnings cheaply
3208        // println("catch (" + exceptionThrown + " pe)");
3209        println("catch (" + exceptionThrown + ")");
3210        println("{");
3211        tabs++;
3212        println("synPredMatched"+blk.ID+" = false;");
3213        //println("System.out.println(\"pred "+blk+" failed\");");
3214        tabs--;
3215        println("}");
3216
3217        // Restore input state
3218        if ( grammar instanceof TreeWalkerGrammar ) {
3219            println("_t = __t"+blk.ID+";");
3220        }
3221        else {
3222            println("rewind(_m"+blk.ID+");");
3223        }
3224
3225        println("inputState.guessing--;");
3226
3227        // if debugging, tell listeners how the synpred turned out
3228        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
3229            (grammar instanceof LexerGrammar))) {
3230            println("if (synPredMatched" + blk.ID +")");
3231            println(" fireSyntacticPredicateSucceeded();");
3232            println("else");
3233            println(" fireSyntacticPredicateFailed();");
3234        }
3235
3236        syntacticPredLevel--;
3237        tabs--;
3238
3239        // Close lookahead test
3240        println("}");
3241
3242        // Test synpred result
3243        println("if ( synPredMatched"+blk.ID+" )");
3244        println("{");
3245    }
3246    /** Generate a static array containing the names of the tokens,
3247     * indexed by the token type values. This static array is used
3248     * to format error messages so that the token identifiers or literal
3249     * strings are displayed instead of the token numbers.
3250     *
3251     * If a lexical rule has a paraphrase, use it rather than the
3252     * token label.
3253     */

3254    public void genTokenStrings() {
3255        // Generate a string for each token. This creates a static
3256        // array of Strings indexed by token type.
3257        println("");
3258        println("public static readonly string[] tokenNames_ = new string[] {");
3259        tabs++;
3260
3261        // Walk the token vocabulary and generate a Vector of strings
3262        // from the tokens.
3263        Vector v = grammar.tokenManager.getVocabulary();
3264        for (int i = 0; i < v.size(); i++)
3265        {
3266            String JavaDoc s = (String JavaDoc)v.elementAt(i);
3267            if (s == null)
3268            {
3269                s = "<"+String.valueOf(i)+">";
3270            }
3271            if ( !s.startsWith("\"") && !s.startsWith("<") ) {
3272                TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
3273                if ( ts!=null && ts.getParaphrase()!=null ) {
3274                    s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
3275                }
3276            }
3277            else if (s.startsWith("\"")) {
3278                s = StringUtils.stripFrontBack(s, "\"", "\"");
3279            }
3280            print(charFormatter.literalString(s));
3281            if (i != v.size()-1) {
3282                _print(",");
3283            }
3284            _println("");
3285        }
3286
3287        // Close the string array initializer
3288        tabs--;
3289        println("};");
3290    }
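    // Illustrative sketch: the loop above builds a C# array literal such as
    //   public static readonly string[] tokenNames_ = new string[] {
    //       "<0>", "EOF", ..., "ID", "while", ...
    //   };
    // (entries shown are hypothetical). Paraphrased tokens show their paraphrase,
    // string literals have their surrounding quotes stripped, and undefined slots
    // appear as "<i>"; escaping is handled by charFormatter.literalString().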
3291    /** Generate the token types CSharp file */
3292    protected void genTokenTypes(TokenManager tm) throws IOException JavaDoc {
3293        // Open the token output CSharp file and set the currentOutput stream
3294        // SAS: file open was moved to a method so a subclass can override
3295        // This was mainly for the VAJ interface
3296        setupOutput(tm.getName() + TokenTypesFileSuffix);
3297
3298        tabs = 0;
3299
3300        // Generate the header common to all CSharp files
3301        genHeader();
3302        // Do not use printAction because we assume tabs==0
3303        println(behavior.getHeaderAction(""));
3304
3305          // Generate the CSharp namespace declaration (if specified)
3306        if (nameSpace != null)
3307            nameSpace.emitDeclarations(currentOutput);
3308        tabs++;
3309
3310        // Encapsulate the definitions in a class. This has to be done as a class because
3311        // they are all constants and CSharp interface types cannot contain constants.
3312        println("public class " + tm.getName() + TokenTypesFileSuffix);
3313        //println("public class " + getTokenTypesClassName());
3314        println("{");
3315        tabs++;
3316
3317        genTokenDefinitions(tm);
3318
3319        // Close the interface
3320        tabs--;
3321        println("}");
3322
3323        tabs--;
3324        // Generate the CSharp namespace closures (if required)
3325        if (nameSpace != null)
3326            nameSpace.emitClosures(currentOutput);
3327
3328        // Close the tokens output file
3329        currentOutput.close();
3330        currentOutput = null;
3331        exitIfError();
3332    }
3333    protected void genTokenDefinitions(TokenManager tm) throws IOException JavaDoc {
3334        // Generate a definition for each token type
3335        Vector v = tm.getVocabulary();
3336
3337        // Do special tokens manually
3338        println("public const int EOF = " + Token.EOF_TYPE + ";");
3339        println("public const int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
3340
3341        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
3342            String JavaDoc s = (String JavaDoc)v.elementAt(i);
3343            if (s != null) {
3344                if ( s.startsWith("\"") ) {
3345                    // a string literal
3346                    StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
3347                    if ( sl==null ) {
3348                        antlrTool.panic("String literal " + s + " not in symbol table");
3349                    }
3350                    else if ( sl.label != null ) {
3351                        println("public const int " + sl.label + " = " + i + ";");
3352                    }
3353                    else {
3354                        String JavaDoc mangledName = mangleLiteral(s);
3355                        if (mangledName != null) {
3356                            // We were able to create a meaningful mangled token name
3357                            println("public const int " + mangledName + " = " + i + ";");
3358                            // if no label specified, make the label equal to the mangled name
3359                            sl.label = mangledName;
3360                        }
3361                        else {
3362                            println("// " + s + " = " + i);
3363                        }
3364                    }
3365                }
3366                else if ( !s.startsWith("<") ) {
3367                    println("public const int " + s + " = " + i + ";");
3368                }
3369            }
3370        }
3371        println("");
3372    }
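    // Illustrative sketch (token numbers are hypothetical and depend on the
    // grammar and on ANTLR's Token constants): the method above emits lines like
    //   public const int EOF = 1;
    //   public const int NULL_TREE_LOOKAHEAD = 3;
    //   public const int ID = 4;
    //   public const int LITERAL_while = 5;
    // where string literals use their label or a mangled LITERAL_xxx name.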
3373    /** Process a string for a simple expression for use in xx/action.g.
3374     * It is used to cast simple tokens/references to the right type for
3375     * the generated language. Basically called for every element in
3376     * the vector passed to getASTCreateString(Vector v).
3377     * @param str A String.
3378     */

3379    public String JavaDoc processStringForASTConstructor( String JavaDoc str )
3380    {
3381        /*
3382        System.out.println("processStringForASTConstructor: str = "+str+
3383                           ", custom = "+(new Boolean(usingCustomAST)).toString()+
3384                           ", tree = "+(new Boolean((grammar instanceof TreeWalkerGrammar))).toString()+
3385                           ", parser = "+(new Boolean((grammar instanceof ParserGrammar))).toString()+
3386                           ", notDefined = "+(new Boolean((!(grammar.tokenManager.tokenDefined(str))))).toString()
3387                           );
3388        */

3389        if( usingCustomAST &&
3390            ( (grammar instanceof TreeWalkerGrammar) ||
3391              (grammar instanceof ParserGrammar) ) &&
3392            !(grammar.tokenManager.tokenDefined(str)) )
3393        {
3394            //System.out.println("processStringForASTConstructor: "+str+" with cast");
3395            return "(AST)"+str;
3396        }
3397        else
3398        {
3399            //System.out.println("processStringForASTConstructor: "+str);
3400            return str;
3401        }
3402    }
3403    /** Get a string for an expression to generate creation of an AST subtree.
3404      * @param v A Vector of String, where each element is an expression
3405      * in the target language yielding an AST node.
3406      */

3407    public String JavaDoc getASTCreateString(Vector v) {
3408        if (v.size() == 0) {
3409            return "";
3410        }
3411        StringBuffer JavaDoc buf = new StringBuffer JavaDoc();
3412        buf.append("("+labeledElementASTType+
3413            ")astFactory.make( (new ASTArray(" + v.size() +
3414            "))");
3415        for (int i = 0; i < v.size(); i++) {
3416            buf.append(".add(" + v.elementAt(i) + ")");
3417        }
3418        buf.append(")");
3419        return buf.toString();
3420    }
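    // Illustrative sketch: for a Vector holding the three expressions "a_AST",
    // "b_AST" and "c_AST" (hypothetical names) and labeledElementASTType == "AST",
    // the method above returns the C# expression
    //   (AST)astFactory.make( (new ASTArray(3)).add(a_AST).add(b_AST).add(c_AST))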
3421
3422    /** Get a string for an expression to generate creating of an AST node
3423     * @param atom The grammar node for which you are creating the node
3424     * @param str The arguments to the AST constructor
3425     */

3426    public String JavaDoc getASTCreateString(GrammarAtom atom, String JavaDoc astCtorArgs) {
3427        String JavaDoc astCreateString = "astFactory.create(" + astCtorArgs + ")";
3428
3429        if (atom == null)
3430            return getASTCreateString(astCtorArgs);
3431        else {
3432            if ( atom.getASTNodeType() != null ) {
3433                // this Atom was instantiated from a Token that had an "AST" option - associating
3434                // it with a specific heterogeneous AST type - applied to either:
3435                // 1) its underlying TokenSymbol (in the "tokens {}" section) or,
3436                // 2) a particular token reference in the grammar
3437                //
3438                // For option (1), we simply generate a cast to hetero-AST type
3439                // For option (2), we generate a call to factory.create(Token, ASTNodeType) and cast it too
3440                TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atom.getText());
3441                if ( (ts == null) || (ts.getASTNodeType() != atom.getASTNodeType()) )
3442                    astCreateString = "(" + atom.getASTNodeType() + ") astFactory.create(" + astCtorArgs + ", \"" + atom.getASTNodeType() + "\")";
3443                else if ( (ts != null) && (ts.getASTNodeType() != null) )
3444                    astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString;
3445            }
3446            else if ( usingCustomAST )
3447                astCreateString = "(" + labeledElementASTType + ") " + astCreateString;
3448        }
3449        return astCreateString;
3450    }
3451
3452    /** Returns a string expression that creates an AST node using the specified
3453     * AST constructor argument string.
3454     * Parses the first (possibly only) argument in the supplied AST ctor argument
3455     * string to obtain the token type -- ctorID.
3456     *
3457     * IF the token type is a valid token symbol AND
3458     * it has an associated AST node type AND
3459     * this is not a #[ID, "T", "ASTType"] constructor
3460     * THEN
3461     * generate a call to factory.create(ID, Text, token.ASTNodeType())
3462     *
3463     * #[ID, "T", "ASTType"] constructors are mapped to astFactory.create(ID, "T", "ASTType")
3464     *
3465     * The supported AST constructor forms are:
3466     * #[ID]
3467     * #[ID, "text"]
3468     * #[ID, "text", ASTclassname] -- introduced in 2.7.2
3469     *
3470     * @param astCtorArgs The arguments to the AST constructor
3471     */

3472    public String JavaDoc getASTCreateString(String JavaDoc astCtorArgs) {
3473        // kunle: 19-Aug-2002
3474        // This AST creation string is almost certainly[*1] a manual tree construction request.
3475        // From the manual [I couldn't read ALL of the code ;-)], this can only be one of:
3476        // 1) #[ID] -- 'astCtorArgs' contains: 'ID' (without quotes) or,
3477        // 2) #[ID, "T"] -- 'astCtorArgs' contains: 'ID, "Text"' (without single quotes) or,
3478        // kunle: 08-Dec-2002 - 2.7.2a6
3479        // 3) #[ID, "T", "ASTTypeName"] -- 'astCtorArgs' contains: 'ID, "T", "ASTTypeName"' (without single quotes)
3480        //
3481        // [*1] In my tests, 'atom' was '== null' only for manual tree construction requests
3482

3483        if ( astCtorArgs==null ) {
3484            astCtorArgs = "";
3485        }
3486        String JavaDoc astCreateString = "astFactory.create(" + astCtorArgs + ")";
3487        String JavaDoc ctorID = astCtorArgs;
3488        String JavaDoc ctorText = null;
3489        int commaIndex;
3490        boolean ctorIncludesCustomType = false; // Is this a #[ID, "t", "ASTType"] constructor?
3491

3492        commaIndex = astCtorArgs.indexOf(',');
3493        if ( commaIndex != -1 ) {
3494            ctorID = astCtorArgs.substring(0, commaIndex); // the 'ID' portion of #[ID, "Text"]
3495            ctorText = astCtorArgs.substring(commaIndex+1, astCtorArgs.length()); // the 'Text' portion of #[ID, "Text"]
3496            commaIndex = ctorText.indexOf(',');
3497            if (commaIndex != -1 ) {
3498                // This is an AST creation of the form: #[ID, "Text", "ASTTypename"]
3499                // Support for this was introduced with 2.7.2a6
3500                // create default type or (since 2.7.2) 3rd arg is classname
3501                ctorIncludesCustomType = true;
3502            }
3503        }
3504        TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ctorID);
3505        if ( (null != ts) && (null != ts.getASTNodeType()) )
3506            astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString;
3507        else if ( usingCustomAST )
3508            astCreateString = "(" + labeledElementASTType + ") " + astCreateString;
3509
3510        return astCreateString;
3511    }
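    // Illustrative sketch: a manual construction #[PLUS, "+"] arrives here with
    // astCtorArgs == "PLUS, \"+\"" and yields astFactory.create(PLUS, "+");
    // if the PLUS token symbol carries an AST node type (say, a hypothetical
    // PlusNode), the result is additionally cast:
    //   (PlusNode) astFactory.create(PLUS, "+")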
3512
3513    protected String JavaDoc getLookaheadTestExpression(Lookahead[] look, int k) {
3514        StringBuffer JavaDoc e = new StringBuffer JavaDoc(100);
3515        boolean first = true;
3516
3517        e.append("(");
3518        for (int i = 1; i <= k; i++) {
3519            BitSet p = look[i].fset;
3520            if (!first) {
3521                e.append(") && (");
3522            }
3523            first = false;
3524
3525            // Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
3526            // There is no way to predict what that token would be. Just
3527            // allow anything instead.
3528            if (look[i].containsEpsilon()) {
3529                e.append("true");
3530            } else {
3531                e.append(getLookaheadTestTerm(i, p));
3532            }
3533        }
3534        e.append(")");
3535
3536        return e.toString();
3537    }
3538
3539    /**Generate a lookahead test expression for an alternate. This
3540     * will be a series of tests joined by '&&' and enclosed by '()',
3541     * the number of such tests being determined by the depth of the lookahead.
3542     */

3543    protected String JavaDoc getLookaheadTestExpression(Alternative alt, int maxDepth) {
3544        int depth = alt.lookaheadDepth;
3545        if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
3546            // if the decision is nondeterministic, do the best we can: LL(k)
3547            // any predicates that are around will be generated later.
3548            depth = grammar.maxk;
3549        }
3550
3551        if ( maxDepth==0 ) {
3552            // empty lookahead can result from alt with sem pred
3553            // that can see end of token. E.g., A : {pred}? ('a')? ;
3554            return "( true )";
3555        }
3556        return "(" + getLookaheadTestExpression(alt.cache,depth) + ")";
3557    }
3558
3559    /**Generate a depth==1 lookahead test expression given the BitSet.
3560     * This may be one of:
3561     * 1) a series of 'x==X||' tests
3562     * 2) a range test using >= && <= where possible,
3563     * 3) a bitset membership test for complex comparisons
3564     * @param k The lookahead level
3565     * @param p The lookahead set for level k
3566     */

3567    protected String JavaDoc getLookaheadTestTerm(int k, BitSet p) {
3568        // Determine the name of the item to be compared
3569        String ts = lookaheadString(k);
3570
3571        // Generate a range expression if possible
3572        int[] elems = p.toArray();
3573        if (elementsAreRange(elems)) {
3574            return getRangeExpression(k, elems);
3575        }
3576
3577        // Generate a bitset membership test if possible
3578        StringBuffer e;
3579        int degree = p.degree();
3580        if ( degree == 0 ) {
3581            return "true";
3582        }
3583
3584        if (degree >= bitsetTestThreshold) {
3585            int bitsetIdx = markBitsetForGen(p);
3586            return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
3587        }
3588
3589        // Otherwise, generate the long-winded series of "x==X||" tests
3590        e = new StringBuffer();
3591        for (int i = 0; i < elems.length; i++) {
3592            // Get the compared-to item (token or character value)
3593            String cs = getValueString(elems[i]);
3594
3595            // Generate the element comparison
3596            if ( i>0 ) e.append("||");
3597            e.append(ts);
3598            e.append("==");
3599            e.append(cs);
3600        }
3601        return e.toString();
3602    }
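    // Illustrative sketch of the three shapes this method can return for k == 1:
    //   range test:   (LA(1) >= 'a' && LA(1) <= 'z')     via getRangeExpression()
    //   bitset test:  tokenSet_3_.member(LA(1))          when degree >= bitsetTestThreshold
    //                                                    (bitset name hypothetical)
    //   "==" series:  LA(1)=='+'||LA(1)=='-'             the fallback chain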
3603
3604    /** Return an expression for testing a contiguous range of elements
3605     * @param k The lookahead level
3606     * @param elems The elements representing the set, usually from BitSet.toArray().
3607     * @return String containing test expression.
3608     */

3609    public String JavaDoc getRangeExpression(int k, int[] elems) {
3610        if (!elementsAreRange(elems)) {
3611            antlrTool.panic("getRangeExpression called with non-range");
3612        }
3613        int begin = elems[0];
3614        int end = elems[elems.length-1];
3615
3616        return
3617            "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
3618            lookaheadString(k) + " <= " + getValueString(end) + ")";
3619    }
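    // Illustrative sketch: for k == 1 and a lexer character set spanning '0'..'9',
    // the method above returns the C# test
    //   (LA(1) >= '0' && LA(1) <= '9')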
3620
3621    /** getValueString: get a string representation of a token or char value
3622     * @param value The token or char value
3623     */

3624    private String JavaDoc getValueString(int value) {
3625        String JavaDoc cs;
3626        if ( grammar instanceof LexerGrammar ) {
3627            cs = charFormatter.literalChar(value);
3628        }
3629        else
3630        {
3631            TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
3632            if ( ts == null ) {
3633                return ""+value; // return token type as string
3634                // antlrTool.panic("vocabulary for token type " + value + " is null");
3635            }
3636            String JavaDoc tId = ts.getId();
3637            if ( ts instanceof StringLiteralSymbol ) {
3638                // if string literal, use predefined label if any
3639                // if no predefined, try to mangle into LITERAL_xxx.
3640                // if can't mangle, use int value as last resort
3641                StringLiteralSymbol sl = (StringLiteralSymbol)ts;
3642                String JavaDoc label = sl.getLabel();
3643                if ( label!=null ) {
3644                    cs = label;
3645                }
3646                else {
3647                    cs = mangleLiteral(tId);
3648                    if (cs == null) {
3649                        cs = String.valueOf(value);
3650                    }
3651                }
3652            }
3653            else {
3654                cs = tId;
3655            }
3656        }
3657        return cs;
3658    }
3659
3660    /**Is the lookahead for this alt empty? */
3661    protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
3662        int depth = alt.lookaheadDepth;
3663        if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
3664            depth = grammar.maxk;
3665        }
3666        for (int i=1; i<=depth && i<=maxDepth; i++) {
3667            BitSet p = alt.cache[i].fset;
3668            if (p.degree() != 0) {
3669                return false;
3670            }
3671        }
3672        return true;
3673    }
3674
3675    private String JavaDoc lookaheadString(int k) {
3676        if (grammar instanceof TreeWalkerGrammar) {
3677            return "_t.Type";
3678        }
3679        return "LA(" + k + ")";
3680    }
3681
3682    /** Mangle a string literal into a meaningful token name. This is
3683      * only possible for literals whose characters are all letters or underscores. The resulting
3684      * mangled literal name is literalsPrefix with the text of the literal
3685      * appended.
3686      * @return A string representing the mangled literal, or null if not possible.
3687      */

3688    private String JavaDoc mangleLiteral(String JavaDoc s) {
3689        String JavaDoc mangled = antlrTool.literalsPrefix;
3690        for (int i = 1; i < s.length()-1; i++) {
3691            if (!Character.isLetter(s.charAt(i)) &&
3692                s.charAt(i) != '_') {
3693                return null;
3694            }
3695            mangled += s.charAt(i);
3696        }
3697        if ( antlrTool.upperCaseMangledLiterals ) {
3698            mangled = mangled.toUpperCase();
3699        }
3700        return mangled;
3701    }
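    // Illustrative sketch: assuming the default literals prefix "LITERAL_", the
    // string literal "while" mangles to LITERAL_while (or LITERAL_WHILE when
    // upperCaseMangledLiterals is set), while a literal containing non-letters
    // such as "+=" cannot be mangled and the method returns null.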
3702
3703    /** Map an identifier to its corresponding tree-node variable.
3704      * This is context-sensitive, depending on the rule and alternative
3705      * being generated
3706      * @param idParam The identifier name to map
3707      * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
3708      */

3709    public String JavaDoc mapTreeId(String JavaDoc idParam, ActionTransInfo transInfo) {
3710        // if not in an action of a rule, nothing to map.
3711        if ( currentRule==null ) return idParam;
3712
3713        boolean in_var = false;
3714        String JavaDoc id = idParam;
3715        if (grammar instanceof TreeWalkerGrammar)
3716        {
3717            if ( !grammar.buildAST )
3718            {
3719                in_var = true;
3720            }
3721            // If the id ends with "_in", then map it to the input variable
3722            else if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3)
3723            {
3724                // Strip off the "_in"
3725                id = id.substring(0, id.length()-3);
3726                in_var = true;
3727            }
3728        }
3729
3730        // Check the rule labels. If id is a label, then the output
3731        // variable is label_AST, and the input variable is plain label.
3732        for (int i = 0; i < currentRule.labeledElements.size(); i++)
3733        {
3734            AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
3735            if (elt.getLabel().equals(id))
3736            {
3737                return in_var ? id : id + "_AST";
3738            }
3739        }
3740
3741        // Failing that, check the id-to-variable map for the alternative.
3742        // If the id is in the map, then output variable is the name in the
3743        // map, and input variable is name_in
3744        String s = (String)treeVariableMap.get(id);
3745        if (s != null)
3746        {
3747            if (s == NONUNIQUE)
3748            {
3749                // There is more than one element with this id
3750                antlrTool.error("Ambiguous reference to AST element "+id+
3751                                " in rule "+currentRule.getRuleName());
3752                return null;
3753            }
3754            else if (s.equals(currentRule.getRuleName()))
3755            {
3756                // a recursive call to the enclosing rule is
3757                // ambiguous with the rule itself.
3758                // if( in_var )
3759                // System.out.println("returning null (rulename)");
3760                antlrTool.error("Ambiguous reference to AST element "+id+
3761                                " in rule "+currentRule.getRuleName());
3762                return null;
3763            }
3764            else
3765            {
3766                return in_var ? s + "_in" : s;
3767            }
3768        }
3769
3770        // Failing that, check the rule name itself. Output variable
3771        // is rule_AST; input variable is rule_AST_in (treeparsers).
3772        if( id.equals(currentRule.getRuleName()) )
3773        {
3774            String JavaDoc r = in_var ? id + "_AST_in" : id + "_AST";
3775            if ( transInfo!=null ) {
3776                if ( !in_var ) {
3777                    transInfo.refRuleRoot = r;
3778                }
3779            }
3780            return r;
3781        }
3782        else
3783        {
3784            // id does not map to anything -- return itself.
3785            return id;
3786        }
3787    }
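    // Illustrative sketch: inside a hypothetical rule "expr", the action symbol
    // #expr maps to the output variable expr_AST, a labeled element #e maps to
    // e_AST, and in a tree parser the input form #expr_in maps to expr_AST_in,
    // following the lookups implemented above.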
3788
3789    /** Given an element and the name of an associated AST variable,
3790      * create a mapping between the element "name" and the variable name.
3791      */

3792    private void mapTreeVariable(AlternativeElement e, String JavaDoc name)
3793    {
3794        // For tree elements, defer to the root
3795        if (e instanceof TreeElement) {
3796            mapTreeVariable( ((TreeElement)e).root, name);
3797            return;
3798        }
3799
3800        // Determine the name of the element, if any, for mapping purposes
3801        String elName = null;
3802
3803        // Don't map labeled items
3804        if (e.getLabel() == null) {
3805            if (e instanceof TokenRefElement) {
3806                // use the token id
3807                elName = ((TokenRefElement)e).atomText;
3808            }
3809            else if (e instanceof RuleRefElement) {
3810                // use the rule name
3811                elName = ((RuleRefElement)e).targetRule;
3812            }
3813        }
3814        // Add the element to the tree variable map if it has a name
3815        if (elName != null) {
3816            if (treeVariableMap.get(elName) != null) {
3817                // Name is already in the map -- mark it as duplicate
3818                treeVariableMap.remove(elName);
3819                treeVariableMap.put(elName, NONUNIQUE);
3820            }
3821            else {
3822                treeVariableMap.put(elName, name);
3823            }
3824        }
3825    }
3826
3827    /** Lexically process tree-specifiers in the action.
3828     * This will replace #id and #(...) with the appropriate
3829     * function calls and/or variables.
3830     */

3831    protected String JavaDoc processActionForSpecialSymbols(String JavaDoc actionStr,
3832                                                    int line,
3833                                                    RuleBlock currentRule,
3834                                                    ActionTransInfo tInfo)
3835    {
3836        if ( actionStr==null || actionStr.length()==0 )
3837            return null;
3838
3839        // The action trans info tells us (at the moment) whether an
3840        // assignment was done to the rule's tree root.
3841        if (grammar==null)
3842            return actionStr;
3843
3844        // see if we have anything to do...
3845        if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
3846            grammar instanceof TreeWalkerGrammar ||
3847            ((grammar instanceof LexerGrammar ||
3848            grammar instanceof ParserGrammar)
3849                && actionStr.indexOf('$') != -1) )
3850        {
3851            // Create a lexer to read an action and return the translated version
3852            persistence.antlr.actions.csharp.ActionLexer lexer = new persistence.antlr.actions.csharp.ActionLexer(actionStr, currentRule, this, tInfo);
3853
3854            lexer.setLineOffset(line);
3855            lexer.setFilename(grammar.getFilename());
3856            lexer.setTool(antlrTool);
3857
3858            try {
3859                lexer.mACTION(true);
3860                actionStr = lexer.getTokenObject().getText();
3861                // System.out.println("action translated: "+actionStr);
3862                // System.out.println("trans info is "+tInfo);
3863            }
3864            catch (RecognitionException ex) {
3865                lexer.reportError(ex);
3866                return actionStr;
3867            }
3868            catch (TokenStreamException tex) {
3869                antlrTool.panic("Error reading action:"+actionStr);
3870                return actionStr;
3871            }
3872            catch (CharStreamException io) {
3873                antlrTool.panic("Error reading action:"+actionStr);
3874                return actionStr;
3875            }
3876        }
3877        return actionStr;
3878    }
3879
3880    private void setupGrammarParameters(Grammar g) {
3881        if (g instanceof ParserGrammar ||
3882             g instanceof LexerGrammar ||
3883             g instanceof TreeWalkerGrammar
3884            )
3885        {
3886            /* RK: options also have to be added to Grammar.java and for options
3887             * on the file level entries have to be defined in
3888             * DefineGrammarSymbols.java and passed around via 'globals' in antlrTool.java
3889             */

3890            if( antlrTool.nameSpace != null )
3891                nameSpace = new CSharpNameSpace( antlrTool.nameSpace.getName() );
3892            //genHashLines = antlrTool.genHashLines;
3893

3894            /* let grammar level options override filelevel ones...
3895             */

3896            if( g.hasOption("namespace") ) {
3897                Token t = g.getOption("namespace");
3898                if( t != null ) {
3899                    nameSpace = new CSharpNameSpace(t.getText());
3900                }
3901            }
3902            /*
3903            if( g.hasOption("genHashLines") ) {
3904                Token t = g.getOption("genHashLines");
3905                if( t != null ) {
3906                    String val = StringUtils.stripFrontBack(t.getText(),"\"","\"");
3907                    genHashLines = val.equals("true");
3908                }
3909            }
3910            */

3911        }
3912
3913        if (g instanceof ParserGrammar) {
3914            labeledElementASTType = "AST";
3915            if ( g.hasOption("ASTLabelType") ) {
3916                Token tsuffix = g.getOption("ASTLabelType");
3917                if ( tsuffix != null ) {
3918                    String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
3919                    if ( suffix != null ) {
3920                        usingCustomAST = true;
3921                        labeledElementASTType = suffix;
3922                    }
3923                }
3924            }
3925            labeledElementType = "Token ";
3926            labeledElementInit = "null";
3927            commonExtraArgs = "";
3928            commonExtraParams = "";
3929            commonLocalVars = "";
3930            lt1Value = "LT(1)";
3931            exceptionThrown = "RecognitionException";
3932            throwNoViable = "throw new NoViableAltException(LT(1), getFilename());";
3933        }
3934        else if (g instanceof LexerGrammar) {
3935            labeledElementType = "char ";
3936            labeledElementInit = "'\\0'";
3937            commonExtraArgs = "";
3938            commonExtraParams = "bool _createToken";
3939            commonLocalVars = "int _ttype; Token _token=null; int _begin=text.Length;";
3940            lt1Value = "LA(1)";
3941            exceptionThrown = "RecognitionException";
3942            throwNoViable = "throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());";
3943        }
3944        else if (g instanceof TreeWalkerGrammar) {
3945            labeledElementASTType = "AST";
3946            labeledElementType = "AST";
3947            if ( g.hasOption("ASTLabelType") ) {
3948                Token tsuffix = g.getOption("ASTLabelType");
3949                if ( tsuffix != null ) {
3950                    String JavaDoc suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
3951                    if ( suffix != null ) {
3952                        usingCustomAST = true;
3953                        labeledElementASTType = suffix;
3954                        labeledElementType = suffix;
3955                    }
3956                }
3957            }
3958            if ( !g.hasOption("ASTLabelType") ) {
3959                g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,"AST"));
3960            }
3961            labeledElementInit = "null";
3962            commonExtraArgs = "_t";
3963            commonExtraParams = "AST _t";
3964            commonLocalVars = "";
3965            if (usingCustomAST)
3966                lt1Value = "(_t==ASTNULL) ? null : (" + labeledElementASTType + ")_t";
3967            else
3968                lt1Value = "_t";
3969            exceptionThrown = "RecognitionException";
3970            throwNoViable = "throw new NoViableAltException(_t);";
3971        }
3972        else {
3973            antlrTool.panic("Unknown grammar type");
3974        }
3975    }
3976
3977    /** This method exists so a subclass, namely VAJCodeGenerator,
3978     * can open the file in its own evil way. JavaCodeGenerator
3979     * simply opens a text file...
3980     */

3981    public void setupOutput(String JavaDoc className) throws IOException JavaDoc
3982    {
3983        currentOutput = antlrTool.openOutputFile(className + ".cs");
3984    }
3985
3986    /** Helper method from Eric Smith's version of CSharpCodeGenerator.*/
3987    private static String JavaDoc OctalToUnicode(String JavaDoc str)
3988    {
3989        // only do any conversion if the string looks like "'\003'"
3990        if ( (4 <= str.length()) &&
3991            ('\'' == str.charAt(0)) &&
3992            ('\\' == str.charAt(1)) &&
3993            (('0' <= str.charAt(2)) && ('7' >= str.charAt(2))) &&
3994            ('\'' == str.charAt(str.length()-1)) )
3995        {
3996            // convert octal representation to decimal, then to hex
3997            Integer x = Integer.valueOf(str.substring(2, str.length()-1), 8);
3998
3999            return "'\\x" + Integer.toHexString(x.intValue()) + "'";
4000        }
4001        else {
4002            return str;
4003        }
4004    }
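    // Illustrative sketch: the helper above rewrites an octal character literal
    // such as "'\003'" into its hexadecimal form "'\x3'" so that the generated
    // C# compiles; any string not matching that shape is returned unchanged.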
4005
4006    /** Helper method that returns the name of the interface/class/enum type for
4007        token type constants.
4008     */

4009    public String JavaDoc getTokenTypesClassName()
4010    {
4011        TokenManager tm = grammar.tokenManager;
4012        return new String JavaDoc(tm.getName() + TokenTypesFileSuffix);
4013    }
4014
4015    private void declareSaveIndexVariableIfNeeded()
4016    {
4017        if (!bSaveIndexCreated)
4018        {
4019            println("int _saveIndex = 0;");
4020            bSaveIndexCreated = true;
4021        }
4022    }
4023}
4024