package persistence.antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.jGuru.com
 * Software rights: http://www.antlr.org/license.html
 *
 */

/** TODO: strip comments from javadoc entries
 */

import java.util.Enumeration;

import persistence.antlr.collections.impl.BitSet;
import persistence.antlr.collections.impl.Vector;

import java.io.PrintWriter; //SAS: changed for proper text file io
import java.io.IOException;
import java.io.FileWriter;

/** Generate P.sgml, a cross-linked representation of P with or without actions */
public class DocBookCodeGenerator extends CodeGenerator {
    /** non-zero if inside syntactic predicate generation */
    protected int syntacticPredLevel = 0;

    /** true during lexer generation, false during parser generation */
    protected boolean doingLexRules = false;

    protected boolean firstElementInAlt;

    protected AlternativeElement prevAltElem = null; // what was generated last?

    /** Create a DocBook code generator.
     * The caller must still call setTool, setBehavior, and setAnalyzer
     * before generating code.
     */
    public DocBookCodeGenerator() {
        super();
        charFormatter = new JavaCharFormatter();
    }

    /** Encode a string for printing in an HTML document.
     * e.g. encode '<' '>' and similar stuff
     * @param s the string to encode
     */
    static String HTMLEncode(String s) {
        StringBuffer buf = new StringBuffer();

        for (int i = 0, len = s.length(); i < len; i++) {
            char c = s.charAt(i);
            if (c == '&')
                buf.append("&amp;");
            else if (c == '\"')
                buf.append("&quot;");
            else if (c == '\'')
                buf.append("&#039;");
            else if (c == '<')
                buf.append("&lt;");
            else if (c == '>')
                buf.append("&gt;");
            else
                buf.append(c);
        }
        return buf.toString();
    }

    /** Make a string usable as a DocBook/SGML id,
     * e.g. replace '_' with '.'
     * @param s the string to quote
     */
    static String QuoteForId(String s) {
        StringBuffer buf = new StringBuffer();

        for (int i = 0, len = s.length(); i < len; i++) {
            char c = s.charAt(i);
            if (c == '_')
                buf.append(".");
            else
                buf.append(c);
        }
        return buf.toString();
    }

    public void gen() {
        // Do the code generation
        try {
            // Loop over all grammars
            Enumeration grammarIter = behavior.grammars.elements();
            while (grammarIter.hasMoreElements()) {
                Grammar g = (Grammar)grammarIter.nextElement();

                // Connect all the components to each other
                /*
                g.setGrammarAnalyzer(analyzer);
                analyzer.setGrammar(g);
                */
                g.setCodeGenerator(this);

                // To get the right overloading behavior across heterogeneous grammars
                g.generate();

                if (antlrTool.hasError()) {
                    antlrTool.fatalError("Exiting due to errors.");
                }
            }
        }
        catch (IOException e) {
            antlrTool.reportException(e, null);
        }
    }

    /** Generate code for the given grammar element.
     * @param action The {...} action to generate
     */
    public void gen(ActionElement action) {
        // no-op
    }

    /** Generate code for the given grammar element.
     * @param blk The "x|y|z|..." block to generate
     */
    public void gen(AlternativeBlock blk) {
        genGenericBlock(blk, "");
    }

    /** Generate code for the given grammar element.
     * @param end The block-end element to generate. Block-end
     * elements are synthesized by the grammar parser to represent
     * the end of a block.
     */
    public void gen(BlockEndElement end) {
        // no-op
    }

    /** Generate code for the given grammar element.
     * @param atom The character literal reference to generate
     */
    public void gen(CharLiteralElement atom) {
        if (atom.not) {
            _print("~");
        }
        _print(HTMLEncode(atom.atomText) + " ");
    }

    /** Generate code for the given grammar element.
     * @param r The character-range reference to generate
     */
    public void gen(CharRangeElement r) {
        print(r.beginText + ".." + r.endText + " ");
    }

    /** Generate the lexer DocBook file */
    public void gen(LexerGrammar g) throws IOException {
        setGrammar(g);
        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
        //SAS: changed for proper text file io

        tabs = 0;
        doingLexRules = true;

        // Generate header common to all output files
        genHeader();

        // Output the user-defined lexer preamble
        // RK: guess not..
        // println(grammar.preambleAction.getText());

        // Generate lexer class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("<para>Definition of lexer " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");

        // Generate user-defined lexer class members
        // printAction(grammar.classMemberAction.getText());

        /*
        // Generate string literals
        println("");
        println("*** String literals used in the parser");
        println("The following string literals were used in the parser.");
        println("An actual code generator would arrange to place these literals");
        println("into a table in the generated lexer, so that actions in the");
        println("generated lexer could match token text against the literals.");
        println("String literals used in the lexer are not listed here, as they");
        println("are incorporated into the mainstream lexer processing.");
        tabs++;
        // Enumerate all of the symbols and look for string literal symbols
        Enumeration ids = grammar.getSymbols();
        while ( ids.hasMoreElements() ) {
            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
            // Only processing string literals -- reject other symbol entries
            if ( sym instanceof StringLiteralSymbol ) {
                StringLiteralSymbol s = (StringLiteralSymbol)sym;
                println(s.getId() + " = " + s.getTokenType());
            }
        }
        tabs--;
        println("*** End of string literals used by the parser");
        */

        // Generate nextToken() rule.
        // nextToken() is a synthetic lexer rule that is the implicit OR of all
        // user-defined lexer rules.
        genNextToken();

        // Generate code for each rule in the lexer
        Enumeration ids = grammar.rules.elements();
        while (ids.hasMoreElements()) {
            RuleSymbol rs = (RuleSymbol)ids.nextElement();
            if (!rs.id.equals("mnextToken")) {
                genRule(rs);
            }
        }

        // Close the lexer output file
        currentOutput.close();
        currentOutput = null;
        doingLexRules = false;
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)+ block to generate
     */
    public void gen(OneOrMoreBlock blk) {
        genGenericBlock(blk, "+");
    }

    /** Generate the parser DocBook file */
    public void gen(ParserGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the parser and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml");
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml");

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Generate parser class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("<para>Definition of parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");

        // Enumerate the parser rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rule from the list and downcast it to the proper type
            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
            // Only process parser rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol)sym);
            }
        }
        tabs--;
        println("");

        genTail();

        // Close the parser output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param rr The rule-reference to generate
     */
    public void gen(RuleRefElement rr) {
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);

        // Generate the actual rule description
        _print("<link linkend=\"" + QuoteForId(rr.targetRule) + "\">");
        _print(rr.targetRule);
        _print("</link>");
        // RK: Leave out args..
        // if (rr.args != null) {
        //     _print("["+rr.args+"]");
        // }
        _print(" ");
    }

    /** Generate code for the given grammar element.
     * @param atom The string-literal reference to generate
     */
    public void gen(StringLiteralElement atom) {
        if (atom.not) {
            _print("~");
        }
        _print(HTMLEncode(atom.atomText));
        _print(" ");
    }

    /** Generate code for the given grammar element.
     * @param r The token-range reference to generate
     */
    public void gen(TokenRangeElement r) {
        print(r.beginText + ".." + r.endText + " ");
    }

    /** Generate code for the given grammar element.
     * @param atom The token-reference to generate
     */
    public void gen(TokenRefElement atom) {
        if (atom.not) {
            _print("~");
        }
        _print(atom.atomText);
        _print(" ");
    }

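    /** Generate a textual representation of the given tree element. */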
    public void gen(TreeElement t) {
        print(t + " ");
    }

    /** Generate the tree-walker DocBook file */
    public void gen(TreeWalkerGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the tree-walker and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml");
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml");
        //SAS: changed for proper text file io

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Output the user-defined tree-walker preamble
        println("");
        // println("*** Tree-walker Preamble Action.");
        // println("This action will appear before the declaration of your tree-walker class:");
        // tabs++;
        // println(grammar.preambleAction.getText());
        // tabs--;
        // println("*** End of tree-walker Preamble Action");

        // Generate tree-walker class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("<para>Definition of tree parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");

        // Generate user-defined tree-walker class members
        // println("");
        // println("*** User-defined tree-walker class members:");
        // println("These are the member declarations that you defined for your class:");
        // tabs++;
        // printAction(grammar.classMemberAction.getText());
        // tabs--;
        // println("*** End of user-defined tree-walker class members");

        // Generate code for each rule in the grammar
        println("");
        // println("*** tree-walker rules:");
        tabs++;

        // Enumerate the tree-walker rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rule from the list and downcast it to the proper type
            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
            // Only process tree-walker rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol)sym);
            }
        }
        tabs--;
        println("");
        // println("*** End of tree-walker rules");

        // println("");
        // println("*** End of tree-walker");

        // Close the tree-walker output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate a wildcard element */
    public void gen(WildcardElement wc) {
        /*
        if ( wc.getLabel()!=null ) {
            _print(wc.getLabel()+"=");
        }
        */
        _print(". ");
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)* block to generate
     */
    public void gen(ZeroOrMoreBlock blk) {
        genGenericBlock(blk, "*");
    }

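    /** Generate the elements of a single alternative, stopping at the
     * synthesized block-end element. */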
    protected void genAlt(Alternative alt) {
        if (alt.getTreeSpecifier() != null) {
            _print(alt.getTreeSpecifier().getText());
        }
        prevAltElem = null;
        for (AlternativeElement elem = alt.head;
             !(elem instanceof BlockEndElement);
             elem = elem.next) {
            elem.generate();
            firstElementInAlt = false;
            prevAltElem = elem;
        }
    }
    /** Generate the header for a block, which may be a RuleBlock or a
     * plain AlternativeBlock. This generates any variable declarations,
     * init-actions, and syntactic-predicate-testing variables.
     * @param blk The block for which the preamble is to be generated.
     */
    // protected void genBlockPreamble(AlternativeBlock blk) {
    //     // RK: don't dump out init actions
    //     // dump out init action
    //     if ( blk.initAction!=null ) {
    //         printAction("{" + blk.initAction + "}");
    //     }
    // }
    /** Generate common code for a block of alternatives; return a postscript
     * that needs to be generated at the end of the block. Other routines
     * may append else-clauses and such for error checking before the postfix
     * is generated.
     */
    public void genCommonBlock(AlternativeBlock blk) {
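        // If the block has more than one alternative, wrap the alternatives in a
        // DocBook <itemizedlist>, one <listitem> per alternative; a single
        // alternative is emitted inline without list markup.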
        if (blk.alternatives.size() > 1)
            println("<itemizedlist mark=\"none\">");
        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            AlternativeElement elem = alt.head;

            if (blk.alternatives.size() > 1)
                print("<listitem><para>");

            // dump alt operator |
            if (i > 0 && blk.alternatives.size() > 1) {
                _print("| ");
            }

            // Dump the alternative, starting with predicates
            //
            boolean save = firstElementInAlt;
            firstElementInAlt = true;
            tabs++; // in case we do a newline in alt, increase the tab indent

            genAlt(alt);
            tabs--;
            firstElementInAlt = save;
            if (blk.alternatives.size() > 1)
                _println("</para></listitem>");
        }
        if (blk.alternatives.size() > 1)
            println("</itemizedlist>");
    }

    /** Generate a textual representation of the follow set
     * for a block.
     * @param blk The rule block of interest
     */
    public void genFollowSetForRuleBlock(RuleBlock blk) {
        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
        printSet(grammar.maxk, 1, follow);
    }

    protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
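        // blkOp is the EBNF suffix printed after the closing paren:
        // "" for a plain (...) block, "+" for (...)+, "*" for (...)*.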
        if (blk.alternatives.size() > 1) {
            // make sure we start on a new line
            _println("");
            if (!firstElementInAlt) {
                // only do newline if the last element wasn't a multi-line block
                //if ( prevAltElem==null ||
                //     !(prevAltElem instanceof AlternativeBlock) ||
                //     ((AlternativeBlock)prevAltElem).alternatives.size()==1 )
                //{
                _println("(");
                //}
                //else
                //{
                //    _print("(");
                //}
                // _println("");
                // print("(\t");
            }
            else {
                _print("(");
            }
        }
        else {
            _print("( ");
        }
        // RK: don't dump init actions
        // genBlockPreamble(blk);
        genCommonBlock(blk);
        if (blk.alternatives.size() > 1) {
            _println("");
            print(")" + blkOp + " ");
            // if not last element of alt, need newline & to indent
            if (!(blk.next instanceof BlockEndElement)) {
                _println("");
                print("");
            }
        }
        else {
            _print(")" + blkOp + " ");
        }
    }

    /** Generate a header that is common to all output files */
    protected void genHeader() {
        println("<?xml version=\"1.0\" standalone=\"no\"?>");
        println("<!DOCTYPE book PUBLIC \"-//OASIS//DTD DocBook V3.1//EN\">");
        println("<book lang=\"en\">");
        println("<bookinfo>");
        println("<title>Grammar " + grammar.getClassName() + "</title>");
        println(" <author>");
        println(" <firstname></firstname>");
        println(" <othername></othername>");
        println(" <surname></surname>");
        println(" <affiliation>");
        println(" <address>");
        println(" <email></email>");
        println(" </address>");
        println(" </affiliation>");
        println(" </author>");
        println(" <othercredit>");
        println(" <contrib>");
        println(" Generated by <ulink url=\"http://www.ANTLR.org/\">ANTLR</ulink>" + antlrTool.version);
        println(" from " + antlrTool.grammarFile);
        println(" </contrib>");
        println(" </othercredit>");
        println(" <pubdate></pubdate>");
        println(" <abstract>");
        println(" <para>");
        println(" </para>");
        println(" </abstract>");
        println("</bookinfo>");
        println("<chapter>");
        println("<title></title>");
    }

    /** Generate the lookahead set for an alternative. */
    protected void genLookaheadSetForAlt(Alternative alt) {
        if (doingLexRules && alt.cache[1].containsEpsilon()) {
            println("MATCHES ALL");
            return;
        }
        int depth = alt.lookaheadDepth;
        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
            // if the decision is nondeterministic, do the best we can: LL(k)
            // any predicates that are around will be generated later.
            depth = grammar.maxk;
        }
        for (int i = 1; i <= depth; i++) {
            Lookahead lookahead = alt.cache[i];
            printSet(depth, i, lookahead);
        }
    }

    /** Generate a textual representation of the lookahead set
     * for a block.
     * @param blk The block of interest
     */
    public void genLookaheadSetForBlock(AlternativeBlock blk) {
        // Find the maximal lookahead depth over all alternatives
        int depth = 0;
        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
                depth = grammar.maxk;
                break;
            }
            else if (depth < alt.lookaheadDepth) {
                depth = alt.lookaheadDepth;
            }
        }

        for (int i = 1; i <= depth; i++) {
            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
            printSet(depth, i, lookahead);
        }
    }

    /** Generate the nextToken rule.
     * nextToken is a synthetic lexer rule that is the implicit OR of all
     * user-defined lexer rules.
     */
    public void genNextToken() {
        println("");
        println("/** Lexer nextToken rule:");
        println(" * The lexer nextToken rule is synthesized from all of the user-defined");
        println(" * lexer rules. It logically consists of one big alternative block with");
        println(" * each user-defined rule being an alternative.");
        println(" */");

        // Create the synthesized rule block for nextToken consisting
        // of an alternate block containing all the user-defined lexer rules.
        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

        // Define the nextToken rule symbol
        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
        nextTokenRs.setDefined();
        nextTokenRs.setBlock(blk);
        nextTokenRs.access = "private";
        grammar.define(nextTokenRs);

        /*
        // Analyze the synthesized block
        if (!grammar.theLLkAnalyzer.deterministic(blk))
        {
            println("The grammar analyzer has determined that the synthesized");
            println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
            println("This means that there is some overlap of the character");
            println("lookahead for two or more of your lexer rules.");
        }
        */

        genCommonBlock(blk);
    }

    /** Generate code for a named rule block
     * @param s The RuleSymbol describing the rule to generate
     */
    public void genRule(RuleSymbol s) {
        if (s == null || !s.isDefined()) return; // undefined rule
        println("");

        if (s.access.length() != 0) {
            if (!s.access.equals("public")) {
                _print("<para>" + s.access + " </para>");
            }
        }

        println("<section id=\"" + QuoteForId(s.getId()) + "\">");
        println("<title>" + s.getId() + "</title>");
        if (s.comment != null) {
            _println("<para>" + HTMLEncode(s.comment) + "</para>");
        }
        println("<para>");

        // Get rule return type and arguments
        RuleBlock rblk = s.getBlock();

        // RK: for HTML output not of much value...
        // Gen method return value(s)
        // if (rblk.returnAction != null) {
        //     _print("["+rblk.returnAction+"]");
        // }
        // Gen arguments
        // if (rblk.argAction != null)
        // {
        //     _print(" returns [" + rblk.argAction+"]");
        // }
        _println("");
        print(s.getId() + ":\t");
        tabs++;

        // Dump any init-action
        // genBlockPreamble(rblk);

        // Dump the alternates of the rule
        genCommonBlock(rblk);

        _println("");
        // println(";");
        tabs--;
        _println("</para>");
        _println("</section><!-- section \"" + s.getId() + "\" -->");
    }

    /** Generate the syntactic predicate. This basically generates
     * the alternative block, but tracks if we are inside a synPred.
     * @param blk The syntactic predicate block
     */
    protected void genSynPred(SynPredBlock blk) {
        // no op
    }

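    /** Close the DocBook chapter and book elements opened by genHeader(). */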
    public void genTail() {
        println("</chapter>");
        println("</book>");
    }

    /** Generate the token types file */
    protected void genTokenTypes(TokenManager tm) throws IOException {
        // Open the token output file and set the currentOutput stream
        antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
        //SAS: changed for proper text file io
        tabs = 0;

        // Generate the header common to all diagnostic files
        genHeader();

        // Generate a string for each token. This creates a static
        // array of Strings indexed by token type.
        println("");
        println("*** Tokens used by the parser");
        println("This is a list of the token numeric values and the corresponding");
        println("token identifiers. Some tokens are literals, and because of that");
        println("they have no identifiers. Literals are double-quoted.");
        tabs++;

        // Enumerate all the valid token types
        Vector v = tm.getVocabulary();
        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
            String s = (String)v.elementAt(i);
            if (s != null) {
                println(s + " = " + i);
            }
        }

        // Close the interface
        tabs--;
        println("*** End of tokens used by the parser");

        // Close the tokens output file
        currentOutput.close();
        currentOutput = null;
    }

    /// unused.
    protected String processActionForSpecialSymbols(String actionStr,
                                                    int line,
                                                    RuleBlock currentRule,
                                                    ActionTransInfo tInfo) {
        return actionStr;
    }

    /** Get a string for an expression to generate creation of an AST subtree.
     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
     */
    public String getASTCreateString(Vector v) {
        return null;
    }

    /** Get a string for an expression to generate creation of an AST node
     * @param str The arguments to the AST constructor
     */
    public String getASTCreateString(GrammarAtom atom, String str) {
        return null;
    }

    /** Map an identifier to its corresponding tree-node variable.
     * This is context-sensitive, depending on the rule and alternative
     * being generated
     * @param id The identifier name to map
     * @param tInfo The action translation info
     */
    public String mapTreeId(String id, ActionTransInfo tInfo) {
        return id;
    }

    /** Format a lookahead or follow set.
     * @param depth The depth of the entire lookahead/follow
     * @param k The lookahead level to print
     * @param lookahead The lookahead/follow set to print
     */
    public void printSet(int depth, int k, Lookahead lookahead) {
        int numCols = 5;
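        // Print at most numCols elements per line; larger sets are wrapped
        // onto indented continuation lines.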

        int[] elems = lookahead.fset.toArray();

        if (depth != 1) {
            print("k==" + k + ": {");
        }
        else {
            print("{ ");
        }
        if (elems.length > numCols) {
            _println("");
            tabs++;
            print("");
        }

        int column = 0;
        for (int i = 0; i < elems.length; i++) {
            column++;
            if (column > numCols) {
                _println("");
                print("");
                column = 0;
            }
            if (doingLexRules) {
                _print(charFormatter.literalChar(elems[i]));
            }
            else {
                _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
            }
            if (i != elems.length - 1) {
                _print(", ");
            }
        }

        if (elems.length > numCols) {
            _println("");
            tabs--;
            print("");
        }
        _println(" }");
    }
}
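
// Usage sketch (not part of the original file; the exact wiring is an assumption).
// Per the constructor javadoc, a caller such as the ANTLR Tool must supply the
// tool, behavior, and analyzer before generating, roughly:
//
//     DocBookCodeGenerator docgen = new DocBookCodeGenerator();
//     docgen.setTool(tool);          // the running antlr Tool instance
//     docgen.setBehavior(behavior);  // collected grammars to document
//     docgen.setAnalyzer(analyzer);  // grammar analyzer used for lookahead sets
//     docgen.gen();                  // writes a DocBook (.sgml) file per grammar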