1 package persistence.antlr; 2 3 8 9 import java.util.Hashtable ; 10 11 import persistence.antlr.collections.impl.BitSet; 12 13 19 public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior { 20 protected Hashtable grammars = new Hashtable (); 22 protected Hashtable tokenManagers = new Hashtable (); 24 protected Grammar grammar; 26 protected Tool tool; 28 LLkAnalyzer analyzer; 30 String [] args; 33 static final String DEFAULT_TOKENMANAGER_NAME = "*default"; 35 protected Hashtable headerActions = new Hashtable (); 38 Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); String language = "Java"; 42 43 protected int numLexers = 0; 44 protected int numParsers = 0; 45 protected int numTreeParsers = 0; 46 47 public DefineGrammarSymbols(Tool tool_, String [] args_, LLkAnalyzer analyzer_) { 48 tool = tool_; 49 args = args_; 50 analyzer = analyzer_; 51 } 52 53 public void _refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) { 54 if (!(grammar instanceof LexerGrammar)) { 55 String str = lit.getText(); 57 if (grammar.tokenManager.getTokenSymbol(str) != null) { 58 return; 60 } 61 StringLiteralSymbol sl = new StringLiteralSymbol(str); 62 int tt = grammar.tokenManager.nextTokenType(); 63 sl.setTokenType(tt); 64 grammar.tokenManager.define(sl); 65 } 66 } 67 68 69 public void _refToken(Token assignId, 70 Token t, 71 Token label, 72 Token args, 73 boolean inverted, 74 int autoGenType, 75 boolean lastInRule) { 76 String id = t.getText(); 77 if (!grammar.tokenManager.tokenDefined(id)) { 78 83 int tt = grammar.tokenManager.nextTokenType(); 84 TokenSymbol ts = new TokenSymbol(id); 85 ts.setTokenType(tt); 86 grammar.tokenManager.define(ts); 87 } 88 } 89 90 91 public void abortGrammar() { 92 if (grammar != null && grammar.getClassName() != null) { 93 grammars.remove(grammar.getClassName()); 94 } 95 grammar = null; 96 } 97 98 public void beginAlt(boolean doAST_) { 99 } 100 101 public void beginChildList() { 102 } 103 104 public void beginExceptionGroup() { 106 } 107 108 public void beginExceptionSpec(Token label) { 109 } 110 111 public void beginSubRule(Token label, Token start, boolean not) { 112 } 113 114 public void beginTree(Token tok) throws SemanticException { 115 } 116 117 118 public void defineRuleName(Token r, 119 String access, 120 boolean ruleAutoGen, 121 String docComment) 122 throws SemanticException { 123 String id = r.getText(); 124 125 if (r.type == ANTLRTokenTypes.TOKEN_REF) { 127 id = CodeGenerator.encodeLexerRuleName(id); 129 if (!grammar.tokenManager.tokenDefined(r.getText())) { 131 int tt = grammar.tokenManager.nextTokenType(); 132 TokenSymbol ts = new TokenSymbol(r.getText()); 133 ts.setTokenType(tt); 134 grammar.tokenManager.define(ts); 135 } 136 } 137 138 RuleSymbol rs; 139 if (grammar.isDefined(id)) { 140 rs = (RuleSymbol)grammar.getSymbol(id); 142 if (rs.isDefined()) { 144 tool.error("redefinition of rule " + id, grammar.getFilename(), r.getLine(), r.getColumn()); 145 } 146 } 147 else { 148 rs = new RuleSymbol(id); 149 grammar.define(rs); 150 } 151 rs.setDefined(); 152 rs.access = access; 153 rs.comment = docComment; 154 } 155 156 159 public void defineToken(Token tokname, Token tokliteral) { 160 String name = null; 161 String literal = null; 162 if (tokname != null) { 163 name = tokname.getText(); 164 } 165 if (tokliteral != null) { 166 literal = tokliteral.getText(); 167 } 168 if (literal != null) { 171 StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(literal); 172 if (sl != null) { 173 if (name == null || sl.getLabel() != null) { 179 tool.warning("Redefinition of literal in tokens {...}: " + literal, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn()); 180 return; 181 } 182 else if (name != null) { 183 sl.setLabel(name); 185 grammar.tokenManager.mapToTokenSymbol(name, sl); 187 } 188 } 189 if (name != null) { 192 TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(name); 193 if (ts != null) { 194 if (ts instanceof StringLiteralSymbol) { 197 tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn()); 198 return; 199 } 200 int ttype = ts.getTokenType(); 207 sl = new StringLiteralSymbol(literal); 209 sl.setTokenType(ttype); 210 sl.setLabel(name); 211 grammar.tokenManager.define(sl); 213 grammar.tokenManager.mapToTokenSymbol(name, sl); 215 return; 216 } 217 } 219 sl = new StringLiteralSymbol(literal); 220 int tt = grammar.tokenManager.nextTokenType(); 221 sl.setTokenType(tt); 222 sl.setLabel(name); 223 grammar.tokenManager.define(sl); 224 if (name != null) { 225 grammar.tokenManager.mapToTokenSymbol(name, sl); 227 } 228 } 229 230 else { 232 if (grammar.tokenManager.tokenDefined(name)) { 233 tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokname.getLine(), tokname.getColumn()); 234 return; 235 } 236 int tt = grammar.tokenManager.nextTokenType(); 237 TokenSymbol ts = new TokenSymbol(name); 238 ts.setTokenType(tt); 239 grammar.tokenManager.define(ts); 240 } 241 } 242 243 public void endAlt() { 244 } 245 246 public void endChildList() { 247 } 248 249 public void endExceptionGroup() { 250 } 251 252 public void endExceptionSpec() { 253 } 254 255 public void endGrammar() { 256 } 257 258 263 public void endOptions() { 264 if (grammar.exportVocab == null && grammar.importVocab == null) { 266 grammar.exportVocab = grammar.getClassName(); 267 if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { 269 grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME; 271 TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME); 272 grammar.setTokenManager(tm); 274 return; 275 } 276 TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool); 279 grammar.setTokenManager(tm); 280 tokenManagers.put(grammar.exportVocab, tm); 282 tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); 284 return; 285 } 286 287 if (grammar.exportVocab == null && grammar.importVocab != null) { 289 grammar.exportVocab = grammar.getClassName(); 290 if (grammar.importVocab.equals(grammar.exportVocab)) { 292 tool.warning("Grammar " + grammar.getClassName() + 293 " cannot have importVocab same as default output vocab (grammar name); ignored."); 294 grammar.importVocab = null; 296 endOptions(); 297 return; 298 } 299 if (tokenManagers.containsKey(grammar.importVocab)) { 302 TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab); 306 TokenManager dup = (TokenManager)tm.clone(); 308 dup.setName(grammar.exportVocab); 309 dup.setReadOnly(false); 311 grammar.setTokenManager(dup); 312 tokenManagers.put(grammar.exportVocab, dup); 313 return; 314 } 315 ImportVocabTokenManager tm = 318 new ImportVocabTokenManager(grammar, 319 grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt, 320 grammar.exportVocab, 321 tool); 322 tm.setReadOnly(false); tokenManagers.put(grammar.exportVocab, tm); 325 grammar.setTokenManager(tm); 328 329 if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { 331 tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); 332 } 333 334 return; 335 } 336 337 if (grammar.exportVocab != null && grammar.importVocab == null) { 339 if (tokenManagers.containsKey(grammar.exportVocab)) { 341 TokenManager tm = (TokenManager)tokenManagers.get(grammar.exportVocab); 343 grammar.setTokenManager(tm); 345 return; 346 } 347 TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool); 350 grammar.setTokenManager(tm); 351 tokenManagers.put(grammar.exportVocab, tm); 353 if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { 355 tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); 356 } 357 return; 358 } 359 360 if (grammar.exportVocab != null && grammar.importVocab != null) { 362 if (grammar.importVocab.equals(grammar.exportVocab)) { 364 tool.error("exportVocab of " + grammar.exportVocab + " same as importVocab; probably not what you want"); 365 } 366 if (tokenManagers.containsKey(grammar.importVocab)) { 368 TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab); 371 TokenManager dup = (TokenManager)tm.clone(); 373 dup.setName(grammar.exportVocab); 374 dup.setReadOnly(false); 376 grammar.setTokenManager(dup); 377 tokenManagers.put(grammar.exportVocab, dup); 378 return; 379 } 380 ImportVocabTokenManager tm = 382 new ImportVocabTokenManager(grammar, 383 grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt, 384 grammar.exportVocab, 385 tool); 386 tm.setReadOnly(false); tokenManagers.put(grammar.exportVocab, tm); 389 grammar.setTokenManager(tm); 391 392 if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) { 394 tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm); 395 } 396 397 return; 398 } 399 } 400 401 public void endRule(String r) { 402 } 403 404 public void endSubRule() { 405 } 406 407 public void endTree() { 408 } 409 410 public void hasError() { 411 } 412 413 public void noASTSubRule() { 414 } 415 416 public void oneOrMoreSubRule() { 417 } 418 419 public void optionalSubRule() { 420 } 421 422 public void setUserExceptions(String thr) { 423 } 424 425 public void refAction(Token action) { 426 } 427 428 public void refArgAction(Token action) { 429 } 430 431 public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) { 432 } 433 434 public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { 435 } 436 437 public void refElementOption(Token option, Token value) { 438 } 439 440 public void refTokensSpecElementOption(Token tok, Token option, Token value) { 441 } 442 443 public void refExceptionHandler(Token exTypeAndName, Token action) { 444 } 445 446 public void refHeaderAction(Token name, Token act) { 448 String key; 449 450 if (name == null) 451 key = ""; 452 else 453 key = StringUtils.stripFrontBack(name.getText(), "\"", "\""); 454 455 if (headerActions.containsKey(key)) { 458 if (key.equals("")) 459 tool.error(act.getLine() + ": header action already defined"); 460 else 461 tool.error(act.getLine() + ": header action '" + key + "' already defined"); 462 } 463 headerActions.put(key, act); 464 } 465 466 public String getHeaderAction(String name) { 467 Token t = (Token)headerActions.get(name); 468 if (t == null) { 469 return ""; 470 } 471 return t.getText(); 472 } 473 474 public void refInitAction(Token action) { 475 } 476 477 public void refMemberAction(Token act) { 478 } 479 480 public void refPreambleAction(Token act) { 481 thePreambleAction = act; 482 } 483 484 public void refReturnAction(Token returnAction) { 485 } 486 487 public void refRule(Token idAssign, 488 Token r, 489 Token label, 490 Token args, 491 int autoGenType) { 492 String id = r.getText(); 493 if (r.type == ANTLRTokenTypes.TOKEN_REF) { 495 id = CodeGenerator.encodeLexerRuleName(id); 497 } 498 if (!grammar.isDefined(id)) { 499 grammar.define(new RuleSymbol(id)); 500 } 501 } 502 503 public void refSemPred(Token pred) { 504 } 505 506 public void refStringLiteral(Token lit, 507 Token label, 508 int autoGenType, 509 boolean lastInRule) { 510 _refStringLiteral(lit, label, autoGenType, lastInRule); 511 } 512 513 514 public void refToken(Token assignId, Token t, Token label, Token args, 515 boolean inverted, int autoGenType, boolean lastInRule) { 516 _refToken(assignId, t, label, args, inverted, autoGenType, lastInRule); 517 } 518 519 public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) { 520 if (t1.getText().charAt(0) == '"') { 523 refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule); 524 } 525 else { 526 _refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule); 527 } 528 if (t2.getText().charAt(0) == '"') { 529 _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule); 530 } 531 else { 532 _refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule); 533 } 534 } 535 536 public void refTreeSpecifier(Token treeSpec) { 537 } 538 539 public void refWildcard(Token t, Token label, int autoGenType) { 540 } 541 542 543 public void reset() { 544 grammar = null; 545 } 546 547 public void setArgOfRuleRef(Token argaction) { 548 } 549 550 551 public void setCharVocabulary(BitSet b) { 552 ((LexerGrammar)grammar).setCharVocabulary(b); 554 } 555 556 561 public void setFileOption(Token key, Token value, String filename) { 562 if (key.getText().equals("language")) { 563 if (value.getType() == ANTLRParser.STRING_LITERAL) { 564 language = StringUtils.stripBack(StringUtils.stripFront(value.getText(), '"'), '"'); 565 } 566 else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) { 567 language = value.getText(); 568 } 569 else { 570 tool.error("language option must be string or identifier", filename, value.getLine(), value.getColumn()); 571 } 572 } 573 else if (key.getText().equals("mangleLiteralPrefix")) { 574 if (value.getType() == ANTLRParser.STRING_LITERAL) { 575 tool.literalsPrefix = StringUtils.stripFrontBack(value.getText(), "\"", "\""); 576 } 577 else { 578 tool.error("mangleLiteralPrefix option must be string", filename, value.getLine(), value.getColumn()); 579 } 580 } 581 else if (key.getText().equals("upperCaseMangledLiterals")) { 582 if (value.getText().equals("true")) { 583 tool.upperCaseMangledLiterals = true; 584 } 585 else if (value.getText().equals("false")) { 586 tool.upperCaseMangledLiterals = false; 587 } 588 else { 589 grammar.antlrTool.error("Value for upperCaseMangledLiterals must be true or false", filename, key.getLine(), key.getColumn()); 590 } 591 } 592 else if ( key.getText().equals("namespaceStd") || 593 key.getText().equals("namespaceAntlr") || 594 key.getText().equals("genHashLines") 595 ) { 596 if (!language.equals("Cpp")) { 597 tool.error(key.getText() + " option only valid for C++", filename, key.getLine(), key.getColumn()); 598 } 599 else { 600 if (key.getText().equals("noConstructors")) { 601 if (!(value.getText().equals("true") || value.getText().equals("false"))) 602 tool.error("noConstructors option must be true or false", filename, value.getLine(), value.getColumn()); 603 tool.noConstructors = value.getText().equals("true"); 604 } else if (key.getText().equals("genHashLines")) { 605 if (!(value.getText().equals("true") || value.getText().equals("false"))) 606 tool.error("genHashLines option must be true or false", filename, value.getLine(), value.getColumn()); 607 tool.genHashLines = value.getText().equals("true"); 608 } 609 else { 610 if (value.getType() != ANTLRParser.STRING_LITERAL) { 611 tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn()); 612 } 613 else { 614 if (key.getText().equals("namespaceStd")) 615 tool.namespaceStd = value.getText(); 616 else if (key.getText().equals("namespaceAntlr")) 617 tool.namespaceAntlr = value.getText(); 618 } 619 } 620 } 621 } 622 else if ( key.getText().equals("namespace") ) { 623 if ( !language.equals("Cpp") && !language.equals("CSharp") ) 624 { 625 tool.error(key.getText() + " option only valid for C++ and C# (a.k.a CSharp)", filename, key.getLine(), key.getColumn()); 626 } 627 else 628 { 629 if (value.getType() != ANTLRParser.STRING_LITERAL) 630 { 631 tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn()); 632 } 633 else { 634 if (key.getText().equals("namespace")) 635 tool.setNameSpace(value.getText()); 636 } 637 } 638 } 639 else { 640 tool.error("Invalid file-level option: " + key.getText(), filename, key.getLine(), value.getColumn()); 641 } 642 } 643 644 649 public void setGrammarOption(Token key, Token value) { 650 if (key.getText().equals("tokdef") || key.getText().equals("tokenVocabulary")) { 651 tool.error("tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n" + 652 " Use importVocab/exportVocab instead. Please see the documentation.\n" + 653 " The previous options were so heinous that Terence changed the whole\n" + 654 " vocabulary mechanism; it was better to change the names rather than\n" + 655 " subtly change the functionality of the known options. Sorry!", grammar.getFilename(), value.getLine(), value.getColumn()); 656 } 657 else if (key.getText().equals("literal") && 658 grammar instanceof LexerGrammar) { 659 tool.error("the literal option is invalid >= ANTLR 2.6.0.\n" + 660 " Use the \"tokens {...}\" mechanism instead.", 661 grammar.getFilename(), value.getLine(), value.getColumn()); 662 } 663 else if (key.getText().equals("exportVocab")) { 664 if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) { 666 grammar.exportVocab = value.getText(); 667 } 668 else { 669 tool.error("exportVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn()); 670 } 671 } 672 else if (key.getText().equals("importVocab")) { 673 if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) { 674 grammar.importVocab = value.getText(); 675 } 676 else { 677 tool.error("importVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn()); 678 } 679 } 680 else { 681 grammar.setOption(key.getText(), value); 683 } 684 } 685 686 public void setRuleOption(Token key, Token value) { 687 } 688 689 public void setSubruleOption(Token key, Token value) { 690 } 691 692 693 public void startLexer(String file, Token name, String superClass, String doc) { 694 if (numLexers > 0) { 695 tool.panic("You may only have one lexer per grammar file: class " + name.getText()); 696 } 697 numLexers++; 698 reset(); 699 Grammar g = (Grammar)grammars.get(name); 702 if (g != null) { 703 if (!(g instanceof LexerGrammar)) { 704 tool.panic("'" + name.getText() + "' is already defined as a non-lexer"); 705 } 706 else { 707 tool.panic("Lexer '" + name.getText() + "' is already defined"); 708 } 709 } 710 else { 711 LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass); 713 lg.comment = doc; 714 lg.processArguments(args); 715 lg.setFilename(file); 716 grammars.put(lg.getClassName(), lg); 717 lg.preambleAction = thePreambleAction; 719 thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); 720 grammar = lg; 722 } 723 } 724 725 726 public void startParser(String file, Token name, String superClass, String doc) { 727 if (numParsers > 0) { 728 tool.panic("You may only have one parser per grammar file: class " + name.getText()); 729 } 730 numParsers++; 731 reset(); 732 Grammar g = (Grammar)grammars.get(name); 735 if (g != null) { 736 if (!(g instanceof ParserGrammar)) { 737 tool.panic("'" + name.getText() + "' is already defined as a non-parser"); 738 } 739 else { 740 tool.panic("Parser '" + name.getText() + "' is already defined"); 741 } 742 } 743 else { 744 grammar = new ParserGrammar(name.getText(), tool, superClass); 746 grammar.comment = doc; 747 grammar.processArguments(args); 748 grammar.setFilename(file); 749 grammars.put(grammar.getClassName(), grammar); 750 grammar.preambleAction = thePreambleAction; 752 thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); 753 } 754 } 755 756 757 public void startTreeWalker(String file, Token name, String superClass, String doc) { 758 if (numTreeParsers > 0) { 759 tool.panic("You may only have one tree parser per grammar file: class " + name.getText()); 760 } 761 numTreeParsers++; 762 reset(); 763 Grammar g = (Grammar)grammars.get(name); 766 if (g != null) { 767 if (!(g instanceof TreeWalkerGrammar)) { 768 tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker"); 769 } 770 else { 771 tool.panic("Tree-walker '" + name.getText() + "' is already defined"); 772 } 773 } 774 else { 775 grammar = new TreeWalkerGrammar(name.getText(), tool, superClass); 777 grammar.comment = doc; 778 grammar.processArguments(args); 779 grammar.setFilename(file); 780 grammars.put(grammar.getClassName(), grammar); 781 grammar.preambleAction = thePreambleAction; 783 thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); 784 } 785 } 786 787 public void synPred() { 788 } 789 790 public void zeroOrMoreSubRule() { 791 } 792 } 793 | Popular Tags |