
KERNEL-2034 fine-third dependency package fine-third-base: support JDK 11, converted to (vendored) source code

research/10.0
zhouping, 4 years ago
commit 0e8623264b
  1. fine-antlr-old/README.md (+2)
  2. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRError.java (+26)
  3. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRException.java (+27)
  4. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRGrammarParseBehavior.java (+124)
  5. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRHashString.java (+107)
  6. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRLexer.java (+1451)
  7. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRParser.java (+2961)
  8. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRStringBuffer.java (+82)
  9. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefLexer.java (+647)
  10. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParser.java (+241)
  11. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParserTokenTypes.java (+18)
  12. fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokenTypes.java (+68)
  13. fine-antlr-old/src/main/java/com/fr/third/antlr/ASTFactory.java (+411)
  14. fine-antlr-old/src/main/java/com/fr/third/antlr/ASTIterator.java (+76)
  15. fine-antlr-old/src/main/java/com/fr/third/antlr/ASTNULLType.java (+108)
  16. fine-antlr-old/src/main/java/com/fr/third/antlr/ASTPair.java (+43)
  17. fine-antlr-old/src/main/java/com/fr/third/antlr/ASTVisitor.java (+14)
  18. fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/ASDebugStream.java (+35)
  19. fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/IASDebugStream.java (+24)
  20. fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/TokenOffsetInfo.java (+22)
  21. fine-antlr-old/src/main/java/com/fr/third/antlr/ActionElement.java (+33)
  22. fine-antlr-old/src/main/java/com/fr/third/antlr/ActionTransInfo.java (+23)
  23. fine-antlr-old/src/main/java/com/fr/third/antlr/Alternative.java (+73)
  24. fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeBlock.java (+226)
  25. fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeElement.java (+43)
  26. fine-antlr-old/src/main/java/com/fr/third/antlr/BaseAST.java (+495)
  27. fine-antlr-old/src/main/java/com/fr/third/antlr/BlockContext.java (+32)
  28. fine-antlr-old/src/main/java/com/fr/third/antlr/BlockEndElement.java (+31)
  29. fine-antlr-old/src/main/java/com/fr/third/antlr/BlockWithImpliedExitPath.java (+24)
  30. fine-antlr-old/src/main/java/com/fr/third/antlr/ByteBuffer.java (+53)
  31. fine-antlr-old/src/main/java/com/fr/third/antlr/CharBuffer.java (+53)
  32. fine-antlr-old/src/main/java/com/fr/third/antlr/CharFormatter.java (+23)
  33. fine-antlr-old/src/main/java/com/fr/third/antlr/CharLiteralElement.java (+29)
  34. fine-antlr-old/src/main/java/com/fr/third/antlr/CharQueue.java (+95)
  35. fine-antlr-old/src/main/java/com/fr/third/antlr/CharRangeElement.java (+54)
  36. fine-antlr-old/src/main/java/com/fr/third/antlr/CharScanner.java (+409)
  37. fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamException.java (+21)
  38. fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamIOException.java (+22)
  39. fine-antlr-old/src/main/java/com/fr/third/antlr/CodeGenerator.java (+663)
  40. fine-antlr-old/src/main/java/com/fr/third/antlr/CommonAST.java (+59)
  41. fine-antlr-old/src/main/java/com/fr/third/antlr/CommonASTWithHiddenTokens.java (+47)
  42. fine-antlr-old/src/main/java/com/fr/third/antlr/CommonHiddenStreamToken.java (+41)
  43. fine-antlr-old/src/main/java/com/fr/third/antlr/CommonToken.java (+56)
  44. fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultFileLineFormatter.java (+33)
  45. fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultJavaCodeGeneratorPrintWriterManager.java (+73)
  46. fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultToolErrorHandler.java (+118)
  47. fine-antlr-old/src/main/java/com/fr/third/antlr/DefineGrammarSymbols.java (+811)
  48. fine-antlr-old/src/main/java/com/fr/third/antlr/DumpASTVisitor.java (+68)
  49. fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionHandler.java (+22)
  50. fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionSpec.java (+29)
  51. fine-antlr-old/src/main/java/com/fr/third/antlr/FileCopyException.java (+14)
  52. fine-antlr-old/src/main/java/com/fr/third/antlr/FileLineFormatter.java (+27)
  53. fine-antlr-old/src/main/java/com/fr/third/antlr/Grammar.java (+288)
  54. fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAnalyzer.java (+36)
  55. fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAtom.java (+68)
  56. fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarElement.java (+62)
  57. fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarSymbol.java (+30)
  58. fine-antlr-old/src/main/java/com/fr/third/antlr/ImportVocabTokenManager.java (+102)
  59. fine-antlr-old/src/main/java/com/fr/third/antlr/InputBuffer.java (+131)
  60. fine-antlr-old/src/main/java/com/fr/third/antlr/JavaBlockFinishingInfo.java (+34)
  61. fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCharFormatter.java (+87)
  62. fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGenerator.java (+3746)
  63. fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGeneratorPrintWriterManager.java (+21)
  64. fine-antlr-old/src/main/java/com/fr/third/antlr/LLkAnalyzer.java (+1095)
  65. fine-antlr-old/src/main/java/com/fr/third/antlr/LLkGrammarAnalyzer.java (+58)
  66. fine-antlr-old/src/main/java/com/fr/third/antlr/LLkParser.java (+85)
  67. fine-antlr-old/src/main/java/com/fr/third/antlr/LexerGrammar.java (+179)
  68. fine-antlr-old/src/main/java/com/fr/third/antlr/LexerSharedInputState.java (+80)
  69. fine-antlr-old/src/main/java/com/fr/third/antlr/Lookahead.java (+218)
  70. fine-antlr-old/src/main/java/com/fr/third/antlr/MakeGrammar.java (+792)
  71. fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedCharException.java (+146)
  72. fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedTokenException.java (+173)
  73. fine-antlr-old/src/main/java/com/fr/third/antlr/NameSpace.java (+64)
  74. fine-antlr-old/src/main/java/com/fr/third/antlr/NoViableAltException.java (+40)
  75. fine-antlr-old/src/main/java/com/fr/third/antlr/NoViableAltForCharException.java (+51)
  76. fine-antlr-old/src/main/java/com/fr/third/antlr/OneOrMoreBlock.java (+31)
  77. fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTree.java (+50)
  78. fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTreeRule.java (+70)
  79. fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTreeToken.java (+29)
  80. fine-antlr-old/src/main/java/com/fr/third/antlr/Parser.java (+374)
  81. fine-antlr-old/src/main/java/com/fr/third/antlr/ParserGrammar.java (+99)
  82. fine-antlr-old/src/main/java/com/fr/third/antlr/ParserSharedInputState.java (+38)
  83. fine-antlr-old/src/main/java/com/fr/third/antlr/PreservingFileWriter.java (+138)
  84. fine-antlr-old/src/main/java/com/fr/third/antlr/PrintWriterWithSMAP.java (+152)
  85. fine-antlr-old/src/main/java/com/fr/third/antlr/RecognitionException.java (+70)
  86. fine-antlr-old/src/main/java/com/fr/third/antlr/RuleBlock.java (+212)
  87. fine-antlr-old/src/main/java/com/fr/third/antlr/RuleEndElement.java (+33)
  88. fine-antlr-old/src/main/java/com/fr/third/antlr/RuleRefElement.java (+73)
  89. fine-antlr-old/src/main/java/com/fr/third/antlr/RuleSymbol.java (+53)
  90. fine-antlr-old/src/main/java/com/fr/third/antlr/SemanticException.java (+23)
  91. fine-antlr-old/src/main/java/com/fr/third/antlr/SimpleTokenManager.java (+143)
  92. fine-antlr-old/src/main/java/com/fr/third/antlr/StringLiteralElement.java (+65)
  93. fine-antlr-old/src/main/java/com/fr/third/antlr/StringLiteralSymbol.java (+25)
  94. fine-antlr-old/src/main/java/com/fr/third/antlr/StringUtils.java (+85)
  95. fine-antlr-old/src/main/java/com/fr/third/antlr/SynPredBlock.java (+31)
  96. fine-antlr-old/src/main/java/com/fr/third/antlr/Token.java (+78)
  97. fine-antlr-old/src/main/java/com/fr/third/antlr/TokenBuffer.java (+125)
  98. fine-antlr-old/src/main/java/com/fr/third/antlr/TokenManager.java (+60)
  99. fine-antlr-old/src/main/java/com/fr/third/antlr/TokenQueue.java (+96)
  100. fine-antlr-old/src/main/java/com/fr/third/antlr/TokenRangeElement.java (+50)
Some files were not shown because too many files have changed in this diff.

fine-antlr-old/README.md (+2)

@@ -0,0 +1,2 @@
1. `fine-antlr-old` is named this way to distinguish it from `fine-antlr4`,<br>
2. Source repository: https://cloud.finedevelop.com/projects/PF/repos/thirdtools/browse

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRError.java (+26)

@@ -0,0 +1,26 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRError.java#2 $
*/
public class ANTLRError extends Error {
/**
* ANTLRError constructor comment.
*/
public ANTLRError() {
super();
}
/**
* ANTLRError constructor comment.
* @param s java.lang.String
*/
public ANTLRError(String s) {
super(s);
}
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRException.java (+27)

@@ -0,0 +1,27 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRException.java#2 $
*/
public class ANTLRException extends Exception {
public ANTLRException() {
super();
}
public ANTLRException(String s) {
super(s);
}
public ANTLRException(String message, Throwable cause) {
super(message, cause);
}
public ANTLRException(Throwable cause) {
super(cause);
}
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRGrammarParseBehavior.java (+124)

@@ -0,0 +1,124 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRGrammarParseBehavior.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
public interface ANTLRGrammarParseBehavior {
public void abortGrammar();
public void beginAlt(boolean doAST_);
public void beginChildList();
// Exception handling
public void beginExceptionGroup();
public void beginExceptionSpec(Token label);
public void beginSubRule(Token label, Token start, boolean not);
// Trees
public void beginTree(Token tok) throws SemanticException;
public void defineRuleName(Token r, String access, boolean ruleAST, String docComment) throws SemanticException;
public void defineToken(Token tokname, Token tokliteral);
public void endAlt();
public void endChildList();
public void endExceptionGroup();
public void endExceptionSpec();
public void endGrammar();
public void endOptions();
public void endRule(String r);
public void endSubRule();
public void endTree();
public void hasError();
public void noASTSubRule();
public void oneOrMoreSubRule();
public void optionalSubRule();
public void refAction(Token action);
public void refArgAction(Token action);
public void setUserExceptions(String thr);
public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule);
public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);
public void refElementOption(Token option, Token value);
public void refTokensSpecElementOption(Token tok, Token option, Token value);
public void refExceptionHandler(Token exTypeAndName, Token action);
public void refHeaderAction(Token name, Token act);
public void refInitAction(Token action);
public void refMemberAction(Token act);
public void refPreambleAction(Token act);
public void refReturnAction(Token returnAction);
public void refRule(Token idAssign, Token r, Token label, Token arg, int autoGenType);
public void refSemPred(Token pred);
public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule);
public void refToken(Token assignId, Token t, Token label, Token args,
boolean inverted, int autoGenType, boolean lastInRule);
public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);
// Tree specifiers
public void refTreeSpecifier(Token treeSpec);
public void refWildcard(Token t, Token label, int autoGenType);
public void setArgOfRuleRef(Token argaction);
public void setCharVocabulary(BitSet b);
// Options
public void setFileOption(Token key, Token value, String filename);
public void setGrammarOption(Token key, Token value);
public void setRuleOption(Token key, Token value);
public void setSubruleOption(Token key, Token value);
public void startLexer(String file, Token name, String superClass, String doc);
// Flow control for grammars
public void startParser(String file, Token name, String superClass, String doc);
public void startTreeWalker(String file, Token name, String superClass, String doc);
public void synPred();
public void zeroOrMoreSubRule();
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRHashString.java (+107)

@@ -0,0 +1,107 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRHashString.java#2 $
*/
// class implements a String-like object whose sole purpose is to be
// entered into a lexer HashTable. It uses a lexer object to get
// information about case sensitivity.
public class ANTLRHashString {
// only one of s or buf is non-null
private String s;
private char[] buf;
private int len;
private CharScanner lexer;
private static final int prime = 151;
public ANTLRHashString(char[] buf, int length, CharScanner lexer) {
this.lexer = lexer;
setBuffer(buf, length);
}
// Hash strings constructed this way are unusable until setBuffer or setString are called.
public ANTLRHashString(CharScanner lexer) {
this.lexer = lexer;
}
public ANTLRHashString(String s, CharScanner lexer) {
this.lexer = lexer;
setString(s);
}
private final char charAt(int index) {
return (s != null) ? s.charAt(index) : buf[index];
}
// Return true if o is an ANTLRHashString equal to this.
public boolean equals(Object o) {
if (!(o instanceof ANTLRHashString) && !(o instanceof String)) {
return false;
}
ANTLRHashString s;
if (o instanceof String) {
s = new ANTLRHashString((String)o, lexer);
}
else {
s = (ANTLRHashString)o;
}
int l = length();
if (s.length() != l) {
return false;
}
if (lexer.getCaseSensitiveLiterals()) {
for (int i = 0; i < l; i++) {
if (charAt(i) != s.charAt(i)) {
return false;
}
}
}
else {
for (int i = 0; i < l; i++) {
if (lexer.toLower(charAt(i)) != lexer.toLower(s.charAt(i))) {
return false;
}
}
}
return true;
}
public int hashCode() {
int hashval = 0;
int l = length();
if (lexer.getCaseSensitiveLiterals()) {
for (int i = 0; i < l; i++) {
hashval = hashval * prime + charAt(i);
}
}
else {
for (int i = 0; i < l; i++) {
hashval = hashval * prime + lexer.toLower(charAt(i));
}
}
return hashval;
}
private final int length() {
return (s != null) ? s.length() : len;
}
public void setBuffer(char[] buf, int length) {
this.buf = buf;
this.len = length;
s = null;
}
public void setString(String s) {
this.s = s;
buf = null;
}
}
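
Note (illustration, not part of the commit): the class comment above describes a hash-table key whose equality and hash code follow the owning lexer's case-sensitivity flag. A minimal sketch of that behaviour, using the ANTLRTokdefLexer added later in this diff; HashStringDemo is a hypothetical name:

import java.io.StringReader;
import com.fr.third.antlr.ANTLRHashString;
import com.fr.third.antlr.ANTLRTokdefLexer;

public class HashStringDemo {
    public static void main(String[] args) {
        // ANTLRTokdefLexer turns case sensitivity on in its constructor,
        // so the comparison below distinguishes case.
        ANTLRTokdefLexer lexer = new ANTLRTokdefLexer(new StringReader(""));
        ANTLRHashString a = new ANTLRHashString("tokens", lexer);
        ANTLRHashString b = new ANTLRHashString("TOKENS", lexer);
        System.out.println(a.equals(b));        // false: case differs
        System.out.println(a.equals("tokens")); // true: plain String operands are accepted too
    }
}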

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRLexer.java (+1451)

File diff suppressed because it is too large.

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRParser.java (+2961)

File diff suppressed because it is too large.

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRStringBuffer.java (+82)

@@ -0,0 +1,82 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRStringBuffer.java#2 $
*/
// Implementation of a StringBuffer-like object that does not have the
// unfortunate side-effect of creating Strings with very large buffers.
public class ANTLRStringBuffer {
protected char[] buffer = null;
protected int length = 0; // length and also where to store next char
public ANTLRStringBuffer() {
buffer = new char[50];
}
public ANTLRStringBuffer(int n) {
buffer = new char[n];
}
public final void append(char c) {
// This would normally be an "ensureCapacity" method, but inlined
// here for speed.
if (length >= buffer.length) {
// Compute a new length that is at least double old length
int newSize = buffer.length;
while (length >= newSize) {
newSize *= 2;
}
// Allocate new array and copy buffer
char[] newBuffer = new char[newSize];
for (int i = 0; i < length; i++) {
newBuffer[i] = buffer[i];
}
buffer = newBuffer;
}
buffer[length] = c;
length++;
}
public final void append(String s) {
for (int i = 0; i < s.length(); i++) {
append(s.charAt(i));
}
}
public final char charAt(int index) {
return buffer[index];
}
final public char[] getBuffer() {
return buffer;
}
public final int length() {
return length;
}
public final void setCharAt(int index, char ch) {
buffer[index] = ch;
}
public final void setLength(int newLength) {
if (newLength < length) {
length = newLength;
}
else {
while (newLength > length) {
append('\0');
}
}
}
public final String toString() {
return new String(buffer, 0, length);
}
}
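
Note (illustration, not part of the commit): the comment above explains that this buffer avoids StringBuffer's large-buffer side effect, grows by doubling, and exposes its backing array; that is the slicing idiom the generated lexers below use to build token text. A minimal hypothetical sketch:

import com.fr.third.antlr.ANTLRStringBuffer;

public class StringBufferDemo {
    public static void main(String[] args) {
        ANTLRStringBuffer text = new ANTLRStringBuffer(4); // starts with a 4-char buffer
        int begin = text.length();                         // remember where this token starts
        text.append("grammar");                            // buffer doubles as needed
        // Same slicing idiom the lexers use when setting a token's text:
        System.out.println(new String(text.getBuffer(), begin, text.length() - begin)); // grammar
        text.setLength(begin);                             // rewind, discarding the token's characters
    }
}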

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefLexer.java (+647)

@@ -0,0 +1,647 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefLexer.java"$
package com.fr.third.antlr;
import java.io.InputStream;
import java.io.Reader;
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
public class ANTLRTokdefLexer extends com.fr.third.antlr.CharScanner implements ANTLRTokdefParserTokenTypes, TokenStream
{
public ANTLRTokdefLexer(InputStream in) {
this(new ByteBuffer(in));
}
public ANTLRTokdefLexer(Reader in) {
this(new CharBuffer(in));
}
public ANTLRTokdefLexer(InputBuffer ib) {
this(new LexerSharedInputState(ib));
}
public ANTLRTokdefLexer(LexerSharedInputState state) {
super(state);
caseSensitiveLiterals = true;
setCaseSensitive(true);
literals = new Hashtable();
}
public Token nextToken() throws TokenStreamException {
Token theRetToken=null;
tryAgain:
for (;;) {
Token _token = null;
int _ttype = Token.INVALID_TYPE;
resetText();
try { // for char stream error handling
try { // for lexical error handling
switch ( LA(1)) {
case '\t': case '\n': case '\r': case ' ':
{
mWS(true);
theRetToken=_returnToken;
break;
}
case '(':
{
mLPAREN(true);
theRetToken=_returnToken;
break;
}
case ')':
{
mRPAREN(true);
theRetToken=_returnToken;
break;
}
case '=':
{
mASSIGN(true);
theRetToken=_returnToken;
break;
}
case '"':
{
mSTRING(true);
theRetToken=_returnToken;
break;
}
case 'A': case 'B': case 'C': case 'D':
case 'E': case 'F': case 'G': case 'H':
case 'I': case 'J': case 'K': case 'L':
case 'M': case 'N': case 'O': case 'P':
case 'Q': case 'R': case 'S': case 'T':
case 'U': case 'V': case 'W': case 'X':
case 'Y': case 'Z': case 'a': case 'b':
case 'c': case 'd': case 'e': case 'f':
case 'g': case 'h': case 'i': case 'j':
case 'k': case 'l': case 'm': case 'n':
case 'o': case 'p': case 'q': case 'r':
case 's': case 't': case 'u': case 'v':
case 'w': case 'x': case 'y': case 'z':
{
mID(true);
theRetToken=_returnToken;
break;
}
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
{
mINT(true);
theRetToken=_returnToken;
break;
}
default:
if ((LA(1)=='/') && (LA(2)=='/')) {
mSL_COMMENT(true);
theRetToken=_returnToken;
}
else if ((LA(1)=='/') && (LA(2)=='*')) {
mML_COMMENT(true);
theRetToken=_returnToken;
}
else {
if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
}
}
if ( _returnToken==null ) continue tryAgain; // found SKIP token
_ttype = _returnToken.getType();
_returnToken.setType(_ttype);
return _returnToken;
}
catch (RecognitionException e) {
throw new TokenStreamRecognitionException(e);
}
}
catch (CharStreamException cse) {
if ( cse instanceof CharStreamIOException ) {
throw new TokenStreamIOException(((CharStreamIOException)cse).io);
}
else {
throw new TokenStreamException(cse.getMessage());
}
}
}
}
public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = WS;
int _saveIndex;
{
switch ( LA(1)) {
case ' ':
{
match(' ');
break;
}
case '\t':
{
match('\t');
break;
}
case '\r':
{
match('\r');
{
if ((LA(1)=='\n')) {
match('\n');
}
else {
}
}
newline();
break;
}
case '\n':
{
match('\n');
newline();
break;
}
default:
{
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
}
_ttype = Token.SKIP;
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = SL_COMMENT;
int _saveIndex;
match("//");
{
_loop234:
do {
if ((_tokenSet_0.member(LA(1)))) {
{
match(_tokenSet_0);
}
}
else {
break _loop234;
}
} while (true);
}
{
switch ( LA(1)) {
case '\n':
{
match('\n');
break;
}
case '\r':
{
match('\r');
{
if ((LA(1)=='\n')) {
match('\n');
}
else {
}
}
break;
}
default:
{
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
}
_ttype = Token.SKIP; newline();
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = ML_COMMENT;
int _saveIndex;
match("/*");
{
_loop239:
do {
if ((LA(1)=='*') && (_tokenSet_1.member(LA(2)))) {
match('*');
matchNot('/');
}
else if ((LA(1)=='\n')) {
match('\n');
newline();
}
else if ((_tokenSet_2.member(LA(1)))) {
matchNot('*');
}
else {
break _loop239;
}
} while (true);
}
match("*/");
_ttype = Token.SKIP;
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = LPAREN;
int _saveIndex;
match('(');
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = RPAREN;
int _saveIndex;
match(')');
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = ASSIGN;
int _saveIndex;
match('=');
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = STRING;
int _saveIndex;
match('"');
{
_loop245:
do {
if ((LA(1)=='\\')) {
mESC(false);
}
else if ((_tokenSet_3.member(LA(1)))) {
matchNot('"');
}
else {
break _loop245;
}
} while (true);
}
match('"');
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = ESC;
int _saveIndex;
match('\\');
{
switch ( LA(1)) {
case 'n':
{
match('n');
break;
}
case 'r':
{
match('r');
break;
}
case 't':
{
match('t');
break;
}
case 'b':
{
match('b');
break;
}
case 'f':
{
match('f');
break;
}
case '"':
{
match('"');
break;
}
case '\'':
{
match('\'');
break;
}
case '\\':
{
match('\\');
break;
}
case '0': case '1': case '2': case '3':
{
{
matchRange('0','3');
}
{
if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
mDIGIT(false);
{
if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
mDIGIT(false);
}
else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
}
else {
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
}
else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
}
else {
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
break;
}
case '4': case '5': case '6': case '7':
{
{
matchRange('4','7');
}
{
if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
mDIGIT(false);
}
else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
}
else {
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
break;
}
case 'u':
{
match('u');
mXDIGIT(false);
mXDIGIT(false);
mXDIGIT(false);
mXDIGIT(false);
break;
}
default:
{
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
}
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = DIGIT;
int _saveIndex;
matchRange('0','9');
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = XDIGIT;
int _saveIndex;
switch ( LA(1)) {
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
{
matchRange('0','9');
break;
}
case 'a': case 'b': case 'c': case 'd':
case 'e': case 'f':
{
matchRange('a','f');
break;
}
case 'A': case 'B': case 'C': case 'D':
case 'E': case 'F':
{
matchRange('A','F');
break;
}
default:
{
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = ID;
int _saveIndex;
{
switch ( LA(1)) {
case 'a': case 'b': case 'c': case 'd':
case 'e': case 'f': case 'g': case 'h':
case 'i': case 'j': case 'k': case 'l':
case 'm': case 'n': case 'o': case 'p':
case 'q': case 'r': case 's': case 't':
case 'u': case 'v': case 'w': case 'x':
case 'y': case 'z':
{
matchRange('a','z');
break;
}
case 'A': case 'B': case 'C': case 'D':
case 'E': case 'F': case 'G': case 'H':
case 'I': case 'J': case 'K': case 'L':
case 'M': case 'N': case 'O': case 'P':
case 'Q': case 'R': case 'S': case 'T':
case 'U': case 'V': case 'W': case 'X':
case 'Y': case 'Z':
{
matchRange('A','Z');
break;
}
default:
{
throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
}
}
}
{
_loop258:
do {
switch ( LA(1)) {
case 'a': case 'b': case 'c': case 'd':
case 'e': case 'f': case 'g': case 'h':
case 'i': case 'j': case 'k': case 'l':
case 'm': case 'n': case 'o': case 'p':
case 'q': case 'r': case 's': case 't':
case 'u': case 'v': case 'w': case 'x':
case 'y': case 'z':
{
matchRange('a','z');
break;
}
case 'A': case 'B': case 'C': case 'D':
case 'E': case 'F': case 'G': case 'H':
case 'I': case 'J': case 'K': case 'L':
case 'M': case 'N': case 'O': case 'P':
case 'Q': case 'R': case 'S': case 'T':
case 'U': case 'V': case 'W': case 'X':
case 'Y': case 'Z':
{
matchRange('A','Z');
break;
}
case '_':
{
match('_');
break;
}
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
{
matchRange('0','9');
break;
}
default:
{
break _loop258;
}
}
} while (true);
}
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
int _ttype; Token _token=null; int _begin=text.length();
_ttype = INT;
int _saveIndex;
{
int _cnt261=0;
_loop261:
do {
if (((LA(1) >= '0' && LA(1) <= '9'))) {
mDIGIT(false);
}
else {
if ( _cnt261>=1 ) { break _loop261; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
}
_cnt261++;
} while (true);
}
if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
_token = makeToken(_ttype);
_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
}
_returnToken = _token;
}
private static final long[] mk_tokenSet_0() {
long[] data = new long[8];
data[0]=-9224L;
for (int i = 1; i<=3; i++) { data[i]=-1L; }
return data;
}
public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
private static final long[] mk_tokenSet_1() {
long[] data = new long[8];
data[0]=-140737488355336L;
for (int i = 1; i<=3; i++) { data[i]=-1L; }
return data;
}
public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
private static final long[] mk_tokenSet_2() {
long[] data = new long[8];
data[0]=-4398046512136L;
for (int i = 1; i<=3; i++) { data[i]=-1L; }
return data;
}
public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
private static final long[] mk_tokenSet_3() {
long[] data = new long[8];
data[0]=-17179869192L;
data[1]=-268435457L;
for (int i = 2; i<=3; i++) { data[i]=-1L; }
return data;
}
public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
}
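
Note (illustration, not part of the commit): a hypothetical sketch of driving this generated lexer over a small token-definition fragment; whitespace and comments are marked SKIP, so no token is returned for them and only ID, ASSIGN and INT tokens come back here:

import java.io.StringReader;
import com.fr.third.antlr.ANTLRTokdefLexer;
import com.fr.third.antlr.Token;

public class TokdefLexerDemo {
    public static void main(String[] args) throws Exception {
        ANTLRTokdefLexer lexer = new ANTLRTokdefLexer(new StringReader("MyVocab\nLPAREN=7\n"));
        // Pull tokens until the lexer reports EOF.
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF_TYPE; t = lexer.nextToken()) {
            System.out.println(t.getType() + " : " + t.getText());
        }
    }
}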

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParser.java (+241)

@@ -0,0 +1,241 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
package com.fr.third.antlr;
import com.fr.third.antlr.collections.impl.BitSet;
/** Simple lexer/parser for reading token definition files
in support of the import/export vocab option for grammars.
*/
public class ANTLRTokdefParser extends com.fr.third.antlr.LLkParser implements ANTLRTokdefParserTokenTypes
{
// This chunk of error reporting code provided by Brian Smith
private com.fr.third.antlr.Tool antlrTool;
/** In order to make it so existing subclasses don't break, we won't require
* that the antlr.Tool instance be passed as a constructor element. Instead,
* the antlr.Tool instance should register itself via {@link #initTool(com.fr.third.antlr.Tool)}
* @throws IllegalStateException if a tool has already been registered
* @since 2.7.2
*/
public void setTool(com.fr.third.antlr.Tool tool) {
if (antlrTool == null) {
antlrTool = tool;
}
else {
throw new IllegalStateException("antlr.Tool already registered");
}
}
/** @since 2.7.2 */
protected com.fr.third.antlr.Tool getTool() {
return antlrTool;
}
/** Delegates the error message to the tool if any was registered via
* {@link #initTool(com.fr.third.antlr.Tool)}
* @since 2.7.2
*/
public void reportError(String s) {
if (getTool() != null) {
getTool().error(s, getFilename(), -1, -1);
}
else {
super.reportError(s);
}
}
/** Delegates the error message to the tool if any was registered via
* {@link #initTool(com.fr.third.antlr.Tool)}
* @since 2.7.2
*/
public void reportError(RecognitionException e) {
if (getTool() != null) {
getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn());
}
else {
super.reportError(e);
}
}
/** Delegates the warning message to the tool if any was registered via
* {@link #initTool(com.fr.third.antlr.Tool)}
* @since 2.7.2
*/
public void reportWarning(String s) {
if (getTool() != null) {
getTool().warning(s, getFilename(), -1, -1);
}
else {
super.reportWarning(s);
}
}
protected ANTLRTokdefParser(TokenBuffer tokenBuf, int k) {
super(tokenBuf,k);
tokenNames = _tokenNames;
}
public ANTLRTokdefParser(TokenBuffer tokenBuf) {
this(tokenBuf,3);
}
protected ANTLRTokdefParser(TokenStream lexer, int k) {
super(lexer,k);
tokenNames = _tokenNames;
}
public ANTLRTokdefParser(TokenStream lexer) {
this(lexer,3);
}
public ANTLRTokdefParser(ParserSharedInputState state) {
super(state,3);
tokenNames = _tokenNames;
}
public final void file(
ImportVocabTokenManager tm
) throws RecognitionException, TokenStreamException {
Token name = null;
try { // for error handling
name = LT(1);
match(ID);
{
_loop225:
do {
if ((LA(1)==ID||LA(1)==STRING)) {
line(tm);
}
else {
break _loop225;
}
} while (true);
}
}
catch (RecognitionException ex) {
reportError(ex);
consume();
consumeUntil(_tokenSet_0);
}
}
public final void line(
ImportVocabTokenManager tm
) throws RecognitionException, TokenStreamException {
Token s1 = null;
Token lab = null;
Token s2 = null;
Token id = null;
Token para = null;
Token id2 = null;
Token i = null;
Token t=null; Token s=null;
try { // for error handling
{
if ((LA(1)==STRING)) {
s1 = LT(1);
match(STRING);
s = s1;
}
else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==STRING)) {
lab = LT(1);
match(ID);
t = lab;
match(ASSIGN);
s2 = LT(1);
match(STRING);
s = s2;
}
else if ((LA(1)==ID) && (LA(2)==LPAREN)) {
id = LT(1);
match(ID);
t=id;
match(LPAREN);
para = LT(1);
match(STRING);
match(RPAREN);
}
else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==INT)) {
id2 = LT(1);
match(ID);
t=id2;
}
else {
throw new NoViableAltException(LT(1), getFilename());
}
}
match(ASSIGN);
i = LT(1);
match(INT);
Integer value = Integer.valueOf(i.getText());
// if literal found, define as a string literal
if ( s!=null ) {
tm.define(s.getText(), value.intValue());
// if label, then label the string and map label to token symbol also
if ( t!=null ) {
StringLiteralSymbol sl =
(StringLiteralSymbol) tm.getTokenSymbol(s.getText());
sl.setLabel(t.getText());
tm.mapToTokenSymbol(t.getText(), sl);
}
}
// define token (not a literal)
else if ( t!=null ) {
tm.define(t.getText(), value.intValue());
if ( para!=null ) {
TokenSymbol ts = tm.getTokenSymbol(t.getText());
ts.setParaphrase(
para.getText()
);
}
}
}
catch (RecognitionException ex) {
reportError(ex);
consume();
consumeUntil(_tokenSet_1);
}
}
public static final String[] _tokenNames = {
"<0>",
"EOF",
"<2>",
"NULL_TREE_LOOKAHEAD",
"ID",
"STRING",
"ASSIGN",
"LPAREN",
"RPAREN",
"INT",
"WS",
"SL_COMMENT",
"ML_COMMENT",
"ESC",
"DIGIT",
"XDIGIT"
};
private static final long[] mk_tokenSet_0() {
long[] data = { 2L, 0L};
return data;
}
public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
private static final long[] mk_tokenSet_1() {
long[] data = { 50L, 0L};
return data;
}
public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
}
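
Note (illustration, not part of the commit): the javadoc above says the antlr Tool is registered after construction rather than through a constructor argument, and that a second registration fails. A hypothetical sketch; it assumes com.fr.third.antlr.Tool has a public no-argument constructor, which is not shown in this diff:

import java.io.StringReader;
import com.fr.third.antlr.ANTLRTokdefLexer;
import com.fr.third.antlr.ANTLRTokdefParser;
import com.fr.third.antlr.Tool;

public class TokdefParserDemo {
    public static void main(String[] args) {
        ANTLRTokdefParser parser =
            new ANTLRTokdefParser(new ANTLRTokdefLexer(new StringReader("MyVocab\n")));
        parser.setTool(new Tool());   // assumed no-arg constructor; errors/warnings now go to the tool
        parser.setTool(new Tool());   // throws IllegalStateException: a tool is already registered
    }
}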

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParserTokenTypes.java (+18)

@@ -0,0 +1,18 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
package com.fr.third.antlr;
public interface ANTLRTokdefParserTokenTypes {
int EOF = 1;
int NULL_TREE_LOOKAHEAD = 3;
int ID = 4;
int STRING = 5;
int ASSIGN = 6;
int LPAREN = 7;
int RPAREN = 8;
int INT = 9;
int WS = 10;
int SL_COMMENT = 11;
int ML_COMMENT = 12;
int ESC = 13;
int DIGIT = 14;
int XDIGIT = 15;
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokenTypes.java (+68)

@@ -0,0 +1,68 @@
// $ANTLR 2.7.3rc3: "antlr.g" -> "ANTLRLexer.java"$
package com.fr.third.antlr;
public interface ANTLRTokenTypes {
int EOF = 1;
int NULL_TREE_LOOKAHEAD = 3;
int LITERAL_tokens = 4;
int LITERAL_header = 5;
int STRING_LITERAL = 6;
int ACTION = 7;
int DOC_COMMENT = 8;
int LITERAL_lexclass = 9;
int LITERAL_class = 10;
int LITERAL_extends = 11;
int LITERAL_Lexer = 12;
int LITERAL_TreeParser = 13;
int OPTIONS = 14;
int ASSIGN = 15;
int SEMI = 16;
int RCURLY = 17;
int LITERAL_charVocabulary = 18;
int CHAR_LITERAL = 19;
int INT = 20;
int OR = 21;
int RANGE = 22;
int TOKENS = 23;
int TOKEN_REF = 24;
int OPEN_ELEMENT_OPTION = 25;
int CLOSE_ELEMENT_OPTION = 26;
int LPAREN = 27;
int RPAREN = 28;
int LITERAL_Parser = 29;
int LITERAL_protected = 30;
int LITERAL_public = 31;
int LITERAL_private = 32;
int BANG = 33;
int ARG_ACTION = 34;
int LITERAL_returns = 35;
int COLON = 36;
int LITERAL_throws = 37;
int COMMA = 38;
int LITERAL_exception = 39;
int LITERAL_catch = 40;
int RULE_REF = 41;
int NOT_OP = 42;
int SEMPRED = 43;
int TREE_BEGIN = 44;
int QUESTION = 45;
int STAR = 46;
int PLUS = 47;
int IMPLIES = 48;
int CARET = 49;
int WILDCARD = 50;
int LITERAL_options = 51;
int WS = 52;
int COMMENT = 53;
int SL_COMMENT = 54;
int ML_COMMENT = 55;
int ESC = 56;
int DIGIT = 57;
int XDIGIT = 58;
int NESTED_ARG_ACTION = 59;
int NESTED_ACTION = 60;
int WS_LOOP = 61;
int INTERNAL_RULE_REF = 62;
int WS_OPT = 63;
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ASTFactory.java (+411)

@@ -0,0 +1,411 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTFactory.java#2 $
*/
import java.lang.reflect.Constructor;
import java.util.Hashtable;
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.impl.ASTArray;
/** AST Support code shared by TreeParser and Parser.
* We use delegation to share code (and have only one
* bit of code to maintain) rather than subclassing
* or superclassing (forces AST support code to be
* loaded even when you don't want to do AST stuff).
*
* Typically, setASTNodeType is used to specify the
* homogeneous type of node to create, but you can override
* create to make heterogeneous nodes etc...
*/
public class ASTFactory {
/** Name of AST class to create during tree construction.
* Null implies that the create method should create
* a default AST type such as CommonAST. This is for
* homogeneous nodes.
*/
protected String theASTNodeType = null;
protected Class theASTNodeTypeClass = null;
/** How to specify the classname to create for a particular
* token type. Note that ANTLR allows you to say, for example,
*
tokens {
PLUS<AST=PLUSNode>;
...
}
*
* and it tracks everything statically. #[PLUS] will make you
* a PLUSNode w/o use of this table.
*
* For tokens that ANTLR cannot track statically like #[i],
* you can use this table to map PLUS (Integer) -> PLUSNode (Class)
* etc... ANTLR sets the class map from the tokens {...} section
* via the ASTFactory(Hashtable) ctor in antlr.Parser.
*/
protected Hashtable tokenTypeToASTClassMap = null;
public ASTFactory() {
}
/** Create factory with a specific mapping from token type
* to Java AST node type. Your subclasses of ASTFactory
* can override and reuse the map stuff.
*/
public ASTFactory(Hashtable tokenTypeToClassMap) {
setTokenTypeToASTClassMap(tokenTypeToClassMap);
}
/** Specify an "override" for the Java AST object created for a
* specific token. It is provided as a convenience so
* you can specify node types dynamically. ANTLR sets
* the token type mapping automatically from the tokens{...}
* section, but you can change that mapping with this method.
* ANTLR does its best to statically determine the node
* type for generating parsers, but it cannot deal with
* dynamic values like #[LT(1)]. In this case, it relies
* on the mapping. Beware differences in the tokens{...}
* section and what you set via this method. Make sure
* they are the same.
*
* Set className to null to remove the mapping.
*
* @since 2.7.2
*/
public void setTokenTypeASTNodeType(int tokenType, String className)
throws IllegalArgumentException
{
if ( tokenTypeToASTClassMap==null ) {
tokenTypeToASTClassMap = new Hashtable();
}
if ( className==null ) {
tokenTypeToASTClassMap.remove(new Integer(tokenType));
return;
}
Class c = null;
try {
c = Utils.loadClass(className);
tokenTypeToASTClassMap.put(new Integer(tokenType), c);
}
catch (Exception e) {
throw new IllegalArgumentException("Invalid class, "+className);
}
}
/** For a given token type, what is the AST node object type to create
* for it?
* @since 2.7.2
*/
public Class getASTNodeType(int tokenType) {
// try node specific class
if ( tokenTypeToASTClassMap!=null ) {
Class c = (Class)tokenTypeToASTClassMap.get(new Integer(tokenType));
if ( c!=null ) {
return c;
}
}
// try a global specified class
if (theASTNodeTypeClass != null) {
return theASTNodeTypeClass;
}
// default to the common type
return CommonAST.class;
}
/** Add a child to the current AST */
public void addASTChild(ASTPair currentAST, AST child) {
if (child != null) {
if (currentAST.root == null) {
// Make new child the current root
currentAST.root = child;
}
else {
if (currentAST.child == null) {
// Add new child to current root
currentAST.root.setFirstChild(child);
}
else {
currentAST.child.setNextSibling(child);
}
}
// Make new child the current child
currentAST.child = child;
currentAST.advanceChildToEnd();
}
}
/** Create a new empty AST node; if the user did not specify
* an AST node type, then create a default one: CommonAST.
*/
public AST create() {
return create(Token.INVALID_TYPE);
}
public AST create(int type) {
Class c = getASTNodeType(type);
AST t = create(c);
if ( t!=null ) {
t.initialize(type, "");
}
return t;
}
public AST create(int type, String txt) {
AST t = create(type);
if ( t!=null ) {
t.initialize(type, txt);
}
return t;
}
/** Create an AST node with the token type and text passed in, but
* with a specific Java object type. Typically called when you
* say @[PLUS,"+",PLUSNode] in an antlr action.
* @since 2.7.2
*/
public AST create(int type, String txt, String className) {
AST t = create(className);
if ( t!=null ) {
t.initialize(type, txt);
}
return t;
}
/** Create a new empty AST node; if the user did not specify
* an AST node type, then create a default one: CommonAST.
*/
public AST create(AST tr) {
if (tr == null) return null; // create(null) == null
AST t = create(tr.getType());
if ( t!=null ) {
t.initialize(tr);
}
return t;
}
public AST create(Token tok) {
AST t = create(tok.getType());
if ( t!=null ) {
t.initialize(tok);
}
return t;
}
/** ANTLR generates reference to this when you reference a token
* that has a specified heterogeneous AST node type. This is
* also a special case node creation routine for backward
* compatibility. Before, ANTLR generated "new T(tokenObject)"
* and so I must call the appropriate constructor not T().
*
* @since 2.7.2
*/
public AST create(Token tok, String className) {
AST t = createUsingCtor(tok,className);
return t;
}
/**
* @since 2.7.2
*/
public AST create(String className) {
Class c = null;
try {
c = Utils.loadClass(className);
}
catch (Exception e) {
throw new IllegalArgumentException("Invalid class, "+className);
}
return create(c);
}
/**
* @since 2.7.2
*/
protected AST createUsingCtor(Token token, String className) {
Class c = null;
AST t = null;
try {
c = Utils.loadClass(className);
Class[] tokenArgType = new Class[] { com.fr.third.antlr.Token.class };
try {
Constructor ctor = c.getConstructor(tokenArgType);
t = (AST)ctor.newInstance(new Object[]{token}); // make a new one
}
catch (NoSuchMethodException e){
// just do the regular thing if you can't find the ctor
// Your AST must have default ctor to use this.
t = create(c);
if ( t!=null ) {
t.initialize(token);
}
}
}
catch (Exception e) {
throw new IllegalArgumentException("Invalid class or can't make instance, "+className);
}
return t;
}
/**
* @since 2.7.2
*/
protected AST create(Class c) {
AST t = null;
try {
t = (AST)c.newInstance(); // make a new one
}
catch (Exception e) {
error("Can't create AST Node " + c.getName());
return null;
}
return t;
}
/** Copy a single node with the same Java AST object type.
* Ignore the tokenType->Class mapping since you know
* the type of the node, t.getClass(), and doing a dup.
*
* clone() is not used because we want all AST creation
* to go thru the factory so creation can be
* tracked. Returns null if t is null.
*/
public AST dup(AST t) {
if ( t==null ) {
return null;
}
AST dup_t = create(t.getClass());
dup_t.initialize(t);
return dup_t;
}
/** Duplicate tree including siblings of root. */
public AST dupList(AST t) {
AST result = dupTree(t); // if t == null, then result==null
AST nt = result;
while (t != null) { // for each sibling of the root
t = t.getNextSibling();
nt.setNextSibling(dupTree(t)); // dup each subtree, building new tree
nt = nt.getNextSibling();
}
return result;
}
/**Duplicate a tree, assuming this is a root node of a tree--
* duplicate that node and what's below; ignore siblings of root node.
*/
public AST dupTree(AST t) {
AST result = dup(t); // make copy of root
// copy all children of root.
if (t != null) {
result.setFirstChild(dupList(t.getFirstChild()));
}
return result;
}
/** Make a tree from a list of nodes. The first element in the
* array is the root. If the root is null, then the tree is
* a simple list not a tree. Handles null children nodes correctly.
* For example, build(a, b, null, c) yields tree (a b c). build(null,a,b)
* yields tree (nil a b).
*/
public AST make(AST[] nodes) {
if (nodes == null || nodes.length == 0) return null;
AST root = nodes[0];
AST tail = null;
if (root != null) {
root.setFirstChild(null); // don't leave any old pointers set
}
// link in children;
for (int i = 1; i < nodes.length; i++) {
if (nodes[i] == null) continue; // ignore null nodes
if (root == null) {
// Set the root and set it up for a flat list
root = tail = nodes[i];
}
else if (tail == null) {
root.setFirstChild(nodes[i]);
tail = root.getFirstChild();
}
else {
tail.setNextSibling(nodes[i]);
tail = tail.getNextSibling();
}
// Chase tail to last sibling
while (tail.getNextSibling() != null) {
tail = tail.getNextSibling();
}
}
return root;
}
/** Make a tree from a list of nodes, where the nodes are contained
* in an ASTArray object
*/
public AST make(ASTArray nodes) {
return make(nodes.array);
}
/** Make an AST the root of current AST */
public void makeASTRoot(ASTPair currentAST, AST root) {
if (root != null) {
// Add the current root as a child of new root
root.addChild(currentAST.root);
// The new current child is the last sibling of the old root
currentAST.child = currentAST.root;
currentAST.advanceChildToEnd();
// Set the new root
currentAST.root = root;
}
}
public void setASTNodeClass(Class c) {
if ( c!=null ) {
theASTNodeTypeClass = c;
theASTNodeType = c.getName();
}
}
public void setASTNodeClass(String t) {
theASTNodeType = t;
try {
theASTNodeTypeClass = Utils.loadClass(t); // get class def
}
catch (Exception e) {
// either class not found,
// class is interface/abstract, or
// class or initializer is not accessible.
error("Can't find/access AST Node type" + t);
}
}
/** Specify the type of node to create during tree building.
* @deprecated since 2.7.1
*/
public void setASTNodeType(String t) {
setASTNodeClass(t);
}
public Hashtable getTokenTypeToASTClassMap() {
return tokenTypeToASTClassMap;
}
public void setTokenTypeToASTClassMap(Hashtable tokenTypeToClassMap) {
this.tokenTypeToASTClassMap = tokenTypeToClassMap;
}
/** To change where error messages go, can subclass/override this method
* and then setASTFactory in Parser and TreeParser. This method removes
* a prior dependency on class antlr.Tool.
*/
public void error(String e) {
System.err.println(e);
}
}
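
Note (illustration, not part of the commit): the javadoc above explains that make() treats the first array element as the root and the remaining elements as its children. A hypothetical sketch; the token-type numbers are arbitrary, and the factory falls back to CommonAST because no class map is set:

import com.fr.third.antlr.ASTFactory;
import com.fr.third.antlr.collections.AST;

public class ASTFactoryDemo {
    public static void main(String[] args) {
        ASTFactory factory = new ASTFactory();
        AST plus  = factory.create(47, "+");
        AST three = factory.create(20, "3");
        AST four  = factory.create(20, "4");
        AST expr  = factory.make(new AST[] { plus, three, four }); // "+" becomes the root
        System.out.println(expr.toStringTree());                   // prints the tree ( + 3 4 )
    }
}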

fine-antlr-old/src/main/java/com/fr/third/antlr/ASTIterator.java (+76)

@@ -0,0 +1,76 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTIterator.java#2 $
*/
import com.fr.third.antlr.collections.AST;
public class ASTIterator {
protected AST cursor = null;
protected AST original = null;
public ASTIterator(AST t) {
original = cursor = t;
}
/** Is 'sub' a subtree of 't' beginning at the root? */
public boolean isSubtree(AST t, AST sub) {
AST sibling;
// the empty tree is always a subset of any tree.
if (sub == null) {
return true;
}
// if the tree is empty, return true if the subtree template is too.
if (t == null) {
// if (sub != null) return false;
// return true;
return false;
}
// Otherwise, start walking sibling lists. First mismatch, return false.
for (sibling = t;
sibling != null && sub != null;
sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
// as a quick optimization, check roots first.
if (sibling.getType() != sub.getType()) return false;
// if roots match, do full match test on children.
if (sibling.getFirstChild() != null) {
if (!isSubtree(sibling.getFirstChild(), sub.getFirstChild())) return false;
}
}
return true;
}
/** Find the next subtree with structure and token types equal to
* those of 'template'.
*/
public AST next(AST template) {
AST t = null;
AST sibling = null;
if (cursor == null) { // do nothing if no tree to work on
return null;
}
// Start walking sibling list looking for subtree matches.
for (; cursor != null; cursor = cursor.getNextSibling()) {
// as a quick optimization, check roots first.
if (cursor.getType() == template.getType()) {
// if roots match, do full match test on children.
if (cursor.getFirstChild() != null) {
if (isSubtree(cursor.getFirstChild(), template.getFirstChild())) {
return cursor;
}
}
}
}
return t;
}
}
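
Note (illustration, not part of the commit): next(template) walks the cursor's sibling list and returns the first subtree whose root type matches the template and whose children pass isSubtree(), as described in the comments above. A hypothetical sketch built with the ASTFactory from this same commit:

import com.fr.third.antlr.ASTFactory;
import com.fr.third.antlr.ASTIterator;
import com.fr.third.antlr.collections.AST;

public class ASTIteratorDemo {
    public static void main(String[] args) {
        ASTFactory f = new ASTFactory();
        // Tree (+ 3 4) and template (+ 3): matching compares root types first,
        // then walks the child lists until one of them runs out.
        AST expr     = f.make(new AST[] { f.create(47, "+"), f.create(20, "3"), f.create(20, "4") });
        AST template = f.make(new AST[] { f.create(47, "+"), f.create(20, "3") });
        ASTIterator it = new ASTIterator(expr);
        System.out.println(it.next(template) == expr); // true: expr itself matches the template
    }
}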

fine-antlr-old/src/main/java/com/fr/third/antlr/ASTNULLType.java (+108)

@@ -0,0 +1,108 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTNULLType.java#2 $
*/
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.ASTEnumeration;
/** There is only one instance of this class **/
public class ASTNULLType implements AST {
public void addChild(AST c) {
}
public boolean equals(AST t) {
return false;
}
public boolean equalsList(AST t) {
return false;
}
public boolean equalsListPartial(AST t) {
return false;
}
public boolean equalsTree(AST t) {
return false;
}
public boolean equalsTreePartial(AST t) {
return false;
}
public ASTEnumeration findAll(AST tree) {
return null;
}
public ASTEnumeration findAllPartial(AST subtree) {
return null;
}
public AST getFirstChild() {
return this;
}
public AST getNextSibling() {
return this;
}
public String getText() {
return "<ASTNULL>";
}
public int getType() {
return Token.NULL_TREE_LOOKAHEAD;
}
public int getLine() {
return 0;
}
public int getColumn() {
return 0;
}
public int getNumberOfChildren() {
return 0;
}
public void initialize(int t, String txt) {
}
public void initialize(AST t) {
}
public void initialize(Token t) {
}
public void setFirstChild(AST c) {
}
public void setNextSibling(AST n) {
}
public void setText(String text) {
}
public void setType(int ttype) {
}
public String toString() {
return getText();
}
public String toStringList() {
return getText();
}
public String toStringTree() {
return getText();
}
}
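
Note (illustration, not part of the commit): as the comment above says, this is a null-object sentinel kept as a single shared instance by the framework; it reports NULL_TREE_LOOKAHEAD as its type and returns itself for child/sibling navigation, so lookahead code never has to null-check. A hypothetical sketch, constructing an instance here only for demonstration:

import com.fr.third.antlr.ASTNULLType;
import com.fr.third.antlr.Token;
import com.fr.third.antlr.collections.AST;

public class ASTNullDemo {
    public static void main(String[] args) {
        AST nil = new ASTNULLType();
        System.out.println(nil.getType() == Token.NULL_TREE_LOOKAHEAD); // true
        System.out.println(nil.getFirstChild() == nil);                 // true: navigation never yields null
        System.out.println(nil.toStringTree());                         // <ASTNULL>
    }
}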

fine-antlr-old/src/main/java/com/fr/third/antlr/ASTPair.java (+43)

@@ -0,0 +1,43 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTPair.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** ASTPair: utility class used for manipulating a pair of ASTs
* representing the current AST root and current AST sibling.
* This exists to compensate for the lack of pointers or 'var'
* arguments in Java.
*/
public class ASTPair {
public AST root; // current root of tree
public AST child; // current child to which siblings are added
/** Make sure that child is the last sibling */
public final void advanceChildToEnd() {
if (child != null) {
while (child.getNextSibling() != null) {
child = child.getNextSibling();
}
}
}
/** Copy an ASTPair. Don't call it clone() because we want type-safety */
public ASTPair copy() {
ASTPair tmp = new ASTPair();
tmp.root = root;
tmp.child = child;
return tmp;
}
public String toString() {
String r = root == null ? "null" : root.getText();
String c = child == null ? "null" : child.getText();
return "[" + r + "," + c + "]";
}
}
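
Note (illustration, not part of the commit): ASTPair is the (root, last child) cursor that generated parsers thread through ASTFactory.addASTChild() and makeASTRoot(). A hypothetical sketch of that interplay; the token type 4 is arbitrary:

import com.fr.third.antlr.ASTFactory;
import com.fr.third.antlr.ASTPair;

public class ASTPairDemo {
    public static void main(String[] args) {
        ASTFactory f = new ASTFactory();
        ASTPair current = new ASTPair();
        f.addASTChild(current, f.create(4, "first"));    // first node becomes the pair's root
        f.addASTChild(current, f.create(4, "second"));   // appended as the next sibling in the list
        f.makeASTRoot(current, f.create(4, "op"));       // "op" is hoisted above the list (the ^ operator)
        System.out.println(current.root.toStringTree()); // ( op first second )
        System.out.println(current);                     // [op,second]
    }
}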

fine-antlr-old/src/main/java/com/fr/third/antlr/ASTVisitor.java (+14)

@@ -0,0 +1,14 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTVisitor.java#2 $
*/
import com.fr.third.antlr.collections.AST;
public interface ASTVisitor {
public void visit(AST node);
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/ASDebugStream.java (+35)

@@ -0,0 +1,35 @@
package com.fr.third.antlr.ASdebug;
import com.fr.third.antlr.Token;
import com.fr.third.antlr.TokenStream;
/**
* Default implementation of <code>IASDebugStream</code> methods.
* @author Prashant Deva
*/
public final class ASDebugStream
{
public static String getEntireText(TokenStream stream)
{
if (stream instanceof IASDebugStream)
{
IASDebugStream dbgStream = (IASDebugStream) stream;
return dbgStream.getEntireText();
}
return null;
}
public static TokenOffsetInfo getOffsetInfo(TokenStream stream, Token token)
{
if (stream instanceof IASDebugStream)
{
IASDebugStream dbgStream = (IASDebugStream) stream;
return dbgStream.getOffsetInfo(token);
}
return null;
}
}
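
Note (illustration, not part of the commit): the helper above delegates to the stream only when it implements IASDebugStream (defined next in this diff) and otherwise returns null. A hypothetical sketch with a toy stream; RecordingStream is invented for the example, and it assumes Token has a public Token(int) constructor:

import com.fr.third.antlr.Token;
import com.fr.third.antlr.TokenStream;
import com.fr.third.antlr.TokenStreamException;
import com.fr.third.antlr.ASdebug.ASDebugStream;
import com.fr.third.antlr.ASdebug.IASDebugStream;
import com.fr.third.antlr.ASdebug.TokenOffsetInfo;

public class DebugStreamDemo {
    static class RecordingStream implements TokenStream, IASDebugStream {
        private final String input;
        RecordingStream(String input) { this.input = input; }
        public Token nextToken() throws TokenStreamException {
            return new Token(Token.EOF_TYPE);              // toy stream: immediately at end of input
        }
        public String getEntireText() { return input; }
        public TokenOffsetInfo getOffsetInfo(Token token) {
            return new TokenOffsetInfo(0, input.length()); // whole input reported as one span
        }
    }

    public static void main(String[] args) {
        // Delegates because RecordingStream implements IASDebugStream:
        System.out.println(ASDebugStream.getEntireText(new RecordingStream("a = 1;"))); // a = 1;
    }
}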

fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/IASDebugStream.java (+24)

@@ -0,0 +1,24 @@
package com.fr.third.antlr.ASdebug;
import com.fr.third.antlr.Token;
/**
* Provides information used by the 'Input Text' view
* of Antlr Studio.
* @author Prashant Deva
*/
public interface IASDebugStream
{
/**
* Returns the entire text input to the lexer.
* @return The entire text or <code>null</code> if an error occurred or System.in was used.
*/
String getEntireText();
/**
* Returns the offset information for the token
* @param token the token whose information needs to be retrieved
* @return offset info, or <code>null</code>
*/
TokenOffsetInfo getOffsetInfo(Token token);
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/TokenOffsetInfo.java (+22)

@@ -0,0 +1,22 @@
package com.fr.third.antlr.ASdebug;
/**
* Provides offset info for a token.<br>
* All offsets are 0-based.
* @author Prashant Deva
*/
public class TokenOffsetInfo
{
public final int beginOffset, length;
public TokenOffsetInfo(int offset, int length)
{
this.beginOffset = offset;
this.length = length;
}
public int getEndOffset()
{
return beginOffset+length-1;
}
}
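
Note (illustration, not part of the commit): offsets are 0-based and getEndOffset() is inclusive, so a 5-character token starting at offset 10 ends at offset 14. A hypothetical one-liner:

import com.fr.third.antlr.ASdebug.TokenOffsetInfo;

public class OffsetDemo {
    public static void main(String[] args) {
        TokenOffsetInfo info = new TokenOffsetInfo(10, 5); // starts at offset 10, 5 characters long
        System.out.println(info.getEndOffset());           // 14
    }
}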

fine-antlr-old/src/main/java/com/fr/third/antlr/ActionElement.java (+33)

@@ -0,0 +1,33 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ActionElement.java#2 $
*/
class ActionElement extends AlternativeElement {
protected String actionText;
protected boolean isSemPred = false;
public ActionElement(Grammar g, Token t) {
super(g);
actionText = t.getText();
line = t.getLine();
column = t.getColumn();
}
public void generate() {
grammar.generator.gen(this);
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public String toString() {
return " " + actionText + (isSemPred?"?":"");
}
}

fine-antlr-old/src/main/java/com/fr/third/antlr/ActionTransInfo.java (+23)

@@ -0,0 +1,23 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ActionTransInfo.java#2 $
*/
/**
* This class contains information about how an action
* was translated (using the AST conversion rules).
*/
public class ActionTransInfo {
public boolean assignToRoot = false; // somebody did a "#rule = "
public String refRuleRoot = null; // somebody referenced #rule; string is translated var
public String followSetName = null; // somebody referenced $FOLLOW; string is the name of the lookahead set
public String toString() {
return "assignToRoot:" + assignToRoot + ", refRuleRoot:"
+ refRuleRoot + ", FOLLOW Set:" + followSetName;
}
}

73
fine-antlr-old/src/main/java/com/fr/third/antlr/Alternative.java

@@ -0,0 +1,73 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Alternative.java#2 $
*/
/** Intermediate data class holds information about an alternative */
class Alternative {
// Tracking alternative linked list
AlternativeElement head; // head of alt element list
AlternativeElement tail; // last element added
// Syntactic predicate block if non-null
protected SynPredBlock synPred;
// Semantic predicate action if non-null
protected String semPred;
// Exception specification if non-null
protected ExceptionSpec exceptionSpec;
// Init action if non-null;
protected Lookahead[] cache; // lookahead for alt. Filled in by
// deterministic() only!!!!!!! Used for
// code gen after calls to deterministic()
// and used by deterministic for (...)*, (..)+,
// and (..)? blocks. 1..k
protected int lookaheadDepth; // each alt has different look depth possibly.
// depth can be NONDETERMINISTIC too.
// 0..n-1
// If non-null, Tree specification ala -> A B C (not implemented)
protected Token treeSpecifier = null;
// True if AST generation is on for this alt
private boolean doAutoGen;
public Alternative() {
}
public Alternative(AlternativeElement firstElement) {
addElement(firstElement);
}
public void addElement(AlternativeElement e) {
// Link the element into the list
if (head == null) {
head = tail = e;
}
else {
tail.next = e;
tail = e;
}
}
public boolean atStart() {
return head == null;
}
public boolean getAutoGen() {
// Don't build an AST if there is a tree-rewrite-specifier
return doAutoGen && treeSpecifier == null;
}
public Token getTreeSpecifier() {
return treeSpecifier;
}
public void setAutoGen(boolean doAutoGen_) {
doAutoGen = doAutoGen_;
}
}

226
fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeBlock.java

@@ -0,0 +1,226 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/AlternativeBlock.java#2 $
*/
import com.fr.third.antlr.collections.impl.Vector;
/**A list of alternatives */
class AlternativeBlock extends AlternativeElement {
protected String initAction = null; // string for init action {...}
protected Vector alternatives; // Contains Alternatives
protected String label; // can label a looping block to break out of it.
protected int alti, altj; // which alts are being compared at the moment with
// deterministic()?
protected int analysisAlt; // which alt are we computing look on? Must be alti or altj
protected boolean hasAnAction = false; // does any alt have an action?
protected boolean hasASynPred = false; // does any alt have a syntactic predicate?
protected int ID = 0; // used to generate unique variables
protected static int nblks; // how many blocks have we allocated?
boolean not = false; // true if block is inverted.
boolean greedy = true; // Blocks are greedy by default
boolean greedySet = false; // but, if not explicitly greedy, warning might be generated
protected boolean doAutoGen = true; // false if no AST (or text) to be generated for block
protected boolean warnWhenFollowAmbig = true; // warn when an empty path or exit path
protected boolean generateAmbigWarnings = true; // the general warning "shut-up" mechanism
// conflicts with alt of subrule.
// Turning this off will suppress stuff
// like the if-then-else ambig.
public AlternativeBlock(Grammar g) {
super(g);
alternatives = new Vector(5);
this.not = false;
nblks++;
ID = nblks;
}
public AlternativeBlock(Grammar g, Token start, boolean not) {
super(g, start);
alternatives = new Vector(5);
// this.line = start.getLine();
// this.column = start.getColumn();
this.not = not;
nblks++;
ID = nblks;
}
public void addAlternative(Alternative alt) {
alternatives.appendElement(alt);
}
public void generate() {
grammar.generator.gen(this);
}
public Alternative getAlternativeAt(int i) {
return (Alternative)alternatives.elementAt(i);
}
public Vector getAlternatives() {
return alternatives;
}
public boolean getAutoGen() {
return doAutoGen;
}
public String getInitAction() {
return initAction;
}
public String getLabel() {
return label;
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public void prepareForAnalysis() {
for (int i = 0; i < alternatives.size(); i++) {
// deterministic() uses an alternative cache and sets lookahead depth
Alternative a = (Alternative)alternatives.elementAt(i);
a.cache = new Lookahead[grammar.maxk + 1];
a.lookaheadDepth = GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT;
}
}
/**Walk the syntactic predicate and, for a rule ref R, remove
* the ref from the list of FOLLOW references for R (stored
* in the symbol table).
*/
public void removeTrackingOfRuleRefs(Grammar g) {
for (int i = 0; i < alternatives.size(); i++) {
Alternative alt = getAlternativeAt(i);
AlternativeElement elem = alt.head;
while (elem != null) {
if (elem instanceof RuleRefElement) {
RuleRefElement rr = (RuleRefElement)elem;
RuleSymbol rs = (RuleSymbol)g.getSymbol(rr.targetRule);
if (rs == null) {
grammar.antlrTool.error("rule " + rr.targetRule + " referenced in (...)=>, but not defined");
}
else {
rs.references.removeElement(rr);
}
}
else if (elem instanceof AlternativeBlock) {// recurse into subrules
((AlternativeBlock)elem).removeTrackingOfRuleRefs(g);
}
elem = elem.next;
}
}
}
public void setAlternatives(Vector v) {
alternatives = v;
}
public void setAutoGen(boolean doAutoGen_) {
doAutoGen = doAutoGen_;
}
public void setInitAction(String initAction_) {
initAction = initAction_;
}
public void setLabel(String label_) {
label = label_;
}
public void setOption(Token key, Token value) {
if (key.getText().equals("warnWhenFollowAmbig")) {
if (value.getText().equals("true")) {
warnWhenFollowAmbig = true;
}
else if (value.getText().equals("false")) {
warnWhenFollowAmbig = false;
}
else {
grammar.antlrTool.error("Value for warnWhenFollowAmbig must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
else if (key.getText().equals("generateAmbigWarnings")) {
if (value.getText().equals("true")) {
generateAmbigWarnings = true;
}
else if (value.getText().equals("false")) {
generateAmbigWarnings = false;
}
else {
grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
else if (key.getText().equals("greedy")) {
if (value.getText().equals("true")) {
greedy = true;
greedySet = true;
}
else if (value.getText().equals("false")) {
greedy = false;
greedySet = true;
}
else {
grammar.antlrTool.error("Value for greedy must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
else {
grammar.antlrTool.error("Invalid subrule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn());
}
}
public String toString() {
String s = " (";
if (initAction != null) {
s += initAction;
}
for (int i = 0; i < alternatives.size(); i++) {
Alternative alt = getAlternativeAt(i);
Lookahead cache[] = alt.cache;
int k = alt.lookaheadDepth;
// dump lookahead set
if (k == GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT) {
}
else if (k == GrammarAnalyzer.NONDETERMINISTIC) {
s += "{?}:";
}
else {
s += " {";
for (int j = 1; j <= k; j++) {
s += cache[j].toString(",", grammar.tokenManager.getVocabulary());
if (j < k && cache[j + 1] != null) s += ";";
}
s += "}:";
}
// dump alternative including pred (if any)
AlternativeElement p = alt.head;
String pred = alt.semPred;
if (pred != null) {
s += pred;
}
while (p != null) {
s += p;
p = p.next;
}
if (i < (alternatives.size() - 1)) {
s += " |";
}
}
s += " )";
return s;
}
}

43
fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeElement.java

@@ -0,0 +1,43 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/AlternativeElement.java#2 $
*/
abstract class AlternativeElement extends GrammarElement {
AlternativeElement next;
protected int autoGenType = AUTO_GEN_NONE;
protected String enclosingRuleName;
public AlternativeElement(Grammar g) {
super(g);
}
public AlternativeElement(Grammar g, Token start) {
super(g, start);
}
public AlternativeElement(Grammar g, Token start, int autoGenType_) {
super(g, start);
autoGenType = autoGenType_;
}
public int getAutoGenType() {
return autoGenType;
}
public void setAutoGenType(int a) {
autoGenType = a;
}
public String getLabel() {
return null;
}
public void setLabel(String label) {
}
}

495
fine-antlr-old/src/main/java/com/fr/third/antlr/BaseAST.java

@@ -0,0 +1,495 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BaseAST.java#2 $
*/
import java.io.Serializable;
import java.io.IOException;
import java.io.Writer;
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.ASTEnumeration;
import com.fr.third.antlr.collections.impl.ASTEnumerator;
import com.fr.third.antlr.collections.impl.Vector;
/**
* A Child-Sibling Tree.
*
* A tree with PLUS at the root and with two children 3 and 4 is
* structured as:
*
* PLUS
* |
* 3 -- 4
*
* and can be specified easily in LISP notation as
*
* (PLUS 3 4)
*
* where every '(' starts a new subtree.
*
* These trees are particularly useful for translators because of
* the flexibility of the children lists. They are also very easy
* to walk automatically, whereas trees with specific children
* reference fields can't easily be walked automatically.
*
* This class contains the basic support for an AST.
* Most people will create ASTs that are subclasses of
* BaseAST or of CommonAST.
*/
public abstract class BaseAST implements AST, Serializable {
protected BaseAST down;
protected BaseAST right;
private static boolean verboseStringConversion = false;
private static String[] tokenNames = null;
/**Add a node to the end of the child list for this node */
public void addChild(AST node) {
if (node == null) return;
BaseAST t = this.down;
if (t != null) {
while (t.right != null) {
t = t.right;
}
t.right = (BaseAST)node;
}
else {
this.down = (BaseAST)node;
}
}
/** How many children does this node have? */
public int getNumberOfChildren() {
BaseAST t = this.down;
int n = 0;
if (t != null) {
n = 1;
while (t.right != null) {
t = t.right;
n++;
}
return n;
}
return n;
}
private static void doWorkForFindAll(AST nodeToSearch,
Vector v,
AST target,
boolean partialMatch)
{
// Start walking sibling lists, looking for matches.
for (AST sibling = nodeToSearch; sibling != null; sibling = sibling
.getNextSibling()) {
if ((partialMatch && sibling.equalsTreePartial(target))
|| (!partialMatch && sibling.equalsTree(target))) {
v.appendElement(sibling);
}
// regardless of match or not, check any children for matches
if (sibling.getFirstChild() != null) {
doWorkForFindAll(sibling.getFirstChild(), v, target, partialMatch);
}
}
}
/** Is node t equal to this in terms of token type and text? */
public boolean equals(AST t) {
if (t == null) return false;
if ( (this.getText()==null && t.getText()!=null) ||
(this.getText()!=null && t.getText()==null) )
{
return false;
}
if ( this.getText()==null && t.getText()==null ) {
return this.getType() == t.getType();
}
return this.getText().equals(t.getText()) &&
this.getType() == t.getType();
}
/** Is t an exact structural and equals() match of this tree. The
* 'this' reference is considered the start of a sibling list.
*/
public boolean equalsList(AST t) {
AST sibling;
// the empty tree is not a match of any non-null tree.
if (t == null) {
return false;
}
// Otherwise, start walking sibling lists. First mismatch, return false.
for (sibling = this;
sibling != null && t != null;
sibling = sibling.getNextSibling(), t = t.getNextSibling())
{
// as a quick optimization, check roots first.
if (!sibling.equals(t)) {
return false;
}
// if roots match, do full list match test on children.
if (sibling.getFirstChild() != null) {
if (!sibling.getFirstChild().equalsList(t.getFirstChild())) {
return false;
}
}
// sibling has no kids, make sure t doesn't either
else if (t.getFirstChild() != null) {
return false;
}
}
if (sibling == null && t == null) {
return true;
}
// one sibling list has more than the other
return false;
}
/** Is 'sub' a subtree of this list?
* The siblings of the root are NOT ignored.
*/
public boolean equalsListPartial(AST sub) {
AST sibling;
// the empty tree is always a subset of any tree.
if (sub == null) {
return true;
}
// Otherwise, start walking sibling lists. First mismatch, return false.
for (sibling = this;
sibling != null && sub != null;
sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
// as a quick optimization, check roots first.
if (!sibling.equals(sub)) return false;
// if roots match, do partial list match test on children.
if (sibling.getFirstChild() != null) {
if (!sibling.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
}
}
if (sibling == null && sub != null) {
// nothing left to match in this tree, but subtree has more
return false;
}
// either both are null or sibling has more, but subtree doesn't
return true;
}
/** Is tree rooted at 'this' equal to 't'? The siblings
* of 'this' are ignored.
*/
public boolean equalsTree(AST t) {
// check roots first.
if (!this.equals(t)) return false;
// if roots match, do full list match test on children.
if (this.getFirstChild() != null) {
if (!this.getFirstChild().equalsList(t.getFirstChild())) return false;
}
// sibling has no kids, make sure t doesn't either
else if (t.getFirstChild() != null) {
return false;
}
return true;
}
/** Is 't' a subtree of the tree rooted at 'this'? The siblings
* of 'this' are ignored.
*/
public boolean equalsTreePartial(AST sub) {
// the empty tree is always a subset of any tree.
if (sub == null) {
return true;
}
// check roots first.
if (!this.equals(sub)) return false;
// if roots match, do full list partial match test on children.
if (this.getFirstChild() != null) {
if (!this.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
}
return true;
}
/** Walk the tree looking for all exact subtree matches. Return
* an ASTEnumerator that lets the caller walk the list
* of subtree roots found herein.
*/
public ASTEnumeration findAll(AST target) {
Vector roots = new Vector(10);
AST sibling;
// the empty tree cannot result in an enumeration
if (target == null) {
return null;
}
doWorkForFindAll(this, roots, target, false); // find all matches recursively
return new ASTEnumerator(roots);
}
/** Walk the tree looking for all subtrees. Return
* an ASTEnumerator that lets the caller walk the list
* of subtree roots found herein.
*/
public ASTEnumeration findAllPartial(AST sub) {
Vector roots = new Vector(10);
AST sibling;
// the empty tree cannot result in an enumeration
if (sub == null) {
return null;
}
doWorkForFindAll(this, roots, sub, true); // find all matches recursively
return new ASTEnumerator(roots);
}
/** Get the first child of this node; null if no children */
public AST getFirstChild() {
return down;
}
/** Get the next sibling in line after this one */
public AST getNextSibling() {
return right;
}
/** Get the token text for this node */
public String getText() {
return "";
}
/** Get the token type for this node */
public int getType() {
return 0;
}
public int getLine() {
return 0;
}
public int getColumn() {
return 0;
}
public abstract void initialize(int t, String txt);
public abstract void initialize(AST t);
public abstract void initialize(Token t);
/** Remove all children */
public void removeChildren() {
down = null;
}
public void setFirstChild(AST c) {
down = (BaseAST)c;
}
public void setNextSibling(AST n) {
right = (BaseAST)n;
}
/** Set the token text for this node */
public void setText(String text) {
}
/** Set the token type for this node */
public void setType(int ttype) {
}
public static void setVerboseStringConversion(boolean verbose, String[] names) {
verboseStringConversion = verbose;
tokenNames = names;
}
/** Return an array of strings that maps token ID to its text. @since 2.7.3 */
public static String[] getTokenNames() {
return tokenNames;
}
public String toString() {
StringBuffer b = new StringBuffer();
// if verbose and type name not same as text (keyword probably)
if (verboseStringConversion &&
getText() != null &&
!getText().equalsIgnoreCase(tokenNames[getType()]) &&
!getText().equalsIgnoreCase(StringUtils.stripFrontBack(tokenNames[getType()], "\"", "\""))) {
b.append('[');
b.append(getText());
b.append(",<");
b.append(tokenNames[getType()]);
b.append(">]");
return b.toString();
}
return getText();
}
/** Print out a child-sibling tree in LISP notation */
public String toStringList() {
AST t = this;
String ts = "";
if (t.getFirstChild() != null) ts += " (";
ts += " " + this.toString();
if (t.getFirstChild() != null) {
ts += ((BaseAST)t.getFirstChild()).toStringList();
}
if (t.getFirstChild() != null) ts += " )";
if (t.getNextSibling() != null) {
ts += ((BaseAST)t.getNextSibling()).toStringList();
}
return ts;
}
public String toStringTree() {
AST t = this;
String ts = "";
if (t.getFirstChild() != null) ts += " (";
ts += " " + this.toString();
if (t.getFirstChild() != null) {
ts += ((BaseAST)t.getFirstChild()).toStringList();
}
if (t.getFirstChild() != null) ts += " )";
return ts;
}
public static String decode(String text) {
char c, c1, c2, c3, c4, c5;
StringBuffer n = new StringBuffer();
for (int i = 0; i < text.length(); i++) {
c = text.charAt(i);
if (c == '&') {
c1 = text.charAt(i + 1);
c2 = text.charAt(i + 2);
c3 = text.charAt(i + 3);
c4 = text.charAt(i + 4);
c5 = text.charAt(i + 5);
if (c1 == 'a' && c2 == 'm' && c3 == 'p' && c4 == ';') {
n.append('&');
i += 5;
}
else if (c1 == 'l' && c2 == 't' && c3 == ';') {
n.append('<');
i += 4;
}
else if (c1 == 'g' && c2 == 't' && c3 == ';') {
n.append('>');
i += 4;
}
else if (c1 == 'q' && c2 == 'u' && c3 == 'o' &&
c4 == 't' && c5 == ';') {
n.append('"');
i += 6;
}
else if (c1 == 'a' && c2 == 'p' && c3 == 'o' &&
c4 == 's' && c5 == ';') {
n.append('\'');
i += 6;
}
else
n.append('&');
}
else
n.append(c);
}
return new String(n);
}
public static String encode(String text) {
char c;
StringBuffer n = new StringBuffer();
for (int i = 0; i < text.length(); i++) {
c = text.charAt(i);
switch (c) {
case '&':
{
n.append("&amp;");
break;
}
case '<':
{
n.append("&lt;");
break;
}
case '>':
{
n.append("&gt;");
break;
}
case '"':
{
n.append("&quot;");
break;
}
case '\'':
{
n.append("&apos;");
break;
}
default :
{
n.append(c);
break;
}
}
}
return new String(n);
}
public void xmlSerializeNode(Writer out)
throws IOException {
StringBuffer buf = new StringBuffer(100);
buf.append('<');
buf.append(getClass().getName() + " ");
buf.append("text=\"" + encode(getText()) + "\" type=\"" +
getType() + "\"/>");
out.write(buf.toString());
}
public void xmlSerializeRootOpen(Writer out)
throws IOException {
StringBuffer buf = new StringBuffer(100);
buf.append('<');
buf.append(getClass().getName() + " ");
buf.append("text=\"" + encode(getText()) + "\" type=\"" +
getType() + "\">\n");
out.write(buf.toString());
}
public void xmlSerializeRootClose(Writer out)
throws IOException {
out.write("</" + getClass().getName() + ">\n");
}
public void xmlSerialize(Writer out) throws IOException {
// print out this node and all siblings
for (AST node = this;
node != null;
node = node.getNextSibling()) {
if (node.getFirstChild() == null) {
// print guts (class name, attributes)
((BaseAST)node).xmlSerializeNode(out);
}
else {
((BaseAST)node).xmlSerializeRootOpen(out);
// print children
((BaseAST)node.getFirstChild()).xmlSerialize(out);
// print end tag
((BaseAST)node).xmlSerializeRootClose(out);
}
}
}
}
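
To make the child-sibling layout described in the class comment concrete, here is a small sketch (illustrative only, not part of this commit) using CommonAST, which is also added by this commit:
CommonAST plus = new CommonAST();
plus.initialize(0, "PLUS");            // token type 0 chosen arbitrarily for the example
CommonAST three = new CommonAST();
three.initialize(0, "3");
CommonAST four = new CommonAST();
four.initialize(0, "4");
plus.addChild(three);                  // becomes plus.down (first child)
plus.addChild(four);                   // becomes three.right (next sibling)
System.out.println(plus.toStringTree());   // prints " ( PLUS 3 4 )"
System.out.println(BaseAST.encode("a<b")); // prints "a&lt;b"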

32
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockContext.java

@@ -0,0 +1,32 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockContext.java#2 $
*/
/**BlockContext stores the information needed when creating an
* alternative (list of elements). Entering a subrule requires
* that we save this state as each block of alternatives
* requires state such as "tail of current alternative."
*/
class BlockContext {
AlternativeBlock block; // current block of alternatives
int altNum; // which alt are we accepting 0..n-1
BlockEndElement blockEnd; // used if nested
public void addAlternativeElement(AlternativeElement e) {
currentAlt().addElement(e);
}
public Alternative currentAlt() {
return (Alternative)block.alternatives.elementAt(altNum);
}
public AlternativeElement currentElement() {
return currentAlt().tail;
}
}

31
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockEndElement.java

@@ -0,0 +1,31 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockEndElement.java#2 $
*/
/**All alternative blocks are "terminated" by BlockEndElements unless
* they are rule blocks (in which case they use RuleEndElement).
*/
class BlockEndElement extends AlternativeElement {
protected boolean[] lock; // for analysis; used to avoid infinite loops
protected AlternativeBlock block;// ending blocks know what block they terminate
public BlockEndElement(Grammar g) {
super(g);
lock = new boolean[g.maxk + 1];
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public String toString() {
//return " [BlkEnd]";
return "";
}
}

24
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockWithImpliedExitPath.java

@@ -0,0 +1,24 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockWithImpliedExitPath.java#2 $
*/
abstract class BlockWithImpliedExitPath extends AlternativeBlock {
protected int exitLookaheadDepth; // lookahead needed to handle optional path
/** lookahead to bypass block; set
* by deterministic(). 1..k of Lookahead
*/
protected Lookahead[] exitCache = new Lookahead[grammar.maxk + 1];
public BlockWithImpliedExitPath(Grammar g) {
super(g);
}
public BlockWithImpliedExitPath(Grammar g, Token start) {
super(g, start, false);
}
}

53
fine-antlr-old/src/main/java/com/fr/third/antlr/ByteBuffer.java

@@ -0,0 +1,53 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ByteBuffer.java#2 $
*/
/**A Stream of characters fed to the lexer from an InputStream that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
* character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see antlr.CharQueue
*/
// SAS: added this class to handle Binary input w/ FileInputStream
import java.io.IOException;
import java.io.InputStream;
public class ByteBuffer extends InputBuffer {
// char source
public transient InputStream input;
/** Create a character buffer */
public ByteBuffer(InputStream input_) {
super();
input = input_;
}
/** Ensure that the character buffer is sufficiently full */
public void fill(int amount) throws CharStreamException {
try {
syncConsume();
// Fill the buffer sufficiently to hold needed characters
while (queue.nbrEntries < amount + markerOffset) {
// Append the next character
queue.append((char)input.read());
}
}
catch (IOException io) {
throw new CharStreamIOException(io);
}
}
}

53
fine-antlr-old/src/main/java/com/fr/third/antlr/CharBuffer.java

@@ -0,0 +1,53 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharBuffer.java#2 $
*/
/**A Stream of characters fed to the lexer from a Reader that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
* character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see antlr.CharQueue
*/
import java.io.IOException;
import java.io.Reader;
// SAS: Move most functionality into InputBuffer -- just the file-specific
// stuff is in here
public class CharBuffer extends InputBuffer {
// char source
public transient Reader input;
/** Create a character buffer */
public CharBuffer(Reader input_) { // SAS: for proper text i/o
super();
input = input_;
}
/** Ensure that the character buffer is sufficiently full */
public void fill(int amount) throws CharStreamException {
try {
syncConsume();
// Fill the buffer sufficiently to hold needed characters
while (queue.nbrEntries < amount + markerOffset) {
// Append the next character
queue.append((char)input.read());
}
}
catch (IOException io) {
throw new CharStreamIOException(io);
}
}
}
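
For context, a CharBuffer is normally built around a Reader and then handed to a CharScanner subclass; a minimal sketch (illustrative only, not part of this commit, with the lexer constructor assumed from typical ANTLR 2.x usage):
java.io.Reader src = new java.io.StringReader("header { }");
CharBuffer buf = new CharBuffer(src);
// The buffer would then back a lexer, e.g. new ANTLRLexer(buf) in the usual ANTLR 2.x pattern.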

23
fine-antlr-old/src/main/java/com/fr/third/antlr/CharFormatter.java

@@ -0,0 +1,23 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharFormatter.java#2 $
*/
/** Interface used by BitSet to format elements of the set when
* converting to string
*/
public interface CharFormatter {
public String escapeChar(int c, boolean forCharLiteral);
public String escapeString(String s);
public String literalChar(int c);
public String literalString(String s);
}

29
fine-antlr-old/src/main/java/com/fr/third/antlr/CharLiteralElement.java

@@ -0,0 +1,29 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharLiteralElement.java#2 $
*/
class CharLiteralElement extends GrammarAtom {
public CharLiteralElement(LexerGrammar g, Token t, boolean inverted, int autoGenType) {
super(g, t, AUTO_GEN_NONE);
tokenType = ANTLRLexer.tokenTypeForCharLiteral(t.getText());
g.charVocabulary.add(tokenType);
line = t.getLine();
not = inverted;
this.autoGenType = autoGenType;
}
public void generate() {
grammar.generator.gen(this);
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
}

95
fine-antlr-old/src/main/java/com/fr/third/antlr/CharQueue.java

@@ -0,0 +1,95 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharQueue.java#2 $
*/
/** A circular buffer object used by CharBuffer */
public class CharQueue {
/** Physical circular buffer of tokens */
protected char[] buffer;
/** buffer.length-1 for quick modulos */
private int sizeLessOne;
/** physical index of front token */
private int offset;
/** number of tokens in the queue */
protected int nbrEntries;
public CharQueue(int minSize) {
// Find first power of 2 >= to requested size
int size;
if ( minSize<0 ) {
init(16); // pick some value for them
return;
}
// check for overflow
if ( minSize>=(Integer.MAX_VALUE/2) ) {
init(Integer.MAX_VALUE); // wow that's big.
return;
}
for (size = 2; size < minSize; size *= 2) {
}
init(size);
}
/** Add token to end of the queue
* @param tok The token to add
*/
public final void append(char tok) {
if (nbrEntries == buffer.length) {
expand();
}
buffer[(offset + nbrEntries) & sizeLessOne] = tok;
nbrEntries++;
}
/** Fetch a token from the queue by index
* @param idx The index of the token to fetch, where zero is the token at the front of the queue
*/
public final char elementAt(int idx) {
return buffer[(offset + idx) & sizeLessOne];
}
/** Expand the token buffer by doubling its capacity */
private final void expand() {
char[] newBuffer = new char[buffer.length * 2];
// Copy the contents to the new buffer
// Note that this will store the first logical item in the
// first physical array element.
for (int i = 0; i < buffer.length; i++) {
newBuffer[i] = elementAt(i);
}
// Re-initialize with new contents, keep old nbrEntries
buffer = newBuffer;
sizeLessOne = buffer.length - 1;
offset = 0;
}
/** Initialize the queue.
* @param size The initial size of the queue
*/
public void init(int size) {
// Allocate buffer
buffer = new char[size];
// Other initialization
sizeLessOne = size - 1;
offset = 0;
nbrEntries = 0;
}
/** Clear the queue, leaving the previous buffer alone.
*/
public final void reset() {
offset = 0;
nbrEntries = 0;
}
/** Remove char from front of queue */
public final void removeFirst() {
offset = (offset + 1) & sizeLessOne;
nbrEntries--;
}
}
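
The power-of-two sizing above is what lets append() and elementAt() use "& sizeLessOne" as a cheap modulo; a short illustrative sequence (not part of this commit):
CharQueue q = new CharQueue(3);   // capacity rounds up to 4, so sizeLessOne == 3
q.append('a');
q.append('b');
char front = q.elementAt(0);      // 'a' - index 0 is always the logical front of the queue
q.removeFirst();                  // drops 'a'; 'b' is now at index 0
q.append('c');                    // later appends wrap around the physical array as needed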

54
fine-antlr-old/src/main/java/com/fr/third/antlr/CharRangeElement.java

@@ -0,0 +1,54 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharRangeElement.java#2 $
*/
class CharRangeElement extends AlternativeElement {
String label;
protected char begin = 0;
protected char end = 0;
protected String beginText;
protected String endText;
public CharRangeElement(LexerGrammar g, Token t1, Token t2, int autoGenType) {
super(g);
begin = (char)ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
beginText = t1.getText();
end = (char)ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
endText = t2.getText();
line = t1.getLine();
// track which characters are referenced in the grammar
for (int i = begin; i <= end; i++) {
g.charVocabulary.add(i);
}
this.autoGenType = autoGenType;
}
public void generate() {
grammar.generator.gen(this);
}
public String getLabel() {
return label;
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public void setLabel(String label_) {
label = label_;
}
public String toString() {
if (label != null)
return " " + label + ":" + beginText + ".." + endText;
else
return " " + beginText + ".." + endText;
}
}

409
fine-antlr-old/src/main/java/com/fr/third/antlr/CharScanner.java

@@ -0,0 +1,409 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharScanner.java#2 $
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
public abstract class CharScanner implements TokenStream {
static final char NO_CHAR = 0;
public static final char EOF_CHAR = (char)-1;
protected ANTLRStringBuffer text; // text of current token
protected boolean saveConsumedInput = true; // does consume() save characters?
protected Class tokenObjectClass; // what kind of tokens to create?
protected boolean caseSensitive = true;
protected boolean caseSensitiveLiterals = true;
protected Hashtable literals; // set by subclass
/** Tab chars are handled by tab() according to this value; override
* method to do anything weird with tabs.
*/
protected int tabsize = 8;
protected Token _returnToken = null; // used to return tokens w/o using return val.
// Hash string used so we don't new one every time to check literals table
protected ANTLRHashString hashString;
protected LexerSharedInputState inputState;
/** Used during filter mode to indicate that path is desired.
* A subsequent scan error will report an error as usual if
* commitToPath=true.
*/
protected boolean commitToPath = false;
/** Used to keep track of indent depth for traceIn/Out */
protected int traceDepth = 0;
public CharScanner() {
text = new ANTLRStringBuffer();
hashString = new ANTLRHashString(this);
//TODO alex mod
// setTokenObjectClass("antlr.CommonToken");
setTokenObjectClass("com.fr.third.antlr.CommonToken");
}
public CharScanner(InputBuffer cb) { // SAS: use generic buffer
this();
inputState = new LexerSharedInputState(cb);
}
public CharScanner(LexerSharedInputState sharedState) {
this();
inputState = sharedState;
}
public void append(char c) {
if (saveConsumedInput) {
text.append(c);
}
}
public void append(String s) {
if (saveConsumedInput) {
text.append(s);
}
}
public void commit() {
inputState.input.commit();
}
public void consume() throws CharStreamException {
if (inputState.guessing == 0) {
char c = LA(1);
if (caseSensitive) {
append(c);
}
else {
// use input.LA(), not LA(), to get original case
// CharScanner.LA() would toLower it.
append(inputState.input.LA(1));
}
if (c == '\t') {
tab();
}
else {
inputState.column++;
}
}
inputState.input.consume();
}
/** Consume chars until one matches the given char */
public void consumeUntil(int c) throws CharStreamException {
while (LA(1) != EOF_CHAR && LA(1) != c) {
consume();
}
}
/** Consume chars until one matches the given set */
public void consumeUntil(BitSet set) throws CharStreamException {
while (LA(1) != EOF_CHAR && !set.member(LA(1))) {
consume();
}
}
public boolean getCaseSensitive() {
return caseSensitive;
}
public final boolean getCaseSensitiveLiterals() {
return caseSensitiveLiterals;
}
public int getColumn() {
return inputState.column;
}
public void setColumn(int c) {
inputState.column = c;
}
public boolean getCommitToPath() {
return commitToPath;
}
public String getFilename() {
return inputState.filename;
}
public InputBuffer getInputBuffer() {
return inputState.input;
}
public LexerSharedInputState getInputState() {
return inputState;
}
public void setInputState(LexerSharedInputState state) {
inputState = state;
}
public int getLine() {
return inputState.line;
}
/** return a copy of the current text buffer */
public String getText() {
return text.toString();
}
public Token getTokenObject() {
return _returnToken;
}
public char LA(int i) throws CharStreamException {
if (caseSensitive) {
return inputState.input.LA(i);
}
else {
return toLower(inputState.input.LA(i));
}
}
protected Token makeToken(int t) {
try {
Token tok = (Token)tokenObjectClass.newInstance();
tok.setType(t);
tok.setColumn(inputState.tokenStartColumn);
tok.setLine(inputState.tokenStartLine);
// tracking real start line now: tok.setLine(inputState.line);
return tok;
}
catch (InstantiationException ie) {
panic("can't instantiate token: " + tokenObjectClass);
}
catch (IllegalAccessException iae) {
panic("Token class is not accessible" + tokenObjectClass);
}
return Token.badToken;
}
public int mark() {
return inputState.input.mark();
}
public void match(char c) throws MismatchedCharException, CharStreamException {
if (LA(1) != c) {
throw new MismatchedCharException(LA(1), c, false, this);
}
consume();
}
public void match(BitSet b) throws MismatchedCharException, CharStreamException {
if (!b.member(LA(1))) {
throw new MismatchedCharException(LA(1), b, false, this);
}
else {
consume();
}
}
public void match(String s) throws MismatchedCharException, CharStreamException {
int len = s.length();
for (int i = 0; i < len; i++) {
if (LA(1) != s.charAt(i)) {
throw new MismatchedCharException(LA(1), s.charAt(i), false, this);
}
consume();
}
}
public void matchNot(char c) throws MismatchedCharException, CharStreamException {
if (LA(1) == c) {
throw new MismatchedCharException(LA(1), c, true, this);
}
consume();
}
public void matchRange(char c1, char c2) throws MismatchedCharException, CharStreamException {
if (LA(1) < c1 || LA(1) > c2) throw new MismatchedCharException(LA(1), c1, c2, false, this);
consume();
}
public void newline() {
inputState.line++;
inputState.column = 1;
}
/** advance the current column number by an appropriate amount
* according to tab size. This method is called from consume().
*/
public void tab() {
int c = getColumn();
int nc = ( ((c-1)/tabsize) + 1) * tabsize + 1; // calculate tab stop
setColumn( nc );
}
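// Worked example (illustrative, not part of the original source): with the
// default tabsize of 8, a tab consumed at column 5 moves the column to
// ((5 - 1) / 8 + 1) * 8 + 1 = 9, i.e. the next tab stop in 1-based columns.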
public void setTabSize( int size ) {
tabsize = size;
}
public int getTabSize() {
return tabsize;
}
/** @see #panic(String)
*/
public void panic() {
System.err.println("CharScanner: panic");
Utils.error("");
}
/** This method is executed by ANTLR internally when it detects an illegal
* state that cannot be recovered from.
* The default implementation of this method calls
* {@link java.lang.System#exit(int)} and writes directly to
* {@link java.lang.System#err}, which is usually not appropriate when
* a translator is embedded into a larger application. <em>It is highly
* recommended that this method be overridden to handle the error in a
* way appropriate for your application (e.g. throw an unchecked
* exception)</em>.
*/
public void panic(String s) {
System.err.println("CharScanner; panic: " + s);
Utils.error(s);
}
/** Parser error-reporting function can be overridden in subclass */
public void reportError(RecognitionException ex) {
System.err.println(ex);
}
/** Parser error-reporting function can be overridden in subclass */
public void reportError(String s) {
if (getFilename() == null) {
System.err.println("error: " + s);
}
else {
System.err.println(getFilename() + ": error: " + s);
}
}
/** Parser warning-reporting function can be overridden in subclass */
public void reportWarning(String s) {
if (getFilename() == null) {
System.err.println("warning: " + s);
}
else {
System.err.println(getFilename() + ": warning: " + s);
}
}
public void resetText() {
text.setLength(0);
inputState.tokenStartColumn = inputState.column;
inputState.tokenStartLine = inputState.line;
}
public void rewind(int pos) {
inputState.input.rewind(pos);
// RK: should not be here, it is messing up column calculation
// setColumn(inputState.tokenStartColumn);
}
public void setCaseSensitive(boolean t) {
caseSensitive = t;
}
public void setCommitToPath(boolean commit) {
commitToPath = commit;
}
public void setFilename(String f) {
inputState.filename = f;
}
public void setLine(int line) {
inputState.line = line;
}
public void setText(String s) {
resetText();
text.append(s);
}
public void setTokenObjectClass(String cl) {
try {
tokenObjectClass = Utils.loadClass(cl);
}
catch (ClassNotFoundException ce) {
panic("ClassNotFoundException: " + cl);
}
}
// Test the token text against the literals table
// Override this method to perform a different literals test
public int testLiteralsTable(int ttype) {
hashString.setBuffer(text.getBuffer(), text.length());
Integer literalsIndex = (Integer)literals.get(hashString);
if (literalsIndex != null) {
ttype = literalsIndex.intValue();
}
return ttype;
}
/** Test the text passed in against the literals table
* Override this method to perform a different literals test
* This is used primarily when you want to test a portion of
* a token.
*/
public int testLiteralsTable(String text, int ttype) {
ANTLRHashString s = new ANTLRHashString(text, this);
Integer literalsIndex = (Integer)literals.get(s);
if (literalsIndex != null) {
ttype = literalsIndex.intValue();
}
return ttype;
}
// Override this method to get more specific case handling
public char toLower(char c) {
return Character.toLowerCase(c);
}
public void traceIndent() {
for (int i = 0; i < traceDepth; i++)
System.out.print(' ');
}
public void traceIn(String rname) throws CharStreamException {
traceDepth += 1;
traceIndent();
System.out.println("> lexer " + rname + "; c==" + LA(1));
}
public void traceOut(String rname) throws CharStreamException {
traceIndent();
System.out.println("< lexer " + rname + "; c==" + LA(1));
traceDepth -= 1;
}
/** This method is called by YourLexer.nextToken() when the lexer has
* hit EOF condition. EOF is NOT a character.
* This method is not called if EOF is reached during
* syntactic predicate evaluation or during evaluation
* of normal lexical rules, which presumably would be
* an IOException. This traps the "normal" EOF condition.
*
* uponEOF() is called after the complete evaluation of
* the previous token and only if your parser asks
* for another token beyond that last non-EOF token.
*
* You might want to throw token or char stream exceptions
* like: "Heh, premature eof" or a retry stream exception
* ("I found the end of this file, go back to referencing file").
*/
public void uponEOF() throws TokenStreamException, CharStreamException {
}
}

21
fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamException.java

@@ -0,0 +1,21 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharStreamException.java#2 $
*/
/**
* Anything that goes wrong while generating a stream of characters
*/
public class CharStreamException extends ANTLRException {
/**
* CharStreamException constructor comment.
* @param s java.lang.String
*/
public CharStreamException(String s) {
super(s);
}
}

22
fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamIOException.java

@@ -0,0 +1,22 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharStreamIOException.java#2 $
*/
import java.io.IOException;
/**
* Wrap an IOException in a CharStreamException
*/
public class CharStreamIOException extends CharStreamException {
public IOException io;
public CharStreamIOException(IOException io) {
super(io.getMessage());
this.io = io;
}
}

663
fine-antlr-old/src/main/java/com/fr/third/antlr/CodeGenerator.java

@@ -0,0 +1,663 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CodeGenerator.java#2 $
*/
import java.io.IOException;
import java.io.PrintWriter;
import com.fr.third.antlr.collections.impl.BitSet;
import com.fr.third.antlr.collections.impl.Vector;
/**A generic ANTLR code generator. All code generators
* derive from this class.
*
* <p>
* A CodeGenerator knows about a Grammar data structure and
* a grammar analyzer. The Grammar is walked to generate the
* appropriate code for both a parser and lexer (if present).
* This interface may change slightly so that the lexer is
* itself living inside of a Grammar object (in which case,
* this class generates only one recognizer). The main method
* to call is <tt>gen()</tt>, which initiates all code gen.
*
* <p>
* The interaction of the code generator with the analyzer is
* simple: each subrule block calls deterministic() before generating
* code for the block. Method deterministic() sets lookahead caches
* in each Alternative object. Technically, a code generator
* doesn't need the grammar analyzer if all lookahead analysis
* is done at runtime, but this would result in a slower parser.
*
* <p>
* This class provides a set of support utilities to handle argument
* list parsing and so on.
*
* @author Terence Parr, John Lilley
* @version 2.00a
* @see com.fr.third.antlr.JavaCodeGenerator
* @see com.fr.third.antlr.DiagnosticCodeGenerator
* @see com.fr.third.antlr.LLkAnalyzer
* @see com.fr.third.antlr.Grammar
* @see com.fr.third.antlr.AlternativeElement
* @see com.fr.third.antlr.Lookahead
*/
public abstract class CodeGenerator {
protected com.fr.third.antlr.Tool antlrTool;
/** Current tab indentation for code output */
protected int tabs = 0;
/** Current output Stream */
transient protected PrintWriter currentOutput; // SAS: for proper text i/o
/** The grammar for which we generate code */
protected Grammar grammar = null;
/** List of all bitsets that must be dumped. These are Vectors of BitSet. */
protected Vector bitsetsUsed;
/** The grammar behavior */
protected DefineGrammarSymbols behavior;
/** The LLk analyzer */
protected LLkGrammarAnalyzer analyzer;
/** Object used to format characters in the target language.
* subclass must initialize this to the language-specific formatter
*/
protected CharFormatter charFormatter;
/** Use option "codeGenDebug" to generate debugging output */
protected boolean DEBUG_CODE_GENERATOR = false;
/** Default values for code-generation thresholds */
protected static final int DEFAULT_MAKE_SWITCH_THRESHOLD = 2;
protected static final int DEFAULT_BITSET_TEST_THRESHOLD = 4;
/** If there are more than 8 long words to init in a bitset,
* try to optimize it; e.g., detect runs of -1L and 0L.
*/
protected static final int BITSET_OPTIMIZE_INIT_THRESHOLD = 8;
/** This is a hint for the language-specific code generator.
* A switch() or language-specific equivalent will be generated instead
* of a series of if/else statements for blocks with number of alternates
* greater than or equal to this number of non-predicated LL(1) alternates.
* This is modified by the grammar option "codeGenMakeSwitchThreshold"
*/
protected int makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
/** This is a hint for the language-specific code generator.
* A bitset membership test will be generated instead of an
* ORed series of LA(k) comparisons for lookahead sets with
* degree greater than or equal to this value.
* This is modified by the grammar option "codeGenBitsetTestThreshold"
*/
protected int bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
private static boolean OLD_ACTION_TRANSLATOR = true;
public static String TokenTypesFileSuffix = "TokenTypes";
public static String TokenTypesFileExt = ".txt";
/** Construct code generator base class */
public CodeGenerator() {
}
/** Output a String to the currentOutput stream.
* Ignored if string is null.
* @param s The string to output
*/
protected void _print(String s) {
if (s != null) {
currentOutput.print(s);
}
}
/** Print an action without leading tabs, attempting to
* preserve the current indentation level for multi-line actions
* Ignored if string is null.
* @param s The action string to output
*/
protected void _printAction(String s) {
if (s == null) {
return;
}
// Skip leading newlines, tabs and spaces
int start = 0;
while (start < s.length() && Character.isSpaceChar(s.charAt(start))) {
start++;
}
// Skip trailing newlines, tabs and spaces
int end = s.length() - 1;
while (end > start && Character.isSpaceChar(s.charAt(end))) {
end--;
}
char c = 0;
for (int i = start; i <= end;) {
c = s.charAt(i);
i++;
boolean newline = false;
switch (c) {
case '\n':
newline = true;
break;
case '\r':
if (i <= end && s.charAt(i) == '\n') {
i++;
}
newline = true;
break;
default:
currentOutput.print(c);
break;
}
if (newline) {
currentOutput.println();
printTabs();
// Absorb leading whitespace
while (i <= end && Character.isSpaceChar(s.charAt(i))) {
i++;
}
newline = false;
}
}
currentOutput.println();
}
/** Output a String followed by newline, to the currentOutput stream.
* Ignored if string is null.
* @param s The string to output
*/
protected void _println(String s) {
if (s != null) {
currentOutput.println(s);
}
}
/** Test if a set element array represents a contiguous range.
* @param elems The array of elements representing the set, usually from BitSet.toArray().
* @return true if the elements are a contiguous range (with three or more elements).
*/
public static boolean elementsAreRange(int[] elems) {
if (elems.length == 0) {
return false;
}
int begin = elems[0];
int end = elems[elems.length - 1];
if (elems.length <= 2) {
// Not enough elements for a range expression
return false;
}
if (end - begin + 1 > elems.length) {
// The set does not represent a contiguous range
return false;
}
int v = begin + 1;
for (int i = 1; i < elems.length - 1; i++) {
if (v != elems[i]) {
// The set does not represent a contiguous range
return false;
}
v++;
}
return true;
}
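// Illustrative behaviour of elementsAreRange (not part of the original source):
//   elementsAreRange(new int[]{5, 6, 7, 8}) -> true   (contiguous run of three or more)
//   elementsAreRange(new int[]{5, 6})       -> false  (too few elements for a range)
//   elementsAreRange(new int[]{5, 7, 8})    -> false  (gap, so not contiguous)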
/** Get the identifier portion of an argument-action token.
* The ID of an action is assumed to be a trailing identifier.
* Specific code-generators may want to override this
* if the language has unusual declaration syntax.
* @param t The action token
* @return A string containing the text of the identifier
*/
protected String extractIdOfAction(Token t) {
return extractIdOfAction(t.getText(), t.getLine(), t.getColumn());
}
/** Get the identifier portion of an argument-action.
* The ID of an action is assumed to be a trailing identifier.
* Specific code-generators may want to override this
* if the language has unusual declaration syntax.
* @param s The action text
* @param line Line used for error reporting.
* @param column Column used for error reporting.
* @return A string containing the text of the identifier
*/
protected String extractIdOfAction(String s, int line, int column) {
s = removeAssignmentFromDeclaration(s);
// Search back from the end for a non alphanumeric. That marks the
// beginning of the identifier
for (int i = s.length() - 2; i >= 0; i--) {
// TODO: make this work for language-independent identifiers?
if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') {
// Found end of type part
return s.substring(i + 1);
}
}
// Something is bogus, but we cannot parse the language-specific
// actions any better. The compiler will have to catch the problem.
antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
return "";
}
/** Get the type string out of an argument-action token.
* The type of an action is assumed to precede a trailing identifier
* Specific code-generators may want to override this
* if the language has unusual declaration syntax.
* @param t The action token
* @return A string containing the text of the type
*/
protected String extractTypeOfAction(Token t) {
return extractTypeOfAction(t.getText(), t.getLine(), t.getColumn());
}
/** Get the type portion of an argument-action.
* The type of an action is assumed to precede a trailing identifier
* Specific code-generators may want to override this
* if the language has unusual declaration syntax.
* @param s The action text
* @param line Line used for error reporting.
* @return A string containing the text of the type
*/
protected String extractTypeOfAction(String s, int line, int column) {
s = removeAssignmentFromDeclaration(s);
// Search back from the end for a non alphanumeric. That marks the
// beginning of the identifier
for (int i = s.length() - 2; i >= 0; i--) {
// TODO: make this work for language-independent identifiers?
if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') {
// Found end of type part
return s.substring(0, i + 1);
}
}
// Something is bogus, but we cannot parse the language-specific
// actions any better. The compiler will have to catch the problem.
antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
return "";
}
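// Worked example for the two extractors above (illustrative, not part of the
// original source, and assuming the declaration carries no assignment part):
// for the argument-action text "int count", extractIdOfAction(...) returns
// "count" and extractTypeOfAction(...) returns "int " (the trailing identifier
// is the id, everything before it is treated as the type).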
/** Generate the code for all grammars
*/
public abstract void gen();
/** Generate code for the given grammar element.
* @param action The {...} action to generate
*/
public abstract void gen(ActionElement action);
/** Generate code for the given grammar element.
* @param blk The "x|y|z|..." block to generate
*/
public abstract void gen(AlternativeBlock blk);
/** Generate code for the given grammar element.
* @param end The block-end element to generate. Block-end
* elements are synthesized by the grammar parser to represent
* the end of a block.
*/
public abstract void gen(BlockEndElement end);
/** Generate code for the given grammar element.
* @param atom The character literal reference to generate
*/
public abstract void gen(CharLiteralElement atom);
/** Generate code for the given grammar element.
* @param r The character-range reference to generate
*/
public abstract void gen(CharRangeElement r);
/** Generate the code for a lexer */
public abstract void gen(LexerGrammar g) throws IOException;
/** Generate code for the given grammar element.
* @param blk The (...)+ block to generate
*/
public abstract void gen(OneOrMoreBlock blk);
/** Generate the code for a parser */
public abstract void gen(ParserGrammar g) throws IOException;
/** Generate code for the given grammar element.
* @param rr The rule-reference to generate
*/
public abstract void gen(RuleRefElement rr);
/** Generate code for the given grammar element.
* @param atom The string-literal reference to generate
*/
public abstract void gen(StringLiteralElement atom);
/** Generate code for the given grammar element.
* @param r The token-range reference to generate
*/
public abstract void gen(TokenRangeElement r);
/** Generate code for the given grammar element.
* @param atom The token-reference to generate
*/
public abstract void gen(TokenRefElement atom);
/** Generate code for the given grammar element.
* @param t The tree element to generate code for.
*/
public abstract void gen(TreeElement t);
/** Generate the code for a tree walker */
public abstract void gen(TreeWalkerGrammar g) throws IOException;
/** Generate code for the given grammar element.
* @param wc The wildcard element to generate
*/
public abstract void gen(WildcardElement wc);
/** Generate code for the given grammar element.
* @param blk The (...)* block to generate
*/
public abstract void gen(ZeroOrMoreBlock blk);
/** Generate the token types as a text file for persistence across shared lexer/parser */
protected void genTokenInterchange(TokenManager tm) throws IOException {
// Open the token output Java file and set the currentOutput stream
String fName = tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt;
currentOutput = antlrTool.openOutputFile(fName);
println("// $ANTLR " + Tool.version + ": " +
antlrTool.fileMinusPath(antlrTool.grammarFile) +
" -> " +
fName +
"$");
tabs = 0;
// Header
println(tm.getName() + " // output token vocab name");
// Generate a definition for each token type
Vector v = tm.getVocabulary();
for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
String s = (String)v.elementAt(i);
if (DEBUG_CODE_GENERATOR) {
System.out.println("gen persistence file entry for: " + s);
}
if (s != null && !s.startsWith("<")) {
// if literal, find label
if (s.startsWith("\"")) {
StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
if (sl != null && sl.label != null) {
print(sl.label + "=");
}
println(s + "=" + i);
}
else {
print(s);
// check for a paraphrase
TokenSymbol ts = (TokenSymbol)tm.getTokenSymbol(s);
if (ts == null) {
antlrTool.warning("undefined token symbol: " + s);
}
else {
if (ts.getParaphrase() != null) {
print("(" + ts.getParaphrase() + ")");
}
}
println("=" + i);
}
}
}
// Close the tokens output file
currentOutput.close();
currentOutput = null;
}
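// The persisted vocabulary file written above looks roughly like this
// (illustrative, hypothetical token names and type numbers):
//   MyLexer // output token vocab name
//   ID=4
//   IF="if"=5
// Plain tokens are written as name=type (with a paraphrase in parentheses when
// one is defined); string literals get a label= prefix when a label was assigned.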
/** Process a string for a simple expression for use in xx/action.g.
* It is used to cast simple tokens/references to the right type for
* the generated language.
* @param str A String.
*/
public String processStringForASTConstructor(String str) {
return str;
}
/** Get a string for an expression to generate creation of an AST subtree.
* @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
*/
public abstract String getASTCreateString(Vector v);
/** Get a string for an expression to generate creating of an AST node
* @param str The text of the arguments to the AST construction
*/
public abstract String getASTCreateString(GrammarAtom atom, String str);
/** Given the index of a bitset in the bitset list, generate a unique name.
* Specific code-generators may want to override this
* if the language does not allow '_' or numerals in identifiers.
* @param index The index of the bitset in the bitset list.
*/
protected String getBitsetName(int index) {
return "_tokenSet_" + index;
}
public static String encodeLexerRuleName(String id) {
return "m" + id;
}
public static String decodeLexerRuleName(String id) {
if ( id==null ) {
return null;
}
return id.substring(1,id.length());
}
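// e.g. encodeLexerRuleName("ID") -> "mID", and decodeLexerRuleName("mID") -> "ID"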
/** Map an identifier to its corresponding tree-node variable.
* This is context-sensitive, depending on the rule and alternative
* being generated.
* @param id The identifier name to map
* @param tInfo The action translation info to consult and update while mapping
* @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
*/
public abstract String mapTreeId(String id, ActionTransInfo tInfo);
/** Add a bitset to the list of bitsets to be generated.
* If the bitset is already in the list, ignore the request.
* Always adds the bitset to the end of the list, so the
* caller can rely on the position of bitsets in the list.
* The returned position can be used to format the bitset
* name, since it is invariant.
* @param p Bit set to mark for code generation
* @return The position of the bitset in the list.
*/
protected int markBitsetForGen(BitSet p) {
// Is the bitset (or an identical one) already marked for gen?
for (int i = 0; i < bitsetsUsed.size(); i++) {
BitSet set = (BitSet)bitsetsUsed.elementAt(i);
if (p.equals(set)) {
// Use the identical one already stored
return i;
}
}
// Add the new bitset
bitsetsUsed.appendElement(p.clone());
return bitsetsUsed.size() - 1;
}
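// e.g. the first distinct bitset marked here gets index 0 and is later named
// "_tokenSet_0" by getBitsetName()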
/** Output tab indent followed by a String, to the currentOutput stream.
* Ignored if string is null.
* @param s The string to output.
*/
protected void print(String s) {
if (s != null) {
printTabs();
currentOutput.print(s);
}
}
/** Print an action with leading tabs, attempting to
* preserve the current indentation level for multi-line actions.
* Ignored if string is null.
* @param s The action string to output
*/
protected void printAction(String s) {
if (s != null) {
printTabs();
_printAction(s);
}
}
/** Output tab indent followed by a String followed by newline,
* to the currentOutput stream. Ignored if string is null.
* @param s The string to output
*/
protected void println(String s) {
if (s != null) {
printTabs();
currentOutput.println(s);
}
}
/** Output the current tab indentation. This outputs the number of tabs
* indicated by the "tabs" variable to the currentOutput stream.
*/
protected void printTabs() {
for (int i = 1; i <= tabs; i++) {
currentOutput.print('\t');
}
}
/** Lexically process $ and # references within the action.
* This will replace #id and #(...) with the appropriate
* function calls and/or variables etc...
*/
protected abstract String processActionForSpecialSymbols(String actionStr,
int line,
RuleBlock currentRule,
ActionTransInfo tInfo);
public String getFOLLOWBitSet(String ruleName, int k) {
GrammarSymbol rs = grammar.getSymbol(ruleName);
if ( !(rs instanceof RuleSymbol) ) {
return null;
}
RuleBlock blk = ((RuleSymbol)rs).getBlock();
Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(k, blk.endNode);
String followSetName = getBitsetName(markBitsetForGen(follow.fset));
return followSetName;
}
public String getFIRSTBitSet(String ruleName, int k) {
GrammarSymbol rs = grammar.getSymbol(ruleName);
if ( !(rs instanceof RuleSymbol) ) {
return null;
}
RuleBlock blk = ((RuleSymbol)rs).getBlock();
Lookahead first = grammar.theLLkAnalyzer.look(k, blk);
String firstSetName = getBitsetName(markBitsetForGen(first.fset));
return firstSetName;
}
/**
* Remove the assignment portion of a declaration, if any.
* @param d the declaration
* @return the declaration without any assignment portion
*/
protected String removeAssignmentFromDeclaration(String d) {
// If d contains an equal sign, then it's a declaration
// with an initialization. Strip off the initialization part.
if (d.indexOf('=') >= 0) d = d.substring(0, d.indexOf('=')).trim();
return d;
}
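// e.g. removeAssignmentFromDeclaration("int count = 0") -> "int count"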
/** Set all fields back like one just created */
private void reset() {
tabs = 0;
// Allocate list of bitsets tagged for code generation
bitsetsUsed = new Vector();
currentOutput = null;
grammar = null;
DEBUG_CODE_GENERATOR = false;
makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
}
public static String reverseLexerRuleName(String id) {
return id.substring(1, id.length());
}
public void setAnalyzer(LLkGrammarAnalyzer analyzer_) {
analyzer = analyzer_;
}
public void setBehavior(DefineGrammarSymbols behavior_) {
behavior = behavior_;
}
/** Set a grammar for the code generator to use */
protected void setGrammar(Grammar g) {
reset();
grammar = g;
// Lookup make-switch threshold in the grammar generic options
if (grammar.hasOption("codeGenMakeSwitchThreshold")) {
try {
makeSwitchThreshold = grammar.getIntegerOption("codeGenMakeSwitchThreshold");
//System.out.println("setting codeGenMakeSwitchThreshold to " + makeSwitchThreshold);
}
catch (NumberFormatException e) {
Token tok = grammar.getOption("codeGenMakeSwitchThreshold");
antlrTool.error(
"option 'codeGenMakeSwitchThreshold' must be an integer",
grammar.getClassName(),
tok.getLine(), tok.getColumn()
);
}
}
// Lookup bitset-test threshold in the grammar generic options
if (grammar.hasOption("codeGenBitsetTestThreshold")) {
try {
bitsetTestThreshold = grammar.getIntegerOption("codeGenBitsetTestThreshold");
//System.out.println("setting codeGenBitsetTestThreshold to " + bitsetTestThreshold);
}
catch (NumberFormatException e) {
Token tok = grammar.getOption("codeGenBitsetTestThreshold");
antlrTool.error(
"option 'codeGenBitsetTestThreshold' must be an integer",
grammar.getClassName(),
tok.getLine(), tok.getColumn()
);
}
}
// Lookup debug code-gen in the grammar generic options
if (grammar.hasOption("codeGenDebug")) {
Token t = grammar.getOption("codeGenDebug");
if (t.getText().equals("true")) {
//System.out.println("setting code-generation debug ON");
DEBUG_CODE_GENERATOR = true;
}
else if (t.getText().equals("false")) {
//System.out.println("setting code-generation debug OFF");
DEBUG_CODE_GENERATOR = false;
}
else {
antlrTool.error("option 'codeGenDebug' must be true or false", grammar.getClassName(), t.getLine(), t.getColumn());
}
}
}
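/* A hedged illustration of the grammar-level options read above (the option names are
 * real, the values are arbitrary examples):
 *
 *   options {
 *       codeGenMakeSwitchThreshold = 3;
 *       codeGenBitsetTestThreshold = 4;
 *       codeGenDebug = true;
 *   }
 */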
public void setTool(Tool tool) {
antlrTool = tool;
}
}

59
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonAST.java

@ -0,0 +1,59 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonAST.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** Common AST node implementation */
public class CommonAST extends BaseAST {
int ttype = Token.INVALID_TYPE;
String text;
/** Get the token text for this node */
public String getText() {
return text;
}
/** Get the token type for this node */
public int getType() {
return ttype;
}
public void initialize(int t, String txt) {
setType(t);
setText(txt);
}
public void initialize(AST t) {
setText(t.getText());
setType(t.getType());
}
public CommonAST() {
}
public CommonAST(Token tok) {
initialize(tok);
}
public void initialize(Token tok) {
setText(tok.getText());
setType(tok.getType());
}
/** Set the token text for this node */
public void setText(String text_) {
text = text_;
}
/** Set the token type for this node */
public void setType(int ttype_) {
ttype = ttype_;
}
}
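// A hedged usage sketch (same package assumed; token type 4 is an arbitrary
// illustrative value, not a real vocabulary entry).
class CommonASTUsageSketch {
public static void main(String[] args) {
CommonToken tok = new CommonToken(4, "ident");
tok.setLine(1);
tok.setColumn(7);
CommonAST node = new CommonAST(tok); // copies type and text from the token
System.out.println(node.getText() + " <" + node.getType() + ">"); // prints: ident <4>
}
}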

47
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonASTWithHiddenTokens.java

@ -0,0 +1,47 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonASTWithHiddenTokens.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** A CommonAST whose initialization copies hidden token
* information from the Token used to create a node.
*/
public class CommonASTWithHiddenTokens extends CommonAST {
protected CommonHiddenStreamToken hiddenBefore, hiddenAfter; // references to hidden tokens
public CommonASTWithHiddenTokens() {
super();
}
public CommonASTWithHiddenTokens(Token tok) {
super(tok);
}
public CommonHiddenStreamToken getHiddenAfter() {
return hiddenAfter;
}
public CommonHiddenStreamToken getHiddenBefore() {
return hiddenBefore;
}
public void initialize(AST t)
{
hiddenBefore = ((CommonASTWithHiddenTokens)t).getHiddenBefore();
hiddenAfter = ((CommonASTWithHiddenTokens)t).getHiddenAfter();
super.initialize(t);
}
public void initialize(Token tok) {
CommonHiddenStreamToken t = (CommonHiddenStreamToken)tok;
super.initialize(t);
hiddenBefore = t.getHiddenBefore();
hiddenAfter = t.getHiddenAfter();
}
}

41
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonHiddenStreamToken.java

@ -0,0 +1,41 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonHiddenStreamToken.java#2 $
*/
public class CommonHiddenStreamToken extends CommonToken {
protected CommonHiddenStreamToken hiddenBefore;
protected CommonHiddenStreamToken hiddenAfter;
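// These links are normally filled in by a hidden-token stream filter
// (ANTLR's TokenStreamHiddenTokenFilter) rather than by user code, which is
// why the setters below are protected.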
public CommonHiddenStreamToken() {
super();
}
public CommonHiddenStreamToken(int t, String txt) {
super(t, txt);
}
public CommonHiddenStreamToken(String s) {
super(s);
}
public CommonHiddenStreamToken getHiddenAfter() {
return hiddenAfter;
}
public CommonHiddenStreamToken getHiddenBefore() {
return hiddenBefore;
}
protected void setHiddenAfter(CommonHiddenStreamToken t) {
hiddenAfter = t;
}
protected void setHiddenBefore(CommonHiddenStreamToken t) {
hiddenBefore = t;
}
}

56
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonToken.java

@ -0,0 +1,56 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonToken.java#2 $
*/
public class CommonToken extends Token {
// most tokens will want line and text information
protected int line;
protected String text = null;
protected int col;
public CommonToken() {
}
public CommonToken(int t, String txt) {
type = t;
setText(txt);
}
public CommonToken(String s) {
text = s;
}
public int getLine() {
return line;
}
public String getText() {
return text;
}
public void setLine(int l) {
line = l;
}
public void setText(String s) {
text = s;
}
public String toString() {
return "[\"" + getText() + "\",<" + type + ">,line=" + line + ",col=" + col + "]";
}
/** Return token's start column */
public int getColumn() {
return col;
}
public void setColumn(int c) {
col = c;
}
}

33
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultFileLineFormatter.java

@ -0,0 +1,33 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefaultFileLineFormatter.java#2 $
*/
public class DefaultFileLineFormatter extends FileLineFormatter {
public String getFormatString(String fileName, int line, int column) {
StringBuffer buf = new StringBuffer();
if (fileName != null)
buf.append(fileName + ":");
if (line != -1) {
if (fileName == null)
buf.append("line ");
buf.append(line);
if (column != -1)
buf.append(":" + column);
buf.append(':');
}
buf.append(' ');
return buf.toString();
}
}
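// A hedged sketch of the prefixes produced above (the file name "Expr.g" is hypothetical).
class DefaultFileLineFormatterSketch {
public static void main(String[] args) {
FileLineFormatter f = new DefaultFileLineFormatter();
System.out.println(f.getFormatString("Expr.g", 12, 3));  // "Expr.g:12:3: "
System.out.println(f.getFormatString(null, 12, -1));     // "line 12: "
System.out.println(f.getFormatString("Expr.g", -1, -1)); // "Expr.g: "
}
}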

73
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultJavaCodeGeneratorPrintWriterManager.java

@ -0,0 +1,73 @@
package com.fr.third.antlr;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
public class DefaultJavaCodeGeneratorPrintWriterManager implements JavaCodeGeneratorPrintWriterManager {
private Grammar grammar;
private PrintWriterWithSMAP smapOutput;
private PrintWriter currentOutput;
private Tool tool;
private Map sourceMaps = new HashMap();
private String currentFileName;
public PrintWriter setupOutput(Tool tool, Grammar grammar) throws IOException {
return setupOutput(tool, grammar, null);
}
public PrintWriter setupOutput(Tool tool, String fileName) throws IOException {
return setupOutput(tool, null, fileName);
}
public PrintWriter setupOutput(Tool tool, Grammar grammar, String fileName) throws IOException {
this.tool = tool;
this.grammar = grammar;
if (fileName == null)
fileName = grammar.getClassName();
smapOutput = new PrintWriterWithSMAP(tool.openOutputFile(fileName + ".java"));
currentFileName = fileName + ".java";
currentOutput = smapOutput;
return currentOutput;
}
public void startMapping(int sourceLine) {
smapOutput.startMapping(sourceLine);
}
public void startSingleSourceLineMapping(int sourceLine) {
smapOutput.startSingleSourceLineMapping(sourceLine);
}
public void endMapping() {
smapOutput.endMapping();
}
public void finishOutput() throws IOException {
currentOutput.close();
if (grammar != null) {
PrintWriter smapWriter;
smapWriter = tool.openOutputFile(grammar.getClassName() + ".smap");
String grammarFile = grammar.getFilename();
grammarFile = grammarFile.replace('\\', '/');
int lastSlash = grammarFile.lastIndexOf('/');
if (lastSlash != -1)
grammarFile = grammarFile.substring(lastSlash+1);
smapOutput.dump(smapWriter, grammar.getClassName(), grammarFile);
sourceMaps.put(currentFileName, smapOutput.getSourceMap());
}
currentOutput = null;
}
public Map getSourceMaps() {
return sourceMaps;
}
public int getCurrentOutputLine()
{
return smapOutput.getCurrentOutputLine();
}
}

118
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultToolErrorHandler.java

@ -0,0 +1,118 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefaultToolErrorHandler.java#2 $
*/
class DefaultToolErrorHandler implements ToolErrorHandler {
DefaultToolErrorHandler(com.fr.third.antlr.Tool tool) {
antlrTool = tool;
}
private final com.fr.third.antlr.Tool antlrTool;
CharFormatter javaCharFormatter = new JavaCharFormatter();
/** Dump token/character sets to a string array suitable for
* {@link Tool#warning(String[], String, int, int)}.
* @param output The array that will contain the token/character set dump,
* one element per k (lookahead) value
* @param outputStartIndex The index into <code>output</code> that the
* dump should start at.
* @param grammar The grammar whose token vocabulary is used to print the sets
* @param lexicalAnalysis true for lexical rule
* @param depth The depth of the ambiguity
* @param sets An array of bitsets containing the ambiguities
*/
private void dumpSets(String[] output,
int outputStartIndex,
Grammar grammar,
boolean lexicalAnalysis,
int depth,
Lookahead[] sets) {
StringBuffer line = new StringBuffer(100);
for (int i = 1; i <= depth; i++) {
line.append("k==").append(i).append(':');
if (lexicalAnalysis) {
String bits = sets[i].fset.toStringWithRanges(",", javaCharFormatter);
if (sets[i].containsEpsilon()) {
line.append("<end-of-token>");
if (bits.length() > 0) {
line.append(',');
}
}
line.append(bits);
} else {
line.append(sets[i].fset.toString(",", grammar.tokenManager.getVocabulary()));
}
output[outputStartIndex++] = line.toString();
line.setLength(0);
}
}
/** Issue a warning about ambiguity between two alternates.
* @param blk The block being analyzed
* @param lexicalAnalysis true for lexical rule
* @param depth The depth of the ambiguity
* @param sets An array of bitsets containing the ambiguities
* @param altIdx1 The zero-based index of the first ambiguous alternative
* @param altIdx2 The zero-based index of the second ambiguous alternative
*/
public void warnAltAmbiguity(Grammar grammar,
AlternativeBlock blk,
boolean lexicalAnalysis,
int depth,
Lookahead[] sets,
int altIdx1,
int altIdx2)
{
final StringBuffer line = new StringBuffer(100);
if (blk instanceof RuleBlock && ((RuleBlock)blk).isLexerAutoGenRule()) {
Alternative ai = blk.getAlternativeAt(altIdx1);
Alternative aj = blk.getAlternativeAt(altIdx2);
RuleRefElement rri = (RuleRefElement)ai.head;
RuleRefElement rrj = (RuleRefElement)aj.head;
String ri = CodeGenerator.reverseLexerRuleName(rri.targetRule);
String rj = CodeGenerator.reverseLexerRuleName(rrj.targetRule);
line.append("lexical nondeterminism between rules ");
line.append(ri).append(" and ").append(rj).append(" upon");
}
else {
if (lexicalAnalysis) {
line.append("lexical ");
}
line.append("nondeterminism between alts ");
line.append(altIdx1 + 1).append(" and ");
line.append(altIdx2 + 1).append(" of block upon");
}
final String [] output = new String [depth + 1];
output[0] = line.toString();
dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
}
/** Issue a warning about ambiguity between an alternate and exit path.
* @param blk The block being analyzed
* @param lexicalAnalysis true for lexical rule
* @param depth The depth of the ambiguity
* @param sets An array of bitsets containing the ambiguities
* @param altIdx The zero-based index of the ambiguous alternative
*/
public void warnAltExitAmbiguity(Grammar grammar,
BlockWithImpliedExitPath blk,
boolean lexicalAnalysis,
int depth,
Lookahead[] sets,
int altIdx
)
{
String [] output = new String[depth + 2];
output[0] = (lexicalAnalysis ? "lexical " : "") + "nondeterminism upon";
dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
output[depth + 1] = "between alt " + (altIdx + 1) + " and exit branch of block";
antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
}
}

811
fine-antlr-old/src/main/java/com/fr/third/antlr/DefineGrammarSymbols.java

@ -0,0 +1,811 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefineGrammarSymbols.java#2 $
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
/**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
* the token and rule symbols to the grammar symbol table.
*
* Token types are assigned to token symbols in this class also.
* The token type for a token is done in the order seen (lexically).
*/
public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
// Contains all of the defined parser and lexer Grammar's indexed by name
protected Hashtable grammars = new Hashtable();
// Contains all the TokenManagers indexed by name
protected Hashtable tokenManagers = new Hashtable();
// Current grammar (parser or lexer)
protected Grammar grammar;
// The tool under which this is invoked
protected Tool tool;
// The grammar analyzer object
LLkAnalyzer analyzer;
// The command-line arguments passed to the tool.
// This allows each grammar to parse the arguments as it is created
String[] args;
// Name for default token manager does not match any valid name
static final String DEFAULT_TOKENMANAGER_NAME = "*default";
// Header actions apply to all parsers unless redefined
// Contains all of the header actions indexed by name
protected Hashtable headerActions = new Hashtable();
// Place where preamble is stored until a grammar is defined
Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
// The target language
String language = "Java";
protected int numLexers = 0;
protected int numParsers = 0;
protected int numTreeParsers = 0;
public DefineGrammarSymbols(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
tool = tool_;
args = args_;
analyzer = analyzer_;
}
public void _refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
if (!(grammar instanceof LexerGrammar)) {
// String literals are treated like tokens except by the lexer
String str = lit.getText();
if (grammar.tokenManager.getTokenSymbol(str) != null) {
// string symbol is already defined
return;
}
StringLiteralSymbol sl = new StringLiteralSymbol(str);
int tt = grammar.tokenManager.nextTokenType();
sl.setTokenType(tt);
grammar.tokenManager.define(sl);
}
}
/** Reference a token */
public void _refToken(Token assignId,
Token t,
Token label,
Token args,
boolean inverted,
int autoGenType,
boolean lastInRule) {
String id = t.getText();
if (!grammar.tokenManager.tokenDefined(id)) {
/*
// RK: dish out a warning if the token was not defined before.
tool.warning("Token '" + id + "' defined outside tokens section",
tool.grammarFile, t.getLine(), t.getColumn());
*/
int tt = grammar.tokenManager.nextTokenType();
TokenSymbol ts = new TokenSymbol(id);
ts.setTokenType(tt);
grammar.tokenManager.define(ts);
}
}
/** Abort the processing of a grammar due to syntax errors */
public void abortGrammar() {
if (grammar != null && grammar.getClassName() != null) {
grammars.remove(grammar.getClassName());
}
grammar = null;
}
public void beginAlt(boolean doAST_) {
}
public void beginChildList() {
}
// Exception handling
public void beginExceptionGroup() {
}
public void beginExceptionSpec(Token label) {
}
public void beginSubRule(Token label, Token start, boolean not) {
}
public void beginTree(Token tok) throws SemanticException {
}
/** Define a lexer or parser rule */
public void defineRuleName(Token r,
String access,
boolean ruleAutoGen,
String docComment)
throws SemanticException {
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) {
if (r.type == ANTLRTokenTypes.TOKEN_REF) {
// lexer rule
id = CodeGenerator.encodeLexerRuleName(id);
// make sure we define it as token identifier also
if (!grammar.tokenManager.tokenDefined(r.getText())) {
int tt = grammar.tokenManager.nextTokenType();
TokenSymbol ts = new TokenSymbol(r.getText());
ts.setTokenType(tt);
grammar.tokenManager.define(ts);
}
}
RuleSymbol rs;
if (grammar.isDefined(id)) {
// symbol seen before?
rs = (RuleSymbol)grammar.getSymbol(id);
// rule just referenced or has it been defined yet?
if (rs.isDefined()) {
tool.error("redefinition of rule " + id, grammar.getFilename(), r.getLine(), r.getColumn());
}
}
else {
rs = new RuleSymbol(id);
grammar.define(rs);
}
rs.setDefined();
rs.access = access;
rs.comment = docComment;
}
/** Define a token from tokens {...}.
* Must be label and literal or just label or just a literal.
*/
public void defineToken(Token tokname, Token tokliteral) {
String name = null;
String literal = null;
if (tokname != null) {
name = tokname.getText();
}
if (tokliteral != null) {
literal = tokliteral.getText();
}
// System.out.println("defining " + name + " with literal " + literal);
//
if (literal != null) {
StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(literal);
if (sl != null) {
// This literal is known already.
// If the literal has no label already, but we can provide
// one here, then no problem, just map the label to the literal
// and don't change anything else.
// Otherwise, labels conflict: error.
if (name == null || sl.getLabel() != null) {
tool.warning("Redefinition of literal in tokens {...}: " + literal, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
return;
}
else if (name != null) {
// The literal had no label, but new def does. Set it.
sl.setLabel(name);
// Also, map the label to the literal.
grammar.tokenManager.mapToTokenSymbol(name, sl);
}
}
// if they provide a name/label and that name/label already
// exists, just hook this literal onto old token.
if (name != null) {
TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(name);
if (ts != null) {
// watch out that the label is not more than just a token.
// If it already has a literal attached, then: conflict.
if (ts instanceof StringLiteralSymbol) {
tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
return;
}
// a simple token symbol such as DECL is defined
// must convert it to a StringLiteralSymbol with a
// label by co-opting token type and killing old
// TokenSymbol. Kill mapping and entry in vector
// of token manager.
// First, claim token type.
int ttype = ts.getTokenType();
// now, create string literal with label
sl = new StringLiteralSymbol(literal);
sl.setTokenType(ttype);
sl.setLabel(name);
// redefine this critter as a string literal
grammar.tokenManager.define(sl);
// make sure the label can be used also.
grammar.tokenManager.mapToTokenSymbol(name, sl);
return;
}
// here, literal was labeled but not by a known token symbol.
}
sl = new StringLiteralSymbol(literal);
int tt = grammar.tokenManager.nextTokenType();
sl.setTokenType(tt);
sl.setLabel(name);
grammar.tokenManager.define(sl);
if (name != null) {
// make the label point at token symbol too
grammar.tokenManager.mapToTokenSymbol(name, sl);
}
}
// create a token in the token manager not a literal
else {
if (grammar.tokenManager.tokenDefined(name)) {
tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokname.getLine(), tokname.getColumn());
return;
}
int tt = grammar.tokenManager.nextTokenType();
TokenSymbol ts = new TokenSymbol(name);
ts.setTokenType(tt);
grammar.tokenManager.define(ts);
}
}
public void endAlt() {
}
public void endChildList() {
}
public void endExceptionGroup() {
}
public void endExceptionSpec() {
}
public void endGrammar() {
}
/** Called after the optional options section, to compensate for
* options that may not have been set.
* This method is bigger than it needs to be, but is much more
* clear if I delineate all the cases.
*/
public void endOptions() {
// NO VOCAB OPTIONS
if (grammar.exportVocab == null && grammar.importVocab == null) {
grammar.exportVocab = grammar.getClassName();
// Can we get initial vocab from default shared vocab?
if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
// Use the already-defined token manager
grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME);
// System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'");
grammar.setTokenManager(tm);
return;
}
// no shared vocab for file, make new one
// System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'");
TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
grammar.setTokenManager(tm);
// Add the token manager to the list of token managers
tokenManagers.put(grammar.exportVocab, tm);
// no default vocab, so make this the default vocab
tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
return;
}
// NO OUTPUT, BUT HAS INPUT VOCAB
if (grammar.exportVocab == null && grammar.importVocab != null) {
grammar.exportVocab = grammar.getClassName();
// first make sure input!=output
if (grammar.importVocab.equals(grammar.exportVocab)) {
tool.warning("Grammar " + grammar.getClassName() +
" cannot have importVocab same as default output vocab (grammar name); ignored.");
// kill importVocab option and try again: use default vocab
grammar.importVocab = null;
endOptions();
return;
}
// check to see if the vocab is already in memory
// (defined by another grammar in the file). Not normal situation.
if (tokenManagers.containsKey(grammar.importVocab)) {
// make a copy since we'll be generating a new output vocab
// and we don't want to affect this one. Set the name to
// the default output vocab==classname.
TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
// System.out.println("Duping importVocab of " + grammar.importVocab);
TokenManager dup = (TokenManager)tm.clone();
dup.setName(grammar.exportVocab);
// System.out.println("Setting name to " + grammar.exportVocab);
dup.setReadOnly(false);
grammar.setTokenManager(dup);
tokenManagers.put(grammar.exportVocab, dup);
return;
}
// System.out.println("reading in vocab "+grammar.importVocab);
// Must be a file, go get it.
ImportVocabTokenManager tm =
new ImportVocabTokenManager(grammar,
grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
grammar.exportVocab,
tool);
tm.setReadOnly(false); // since renamed, can write out
// Add this token manager to the list so its tokens will be generated
tokenManagers.put(grammar.exportVocab, tm);
// System.out.println("vocab renamed to default output vocab of "+tm.getName());
// Assign the token manager to this grammar.
grammar.setTokenManager(tm);
// set default vocab if none
if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
}
return;
}
// OUTPUT VOCAB, BUT NO INPUT VOCAB
if (grammar.exportVocab != null && grammar.importVocab == null) {
// share with previous vocab if it exists
if (tokenManagers.containsKey(grammar.exportVocab)) {
// Use the already-defined token manager
TokenManager tm = (TokenManager)tokenManagers.get(grammar.exportVocab);
// System.out.println("Sharing exportVocab of " + grammar.exportVocab);
grammar.setTokenManager(tm);
return;
}
// create new output vocab
// System.out.println("Creating exportVocab " + grammar.exportVocab);
TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
grammar.setTokenManager(tm);
// Add the token manager to the list of token managers
tokenManagers.put(grammar.exportVocab, tm);
// set default vocab if none
if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
}
return;
}
// BOTH INPUT AND OUTPUT VOCAB
if (grammar.exportVocab != null && grammar.importVocab != null) {
// don't want input==output
if (grammar.importVocab.equals(grammar.exportVocab)) {
tool.error("exportVocab of " + grammar.exportVocab + " same as importVocab; probably not what you want");
}
// does the input vocab already exist in memory?
if (tokenManagers.containsKey(grammar.importVocab)) {
// make a copy since we'll be generating a new output vocab
// and we don't want to affect this one.
TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
// System.out.println("Duping importVocab of " + grammar.importVocab);
TokenManager dup = (TokenManager)tm.clone();
dup.setName(grammar.exportVocab);
// System.out.println("Setting name to " + grammar.exportVocab);
dup.setReadOnly(false);
grammar.setTokenManager(dup);
tokenManagers.put(grammar.exportVocab, dup);
return;
}
// Must be a file, go get it.
ImportVocabTokenManager tm =
new ImportVocabTokenManager(grammar,
grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
grammar.exportVocab,
tool);
tm.setReadOnly(false); // write it out as we've changed name
// Add this token manager to the list so its tokens will be generated
tokenManagers.put(grammar.exportVocab, tm);
// Assign the token manager to this grammar.
grammar.setTokenManager(tm);
// set default vocab if none
if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
}
return;
}
}
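/* A hedged illustration of the vocab options this method compensates for
 * (the grammar/vocab names are hypothetical):
 *
 *   options {
 *       importVocab = Calc;      // read CalcTokenTypes.txt produced by another grammar
 *       exportVocab = CalcAST;   // write CalcASTTokenTypes.txt for downstream grammars
 *   }
 */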
public void endRule(String r) {
}
public void endSubRule() {
}
public void endTree() {
}
public void hasError() {
}
public void noASTSubRule() {
}
public void oneOrMoreSubRule() {
}
public void optionalSubRule() {
}
public void setUserExceptions(String thr) {
}
public void refAction(Token action) {
}
public void refArgAction(Token action) {
}
public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
}
public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
}
public void refElementOption(Token option, Token value) {
}
public void refTokensSpecElementOption(Token tok, Token option, Token value) {
}
public void refExceptionHandler(Token exTypeAndName, Token action) {
}
// Header action applies to all parsers and lexers.
public void refHeaderAction(Token name, Token act) {
String key;
if (name == null)
key = "";
else
key = StringUtils.stripFrontBack(name.getText(), "\"", "\"");
// FIXME: depending on the mode the inserted header actions should
// be checked for sanity.
if (headerActions.containsKey(key)) {
if (key.equals(""))
tool.error(act.getLine() + ": header action already defined");
else
tool.error(act.getLine() + ": header action '" + key + "' already defined");
}
headerActions.put(key, act);
}
public String getHeaderAction(String name) {
Token t = (Token)headerActions.get(name);
if (t == null) {
return "";
}
return t.getText();
}
public int getHeaderActionLine(String name) {
Token t = (Token)headerActions.get(name);
if (t == null) {
return 0;
}
return t.getLine();
}
public void refInitAction(Token action) {
}
public void refMemberAction(Token act) {
}
public void refPreambleAction(Token act) {
thePreambleAction = act;
}
public void refReturnAction(Token returnAction) {
}
public void refRule(Token idAssign,
Token r,
Token label,
Token args,
int autoGenType) {
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) {
// lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
if (!grammar.isDefined(id)) {
grammar.define(new RuleSymbol(id));
}
}
public void refSemPred(Token pred) {
}
public void refStringLiteral(Token lit,
Token label,
int autoGenType,
boolean lastInRule) {
_refStringLiteral(lit, label, autoGenType, lastInRule);
}
/** Reference a token */
public void refToken(Token assignId, Token t, Token label, Token args,
boolean inverted, int autoGenType, boolean lastInRule) {
_refToken(assignId, t, label, args, inverted, autoGenType, lastInRule);
}
public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
// ensure that the DefineGrammarSymbols methods are called; otherwise a range adds more
// token refs to the alternative by calling MakeGrammar.refToken etc...
if (t1.getText().charAt(0) == '"') {
refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
}
else {
_refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
}
if (t2.getText().charAt(0) == '"') {
_refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
}
else {
_refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
}
}
public void refTreeSpecifier(Token treeSpec) {
}
public void refWildcard(Token t, Token label, int autoGenType) {
}
/** Get ready to process a new grammar */
public void reset() {
grammar = null;
}
public void setArgOfRuleRef(Token argaction) {
}
/** Set the character vocabulary for a lexer */
public void setCharVocabulary(BitSet b) {
// grammar should enforce that this is only called for lexer
((LexerGrammar)grammar).setCharVocabulary(b);
}
/** setFileOption: Associate an option value with a key.
* This applies to options for an entire grammar file.
* @param key The token containing the option name
* @param value The token containing the option value.
*/
public void setFileOption(Token key, Token value, String filename) {
if (key.getText().equals("language")) {
if (value.getType() == ANTLRParser.STRING_LITERAL) {
language = StringUtils.stripBack(StringUtils.stripFront(value.getText(), '"'), '"');
}
else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) {
language = value.getText();
}
else {
tool.error("language option must be string or identifier", filename, value.getLine(), value.getColumn());
}
}
else if (key.getText().equals("mangleLiteralPrefix")) {
if (value.getType() == ANTLRParser.STRING_LITERAL) {
tool.literalsPrefix = StringUtils.stripFrontBack(value.getText(), "\"", "\"");
}
else {
tool.error("mangleLiteralPrefix option must be string", filename, value.getLine(), value.getColumn());
}
}
else if (key.getText().equals("upperCaseMangledLiterals")) {
if (value.getText().equals("true")) {
tool.upperCaseMangledLiterals = true;
}
else if (value.getText().equals("false")) {
tool.upperCaseMangledLiterals = false;
}
else {
grammar.antlrTool.error("Value for upperCaseMangledLiterals must be true or false", filename, key.getLine(), key.getColumn());
}
}
else if ( key.getText().equals("namespaceStd") ||
key.getText().equals("namespaceAntlr") ||
key.getText().equals("genHashLines")
) {
if (!language.equals("Cpp")) {
tool.error(key.getText() + " option only valid for C++", filename, key.getLine(), key.getColumn());
}
else {
if (key.getText().equals("noConstructors")) {
if (!(value.getText().equals("true") || value.getText().equals("false")))
tool.error("noConstructors option must be true or false", filename, value.getLine(), value.getColumn());
tool.noConstructors = value.getText().equals("true");
} else if (key.getText().equals("genHashLines")) {
if (!(value.getText().equals("true") || value.getText().equals("false")))
tool.error("genHashLines option must be true or false", filename, value.getLine(), value.getColumn());
tool.genHashLines = value.getText().equals("true");
}
else {
if (value.getType() != ANTLRParser.STRING_LITERAL) {
tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
}
else {
if (key.getText().equals("namespaceStd"))
tool.namespaceStd = value.getText();
else if (key.getText().equals("namespaceAntlr"))
tool.namespaceAntlr = value.getText();
}
}
}
}
else if ( key.getText().equals("namespace") ) {
if ( !language.equals("Cpp") && !language.equals("CSharp") )
{
tool.error(key.getText() + " option only valid for C++ and C# (a.k.a CSharp)", filename, key.getLine(), key.getColumn());
}
else
{
if (value.getType() != ANTLRParser.STRING_LITERAL)
{
tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
}
else {
if (key.getText().equals("namespace"))
tool.setNameSpace(value.getText());
}
}
}
else {
tool.error("Invalid file-level option: " + key.getText(), filename, key.getLine(), value.getColumn());
}
}
/** setGrammarOption: Associate an option value with a key.
* This function forwards to Grammar.setOption for some options.
* @param key The token containing the option name
* @param value The token containing the option value.
*/
public void setGrammarOption(Token key, Token value) {
if (key.getText().equals("tokdef") || key.getText().equals("tokenVocabulary")) {
tool.error("tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n" +
" Use importVocab/exportVocab instead. Please see the documentation.\n" +
" The previous options were so heinous that Terence changed the whole\n" +
" vocabulary mechanism; it was better to change the names rather than\n" +
" subtly change the functionality of the known options. Sorry!", grammar.getFilename(), value.getLine(), value.getColumn());
}
else if (key.getText().equals("literal") &&
grammar instanceof LexerGrammar) {
tool.error("the literal option is invalid >= ANTLR 2.6.0.\n" +
" Use the \"tokens {...}\" mechanism instead.",
grammar.getFilename(), value.getLine(), value.getColumn());
}
else if (key.getText().equals("exportVocab")) {
// Set the token manager associated with the parser
if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
grammar.exportVocab = value.getText();
}
else {
tool.error("exportVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
}
}
else if (key.getText().equals("importVocab")) {
if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
grammar.importVocab = value.getText();
}
else {
tool.error("importVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
}
}
else if ( key.getText().equals("k") ) {
if( grammar instanceof TreeWalkerGrammar
&& ! value.getText().equals("1") ) {
tool.error("Treewalkers only support k=1", grammar.getFilename(), value.getLine(), value.getColumn());
}
else {
grammar.setOption(key.getText(), value);
}
}
else {
// Forward all unrecognized options to the grammar
grammar.setOption(key.getText(), value);
}
}
public void setRuleOption(Token key, Token value) {
}
public void setSubruleOption(Token key, Token value) {
}
/** Start a new lexer */
public void startLexer(String file, Token name, String superClass, String doc) {
if (numLexers > 0) {
tool.panic("You may only have one lexer per grammar file: class " + name.getText());
}
numLexers++;
reset();
//System.out.println("Processing lexer '" + name.getText() + "'");
// Does the lexer already exist?
Grammar g = (Grammar)grammars.get(name);
if (g != null) {
if (!(g instanceof LexerGrammar)) {
tool.panic("'" + name.getText() + "' is already defined as a non-lexer");
}
else {
tool.panic("Lexer '" + name.getText() + "' is already defined");
}
}
else {
// Create a new lexer grammar
LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass);
lg.comment = doc;
lg.processArguments(args);
lg.setFilename(file);
grammars.put(lg.getClassName(), lg);
// Use any preamble action
lg.preambleAction = thePreambleAction;
thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
// This is now the current grammar
grammar = lg;
}
}
/** Start a new parser */
public void startParser(String file, Token name, String superClass, String doc) {
if (numParsers > 0) {
tool.panic("You may only have one parser per grammar file: class " + name.getText());
}
numParsers++;
reset();
//System.out.println("Processing parser '" + name.getText() + "'");
// Is this grammar already defined?
Grammar g = (Grammar)grammars.get(name);
if (g != null) {
if (!(g instanceof ParserGrammar)) {
tool.panic("'" + name.getText() + "' is already defined as a non-parser");
}
else {
tool.panic("Parser '" + name.getText() + "' is already defined");
}
}
else {
// Create a new grammar
grammar = new ParserGrammar(name.getText(), tool, superClass);
grammar.comment = doc;
grammar.processArguments(args);
grammar.setFilename(file);
grammars.put(grammar.getClassName(), grammar);
// Use any preamble action
grammar.preambleAction = thePreambleAction;
thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
}
}
/** Start a new tree-walker */
public void startTreeWalker(String file, Token name, String superClass, String doc) {
if (numTreeParsers > 0) {
tool.panic("You may only have one tree parser per grammar file: class " + name.getText());
}
numTreeParsers++;
reset();
//System.out.println("Processing tree-walker '" + name.getText() + "'");
// Is this grammar already defined?
Grammar g = (Grammar)grammars.get(name);
if (g != null) {
if (!(g instanceof TreeWalkerGrammar)) {
tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker");
}
else {
tool.panic("Tree-walker '" + name.getText() + "' is already defined");
}
}
else {
// Create a new grammar
grammar = new TreeWalkerGrammar(name.getText(), tool, superClass);
grammar.comment = doc;
grammar.processArguments(args);
grammar.setFilename(file);
grammars.put(grammar.getClassName(), grammar);
// Use any preamble action
grammar.preambleAction = thePreambleAction;
thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
}
}
public void synPred() {
}
public void zeroOrMoreSubRule() {
}
}

68
fine-antlr-old/src/main/java/com/fr/third/antlr/DumpASTVisitor.java

@ -0,0 +1,68 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DumpASTVisitor.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** Simple class to dump the contents of an AST to the output */
public class DumpASTVisitor implements ASTVisitor {
protected int level = 0;
private void tabs() {
for (int i = 0; i < level; i++) {
System.out.print(" ");
}
}
public void visit(AST node) {
// Flatten this level of the tree if it has no children
boolean flatten = /*true*/ false;
AST node2;
for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
if (node2.getFirstChild() != null) {
flatten = false;
break;
}
}
for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
if (!flatten || node2 == node) {
tabs();
}
if (node2.getText() == null) {
System.out.print("nil");
}
else {
System.out.print(node2.getText());
}
System.out.print(" [" + node2.getType() + "] ");
if (flatten) {
System.out.print(' ');
}
else {
System.out.println("");
}
if (node2.getFirstChild() != null) {
level++;
visit(node2.getFirstChild());
level--;
}
}
if (flatten) {
System.out.println("");
}
}
}
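// A hedged sketch (same package assumed; token types 4 and 5 are arbitrary values):
// build a tiny "+ 1 2" tree with CommonAST and dump it to System.out.
class DumpASTVisitorSketch {
public static void main(String[] args) {
CommonAST plus = new CommonAST();
plus.initialize(4, "+");
CommonAST one = new CommonAST();
one.initialize(5, "1");
CommonAST two = new CommonAST();
two.initialize(5, "2");
plus.addChild(one);               // addChild is inherited from BaseAST
plus.addChild(two);
new DumpASTVisitor().visit(plus); // prints one line per node, children indented
}
}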

22
fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionHandler.java

@ -0,0 +1,22 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ExceptionHandler.java#2 $
*/
class ExceptionHandler {
// Type of the ANTLR exception class to catch and the variable decl
protected Token exceptionTypeAndName;
// The action to be executed when the exception is caught
protected Token action;
public ExceptionHandler(Token exceptionTypeAndName_,
Token action_) {
exceptionTypeAndName = exceptionTypeAndName_;
action = action_;
}
}

29
fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionSpec.java

@ -0,0 +1,29 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ExceptionSpec.java#2 $
*/
import com.fr.third.antlr.collections.impl.Vector;
class ExceptionSpec {
// Non-null if this refers to a labeled rule
// Use a token instead of a string to get the line information
protected Token label;
// List of ExceptionHandler (catch phrases)
protected Vector handlers;
public ExceptionSpec(Token label_) {
label = label_;
handlers = new Vector();
}
public void addHandler(ExceptionHandler handler) {
handlers.appendElement(handler);
}
}

14
fine-antlr-old/src/main/java/com/fr/third/antlr/FileCopyException.java

@ -0,0 +1,14 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/FileCopyException.java#2 $
*/
class FileCopyException extends java.io.IOException {
public FileCopyException(String msg) {
super(msg);
}
}

27
fine-antlr-old/src/main/java/com/fr/third/antlr/FileLineFormatter.java

@ -0,0 +1,27 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/FileLineFormatter.java#2 $
*/
public abstract class FileLineFormatter {
private static FileLineFormatter formatter = new DefaultFileLineFormatter();
public static FileLineFormatter getFormatter() {
return formatter;
}
public static void setFormatter(FileLineFormatter f) {
formatter = f;
}
/** @param fileName the file that should appear in the prefix. (or null)
* @param line the line (or -1)
* @param column the column (or -1)
*/
public abstract String getFormatString(String fileName, int line, int column);
}
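// A hedged sketch: installing a custom message-prefix formatter process-wide
// (the prefix format is chosen arbitrarily for illustration).
class CustomFileLineFormatterSketch {
public static void main(String[] args) {
FileLineFormatter.setFormatter(new FileLineFormatter() {
public String getFormatString(String fileName, int line, int column) {
return (fileName == null ? "" : fileName) + "(" + line + "," + column + "): ";
}
});
System.out.println(FileLineFormatter.getFormatter().getFormatString("Expr.g", 3, 9));
// prints: Expr.g(3,9):
}
}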

288
fine-antlr-old/src/main/java/com/fr/third/antlr/Grammar.java

@ -0,0 +1,288 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Grammar.java#2 $
*/
import java.io.IOException;
import java.util.Enumeration;
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.Vector;
/**A Grammar holds a set of rules (which are stored
* in a symbol table). Most of the time a grammar
* needs a code generator and an LLkAnalyzer too.
*/
public abstract class Grammar {
protected Tool antlrTool;
protected CodeGenerator generator;
protected LLkGrammarAnalyzer theLLkAnalyzer;
protected Hashtable symbols;
protected boolean buildAST = false;
protected boolean analyzerDebug = false;
protected boolean interactive = false;
protected String superClass = null;
/** The token manager associated with the grammar, if any.
* The token manager is responsible for maintaining the set of valid tokens and
* is conceptually shared between the lexer and parser. This may be either a
* LexerGrammar or an ImportVocabTokenManager.
*/
protected TokenManager tokenManager;
/** The name of the export vocabulary...used to generate the output
* token types interchange file.
*/
protected String exportVocab = null;
/** The name of the import vocabulary. "Initial conditions"
*/
protected String importVocab = null;
// Mapping from String keys to Token option values
protected Hashtable options;
// Vector of RuleSymbol entries
protected Vector rules;
protected Token preambleAction = new CommonToken(Token.INVALID_TYPE, "");
protected String className = null;
protected String fileName = null;
protected Token classMemberAction = new CommonToken(Token.INVALID_TYPE, "");
protected boolean hasSyntacticPredicate = false;
protected boolean hasUserErrorHandling = false;
// max lookahead that can be attempted for this parser.
protected int maxk = 1;
// options
protected boolean traceRules = false;
protected boolean debuggingOutput = false;
protected boolean defaultErrorHandler = true;
protected String comment = null; // javadoc comment
public Grammar(String className_, Tool tool_, String superClass) {
className = className_;
antlrTool = tool_;
symbols = new Hashtable();
options = new Hashtable();
rules = new Vector(100);
this.superClass = superClass;
}
/** Define a rule */
public void define(RuleSymbol rs) {
rules.appendElement(rs);
// add the symbol to the rules hash table
symbols.put(rs.getId(), rs);
}
/** Top-level call to generate the code for this grammar */
public abstract void generate() throws IOException;
protected String getClassName() {
return className;
}
/* Does this grammar have a default error handler? */
public boolean getDefaultErrorHandler() {
return defaultErrorHandler;
}
public String getFilename() {
return fileName;
}
/** Get an integer option. Given the name of the option find its
* associated integer value. If the associated value is not an integer or
* is not in the table, then throw an exception of type NumberFormatException.
* @param key The name of the option
* @return The value associated with the key.
*/
public int getIntegerOption(String key) throws NumberFormatException {
Token t = (Token)options.get(key);
if (t == null || t.getType() != ANTLRTokenTypes.INT) {
throw new NumberFormatException();
}
else {
return Integer.parseInt(t.getText());
}
}
/** Get an option. Given the name of the option find its associated value.
* @param key The name of the option
* @return The value associated with the key, or null if the key has not been set.
*/
public Token getOption(String key) {
return (Token)options.get(key);
}
// Get name of class from which generated parser/lexer inherits
protected abstract String getSuperClass();
public GrammarSymbol getSymbol(String s) {
return (GrammarSymbol)symbols.get(s);
}
public Enumeration getSymbols() {
return symbols.elements();
}
/** Check the existence of an option in the table
* @param key The name of the option
* @return true if the option is in the table
*/
public boolean hasOption(String key) {
return options.containsKey(key);
}
/** Is a rule symbol defined? (not used for tokens) */
public boolean isDefined(String s) {
return symbols.containsKey(s);
}
/**Process command line arguments. Implemented in subclasses */
public abstract void processArguments(String[] args);
public void setCodeGenerator(CodeGenerator gen) {
generator = gen;
}
public void setFilename(String s) {
fileName = s;
}
public void setGrammarAnalyzer(LLkGrammarAnalyzer a) {
theLLkAnalyzer = a;
}
/** Set a generic option.
* This associates a generic option key with a Token value.
* No validation is performed by this method, although users of the value
* (code generation and/or analysis) may require certain formats.
* The value is stored as a token so that the location of an error
* can be reported.
* @param key The name of the option.
* @param value The value to associate with the key.
* @return true if the option was a valid generic grammar option, false o/w
*/
public boolean setOption(String key, Token value) {
options.put(key, value);
String s = value.getText();
int i;
if (key.equals("k")) {
try {
maxk = getIntegerOption("k");
if ( maxk<=0 ) {
antlrTool.error("option 'k' must be greater than 0 (was " +
value.getText() + ")",
getFilename(),
value.getLine(),
value.getColumn());
maxk = 1;
}
}
catch (NumberFormatException e) {
antlrTool.error("option 'k' must be an integer (was " + value.getText() + ")", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("codeGenMakeSwitchThreshold")) {
try {
i = getIntegerOption("codeGenMakeSwitchThreshold");
}
catch (NumberFormatException e) {
antlrTool.error("option 'codeGenMakeSwitchThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("codeGenBitsetTestThreshold")) {
try {
i = getIntegerOption("codeGenBitsetTestThreshold");
}
catch (NumberFormatException e) {
antlrTool.error("option 'codeGenBitsetTestThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("defaultErrorHandler")) {
if (s.equals("true")) {
defaultErrorHandler = true;
}
else if (s.equals("false")) {
defaultErrorHandler = false;
}
else {
antlrTool.error("Value for defaultErrorHandler must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("analyzerDebug")) {
if (s.equals("true")) {
analyzerDebug = true;
}
else if (s.equals("false")) {
analyzerDebug = false;
}
else {
antlrTool.error("option 'analyzerDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("codeGenDebug")) {
if (s.equals("true")) {
analyzerDebug = true;
}
else if (s.equals("false")) {
analyzerDebug = false;
}
else {
antlrTool.error("option 'codeGenDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("classHeaderSuffix")) {
return true;
}
if (key.equals("classHeaderPrefix")) {
return true;
}
if (key.equals("namespaceAntlr")) {
return true;
}
if (key.equals("namespaceStd")) {
return true;
}
if (key.equals("genHashLines")) {
return true;
}
if (key.equals("noConstructors")) {
return true;
}
return false;
}
public void setTokenManager(TokenManager tokenManager_) {
tokenManager = tokenManager_;
}
/** Print out the grammar without actions */
public String toString() {
StringBuffer buf = new StringBuffer(20000);
Enumeration ids = rules.elements();
while (ids.hasMoreElements()) {
RuleSymbol rs = (RuleSymbol)ids.nextElement();
if (!rs.id.equals("mnextToken")) {
buf.append(rs.getBlock().toString());
buf.append("\n\n");
}
}
return buf.toString();
}
}

36
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAnalyzer.java

@ -0,0 +1,36 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarAnalyzer.java#2 $
*/
/**A GrammarAnalyzer computes lookahead from Grammar (which contains
* a grammar symbol table) and can then answer questions about the
* grammar.
*
* To access the RuleBlock for a rule name, the grammar symbol table
* is consulted.
*
* There should be no distinction between static & dynamic analysis.
* In other words, some of the easy analysis can be done statically
* and then the part that is hard statically can be deferred to
* parse-time. Interestingly, computing LL(k) for k>1 lookahead
* statically is O(|T|^k) where T is the grammar vocabulary, but,
* is O(k) at run-time (ignoring the large constant associated with
* the size of the grammar). In English, the difference can be
* described as "find the set of all possible k-sequences of input"
* versus "does this specific k-sequence match?".
*/
public interface GrammarAnalyzer {
/**The epsilon token type is an imaginary type used
* during analysis. It indicates an incomplete look() computation.
* Must be kept consistent with Token constants to be between
* MIN_USER_TYPE and INVALID_TYPE.
*/
// public static final int EPSILON_TYPE = 2;
public static final int NONDETERMINISTIC = Integer.MAX_VALUE; // lookahead depth
public static final int LOOKAHEAD_DEPTH_INIT = -1;
}

68
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAtom.java

@ -0,0 +1,68 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarAtom.java#2 $
*/
/** A GrammarAtom is either a token ref, a character ref, or a string literal.
* The analysis doesn't care.
*/
abstract class GrammarAtom extends AlternativeElement {
protected String label;
protected String atomText;
protected int tokenType = Token.INVALID_TYPE;
protected boolean not = false; // ~T or ~'c' or ~"foo"
/** Set to type of AST node to create during parse. Defaults to what is
* set in the TokenSymbol.
*/
protected String ASTNodeType = null;
public GrammarAtom(Grammar g, Token t, int autoGenType) {
super(g, t, autoGenType);
atomText = t.getText();
}
public String getLabel() {
return label;
}
public String getText() {
return atomText;
}
public int getType() {
return tokenType;
}
public void setLabel(String label_) {
label = label_;
}
public String getASTNodeType() {
return ASTNodeType;
}
public void setASTNodeType(String type) {
ASTNodeType = type;
}
public void setOption(Token option, Token value) {
if (option.getText().equals("AST")) {
setASTNodeType(value.getText());
}
else {
grammar.antlrTool.error("Invalid element option:" + option.getText(),
grammar.getFilename(), option.getLine(), option.getColumn());
}
}
public String toString() {
String s = " ";
if (label != null) s += label + ":";
if (not) s += "~";
return s + atomText;
}
}

62
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarElement.java

@@ -0,0 +1,62 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarElement.java#2 $
*/
/**A GrammarElement is a generic node in our
* data structure that holds a grammar in memory.
* This data structure can be used for static
* analysis or for dynamic analysis (during parsing).
* Every node must know which grammar owns it, how
* to generate code, and how to do analysis.
*/
abstract class GrammarElement {
public static final int AUTO_GEN_NONE = 1;
public static final int AUTO_GEN_CARET = 2;
public static final int AUTO_GEN_BANG = 3;
/*
* Note that Java does static argument type matching to
* determine which function to execute on the receiver.
* Here, that implies that we cannot simply say
* grammar.generator.gen(this) in GrammarElement or
* only CodeGenerator.gen(GrammarElement ge) would
* ever be called.
*/
protected Grammar grammar;
protected int line;
protected int column;
public GrammarElement(Grammar g) {
grammar = g;
line = -1;
column = -1;
}
public GrammarElement(Grammar g, Token start) {
grammar = g;
line = start.getLine();
column = start.getColumn();
}
public void generate() {
}
public int getLine() {
return line;
}
public int getColumn() {
return column;
}
public Lookahead look(int k) {
return null;
}
public abstract String toString();
}
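
The comment above about static argument type matching is the classic overload-resolution pitfall: a single grammar.generator.gen(this) written in GrammarElement would always bind to gen(GrammarElement), never to a subclass-specific overload, which is why each concrete element overrides generate() and makes the call itself. A self-contained sketch of that behavior; the class names here are invented purely for illustration.

class OverloadDispatchSketch {
    static class Element { }
    static class AtomElement extends Element { }

    static class Generator {
        void gen(Element e)     { System.out.println("gen(Element)"); }
        void gen(AtomElement e) { System.out.println("gen(AtomElement)"); }
    }

    public static void main(String[] args) {
        Generator g = new Generator();
        Element e = new AtomElement();
        g.gen(e);               // prints gen(Element): the overload is chosen from the static type
        g.gen((AtomElement) e); // prints gen(AtomElement): what the generate() overrides achieve
    }
}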

30
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarSymbol.java

@@ -0,0 +1,30 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarSymbol.java#2 $
*/
/**A GrammarSymbol is a generic symbol that can be
* added to the symbol table for a grammar.
*/
abstract class GrammarSymbol {
protected String id;
public GrammarSymbol() {
}
public GrammarSymbol(String s) {
id = s;
}
public String getId() {
return id;
}
public void setId(String s) {
id = s;
}
}

102
fine-antlr-old/src/main/java/com/fr/third/antlr/ImportVocabTokenManager.java

@@ -0,0 +1,102 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ImportVocabTokenManager.java#2 $
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.Reader;
/** Static implementation of the TokenManager, used for importVocab option */
class ImportVocabTokenManager extends SimpleTokenManager implements Cloneable {
private String filename;
protected Grammar grammar;
// FIXME: it would be nice if the path to the original grammar file was
// also searched.
ImportVocabTokenManager(Grammar grammar, String filename_, String name_, Tool tool_) {
// initialize
super(name_, tool_);
this.grammar = grammar;
this.filename = filename_;
// Figure out exactly where the file lives. Check $PWD first,
// and then search in -o <output_dir>.
//
File grammarFile = new File(filename);
if (!grammarFile.exists()) {
grammarFile = new File(antlrTool.getOutputDirectory(), filename);
if (!grammarFile.exists()) {
antlrTool.panic("Cannot find importVocab file '" + filename + "'");
}
}
setReadOnly(true);
// Read a file with lines of the form ID=number
try {
Reader fileIn = new BufferedReader(new FileReader(grammarFile));
ANTLRTokdefLexer tokdefLexer = new ANTLRTokdefLexer(fileIn);
ANTLRTokdefParser tokdefParser = new ANTLRTokdefParser(tokdefLexer);
tokdefParser.setTool(antlrTool);
tokdefParser.setFilename(filename);
tokdefParser.file(this);
}
catch (FileNotFoundException fnf) {
antlrTool.panic("Cannot find importVocab file '" + filename + "'");
}
catch (RecognitionException ex) {
antlrTool.panic("Error parsing importVocab file '" + filename + "': " + ex.toString());
}
catch (TokenStreamException ex) {
antlrTool.panic("Error reading importVocab file '" + filename + "'");
}
}
public Object clone() {
ImportVocabTokenManager tm;
tm = (ImportVocabTokenManager)super.clone();
tm.filename = this.filename;
tm.grammar = this.grammar;
return tm;
}
/** define a token. */
public void define(TokenSymbol ts) {
super.define(ts);
}
/** define a token. Intended for use only when reading the importVocab file. */
public void define(String s, int ttype) {
TokenSymbol ts = null;
if (s.startsWith("\"")) {
ts = new StringLiteralSymbol(s);
}
else {
ts = new TokenSymbol(s);
}
ts.setTokenType(ttype);
super.define(ts);
maxToken = (ttype + 1) > maxToken ? (ttype + 1) : maxToken; // record maximum token type
}
/** importVocab token manager is read-only if output would be same as input */
public boolean isReadOnly() {
return readOnly;
}
/** Get the next unused token type. */
public int nextTokenType() {
return super.nextTokenType();
}
}
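
ImportVocabTokenManager reads a vocabulary file whose entries map a token name or quoted literal to its integer type and feeds each one to define(String, int). A simplified stand-alone sketch of that format and loop; the file name and sample entries are invented, and the real file is parsed by ANTLRTokdefParser (which also accepts a vocabulary-name header) rather than by hand.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

// Hypothetical MyParserTokenTypes.txt contents, one NAME=number entry per line:
//   PLUS=4
//   "begin"=5
//   IDENT=6
class ImportVocabSketch {
    public static void main(String[] args) throws IOException {
        try (BufferedReader in = new BufferedReader(new FileReader("MyParserTokenTypes.txt"))) {
            String line;
            while ((line = in.readLine()) != null) {
                int eq = line.lastIndexOf('=');
                if (eq < 0) continue; // skip anything that is not a NAME=number entry
                String name = line.substring(0, eq);
                int ttype = Integer.parseInt(line.substring(eq + 1).trim());
                // define(name, ttype) above creates a StringLiteralSymbol for quoted
                // names and a plain TokenSymbol otherwise, then bumps maxToken.
                System.out.println(name + " -> " + ttype);
            }
        }
    }
}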

131
fine-antlr-old/src/main/java/com/fr/third/antlr/InputBuffer.java

@@ -0,0 +1,131 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/InputBuffer.java#2 $
*/
// SAS: Added this class to genericise the input buffers for scanners
// This allows a scanner to use a binary (FileInputStream) or
// text (FileReader) stream of data; the generated scanner
// subclass will define the input stream
// There are two subclasses to this: CharBuffer and ByteBuffer
/**A Stream of characters fed to the lexer from an InputStream that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
* character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see com.fr.third.antlr.CharQueue
*/
public abstract class InputBuffer {
// Number of active markers
protected int nMarkers = 0;
// Additional offset used when markers are active
protected int markerOffset = 0;
// Number of calls to consume() since last LA() or LT() call
protected int numToConsume = 0;
// Circular queue
protected CharQueue queue;
/** Create an input buffer */
public InputBuffer() {
queue = new CharQueue(1);
}
/** This method updates the state of the input buffer so that
* the text matched since the most recent mark() is no longer
* held by the buffer. So, you either do a mark/rewind for
* failed predicate or mark/commit to keep on parsing without
* rewinding the input.
*/
public void commit() {
nMarkers--;
}
/** Mark another character for deferred consumption */
public void consume() {
numToConsume++;
}
/** Ensure that the input buffer is sufficiently full */
public abstract void fill(int amount) throws CharStreamException;
public String getLAChars() {
StringBuffer la = new StringBuffer();
for (int i = markerOffset; i < queue.nbrEntries; i++)
la.append(queue.elementAt(i));
return la.toString();
}
public String getMarkedChars() {
StringBuffer marked = new StringBuffer();
for (int i = 0; i < markerOffset; i++)
marked.append(queue.elementAt(i));
return marked.toString();
}
public boolean isMarked() {
return (nMarkers != 0);
}
/** Get a lookahead character */
public char LA(int i) throws CharStreamException {
fill(i);
return queue.elementAt(markerOffset + i - 1);
}
/**Return an integer marker that can be used to rewind the buffer to
* its current state.
*/
public int mark() {
syncConsume();
nMarkers++;
return markerOffset;
}
/**Rewind the character buffer to a marker.
* @param mark Marker returned previously from mark()
*/
public void rewind(int mark) {
syncConsume();
markerOffset = mark;
nMarkers--;
}
/** Reset the input buffer
*/
public void reset() {
nMarkers = 0;
markerOffset = 0;
numToConsume = 0;
queue.reset();
}
/** Sync up deferred consumption */
protected void syncConsume() {
while (numToConsume > 0) {
if (nMarkers > 0) {
// guess mode -- leave leading characters and bump offset.
markerOffset++;
}
else {
// normal mode -- remove first character
queue.removeFirst();
}
numToConsume--;
}
}
}
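
A small usage sketch of the mark()/rewind() protocol described above, run against the CharBuffer subclass; the input text is arbitrary. A failed syntactic-predicate guess rewinds to the mark, while commit() would instead release the guessed-over characters.

import com.fr.third.antlr.CharBuffer;
import com.fr.third.antlr.CharStreamException;
import com.fr.third.antlr.InputBuffer;

import java.io.StringReader;

class InputBufferSketch {
    public static void main(String[] args) throws CharStreamException {
        InputBuffer buf = new CharBuffer(new StringReader("abc"));
        int m = buf.mark();            // remember this position
        buf.consume();                 // tentatively consume 'a' (deferred)
        buf.consume();                 // ... and 'b'
        System.out.println(buf.LA(1)); // prints c while guessing
        buf.rewind(m);                 // guess failed: back to the mark
        System.out.println(buf.LA(1)); // prints a again
    }
}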

34
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaBlockFinishingInfo.java

@@ -0,0 +1,34 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/JavaBlockFinishingInfo.java#2 $
*/
class JavaBlockFinishingInfo {
String postscript; // what to generate to terminate block
boolean generatedSwitch;// did block finish with "default:" of switch?
boolean generatedAnIf;
/** When generating an if or switch, end-of-token lookahead sets
* will become the else or default clause, so don't generate an
* error clause in this case.
*/
boolean needAnErrorClause;
public JavaBlockFinishingInfo() {
postscript = null;
generatedSwitch = false;
needAnErrorClause = true;
}
public JavaBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) {
postscript = ps;
generatedSwitch = genS;
this.generatedAnIf = generatedAnIf;
needAnErrorClause = n;
}
}

87
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCharFormatter.java

@@ -0,0 +1,87 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/JavaCharFormatter.java#2 $
*/
class JavaCharFormatter implements CharFormatter {
/** Given a character value, return a string representing the character
* that can be embedded inside a string literal or character literal
* This works for Java/C/C++ code-generation and languages with compatible
* special-character escaping.
* Code-generators for languages should override this method.
* @param c The character of interest.
* @param forCharLiteral true to escape for char literal, false for string literal
*/
public String escapeChar(int c, boolean forCharLiteral) {
switch (c) {
// case GrammarAnalyzer.EPSILON_TYPE : return "<end-of-token>";
case '\n':
return "\\n";
case '\t':
return "\\t";
case '\r':
return "\\r";
case '\\':
return "\\\\";
case '\'':
return forCharLiteral ? "\\'" : "'";
case '"':
return forCharLiteral ? "\"" : "\\\"";
default :
if (c < ' ' || c > 126) {
if ((0x0000 <= c) && (c <= 0x000F)) {
return "\\u000" + Integer.toString(c, 16);
}
else if ((0x0010 <= c) && (c <= 0x00FF)) {
return "\\u00" + Integer.toString(c, 16);
}
else if ((0x0100 <= c) && (c <= 0x0FFF)) {
return "\\u0" + Integer.toString(c, 16);
}
else {
return "\\u" + Integer.toString(c, 16);
}
}
else {
return String.valueOf((char)c);
}
}
}
/** Converts a String into a representation that can be used as a literal
* when surrounded by double-quotes.
* @param s The String to be changed into a literal
*/
public String escapeString(String s) {
String retval = new String();
for (int i = 0; i < s.length(); i++) {
retval += escapeChar(s.charAt(i), false);
}
return retval;
}
/** Given a character value, return a string representing the character
* literal that can be recognized by the target language compiler.
* This works for languages that use single-quotes for character literals.
* Code-generators for languages should override this method.
* @param c The character of interest.
*/
public String literalChar(int c) {
return "'" + escapeChar(c, true) + "'";
}
/** Converts a String into a string literal
* This works for languages that use double-quotes for string literals.
* Code-generators for languages should override this method.
* @param s The String to be changed into a literal
*/
public String literalString(String s) {
return "\"" + escapeString(s) + "\"";
}
}
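
A quick check of the escaping rules above, in particular the quote handling that differs between char literals and string literals (expected output shown in the comments). The sketch sits in the same package only because JavaCharFormatter is package-private.

package com.fr.third.antlr; // JavaCharFormatter is package-private

class CharFormatterSketch {
    public static void main(String[] args) {
        JavaCharFormatter f = new JavaCharFormatter();
        System.out.println(f.literalChar('\n'));         // '\n'
        System.out.println(f.literalChar('\''));         // '\''
        System.out.println(f.literalString("a\"b"));     // "a\"b"
        System.out.println(f.escapeChar(0x2603, false)); // \u2603
    }
}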

3746
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGenerator.java

File diff suppressed because it is too large

21
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGeneratorPrintWriterManager.java

@@ -0,0 +1,21 @@
package com.fr.third.antlr;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Map;
/**
* Defines a strategy that can be used to manage the printwriter
* being used to write JavaCodeGenerator output
*
* TODO generalize so all code gens could use?
*/
public interface JavaCodeGeneratorPrintWriterManager {
public PrintWriter setupOutput(Tool tool, Grammar grammar) throws IOException;
public PrintWriter setupOutput(Tool tool, String fileName) throws IOException;
public void startMapping(int sourceLine);
public void startSingleSourceLineMapping(int sourceLine);
public void endMapping();
public void finishOutput() throws IOException;
public Map getSourceMaps();
}
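
A minimal, hypothetical implementation of the strategy above: one .java file per grammar, no line mapping. This is only a sketch of the contract, not the manager ANTLR actually installs; it assumes Tool.getOutputDirectory() and Grammar.getClassName() as used elsewhere in this diff.

package com.fr.third.antlr;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Collections;
import java.util.Map;

// Hypothetical minimal strategy: write <ClassName>.java into the tool's output
// directory and skip source-map bookkeeping entirely.
class SimplePrintWriterManagerSketch implements JavaCodeGeneratorPrintWriterManager {
    private PrintWriter out;

    public PrintWriter setupOutput(Tool tool, Grammar grammar) throws IOException {
        return setupOutput(tool, grammar.getClassName());
    }

    public PrintWriter setupOutput(Tool tool, String fileName) throws IOException {
        out = new PrintWriter(new FileWriter(new File(tool.getOutputDirectory(), fileName + ".java")));
        return out;
    }

    public void startMapping(int sourceLine) { /* no source maps kept */ }
    public void startSingleSourceLineMapping(int sourceLine) { /* no-op */ }
    public void endMapping() { /* no-op */ }

    public void finishOutput() throws IOException {
        if (out != null) out.close();
    }

    public Map getSourceMaps() {
        return Collections.EMPTY_MAP; // raw Map, matching the pre-generics interface
    }
}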

1095
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkAnalyzer.java

File diff suppressed because it is too large

58
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkGrammarAnalyzer.java

@@ -0,0 +1,58 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LLkGrammarAnalyzer.java#2 $
*/
public interface LLkGrammarAnalyzer extends GrammarAnalyzer {
public boolean deterministic(AlternativeBlock blk);
public boolean deterministic(OneOrMoreBlock blk);
public boolean deterministic(ZeroOrMoreBlock blk);
public Lookahead FOLLOW(int k, RuleEndElement end);
public Lookahead look(int k, ActionElement action);
public Lookahead look(int k, AlternativeBlock blk);
public Lookahead look(int k, BlockEndElement end);
public Lookahead look(int k, CharLiteralElement atom);
public Lookahead look(int k, CharRangeElement end);
public Lookahead look(int k, GrammarAtom atom);
public Lookahead look(int k, OneOrMoreBlock blk);
public Lookahead look(int k, RuleBlock blk);
public Lookahead look(int k, RuleEndElement end);
public Lookahead look(int k, RuleRefElement rr);
public Lookahead look(int k, StringLiteralElement atom);
public Lookahead look(int k, SynPredBlock blk);
public Lookahead look(int k, TokenRangeElement end);
public Lookahead look(int k, TreeElement end);
public Lookahead look(int k, WildcardElement wc);
public Lookahead look(int k, ZeroOrMoreBlock blk);
public Lookahead look(int k, String rule);
public void setGrammar(Grammar g);
public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer);
}

85
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkParser.java

@@ -0,0 +1,85 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LLkParser.java#2 $
*/
/**An LL(k) parser.
*
* @see com.fr.third.antlr.Token
* @see com.fr.third.antlr.TokenBuffer
*/
public class LLkParser extends Parser {
int k;
public LLkParser(int k_) {
k = k_;
}
public LLkParser(ParserSharedInputState state, int k_) {
super(state);
k = k_;
}
public LLkParser(TokenBuffer tokenBuf, int k_) {
k = k_;
setTokenBuffer(tokenBuf);
}
public LLkParser(TokenStream lexer, int k_) {
k = k_;
TokenBuffer tokenBuf = new TokenBuffer(lexer);
setTokenBuffer(tokenBuf);
}
/**Consume another token from the input stream. Can only write sequentially!
* If you need 3 tokens ahead, you must consume() 3 times.
* <p>
* Note that it is possible to overwrite tokens that have not been matched.
* For example, calling consume() 3 times when k=2, means that the first token
* consumed will be overwritten with the 3rd.
*/
public void consume() throws TokenStreamException {
inputState.input.consume();
}
public int LA(int i) throws TokenStreamException {
return inputState.input.LA(i);
}
public Token LT(int i) throws TokenStreamException {
return inputState.input.LT(i);
}
private void trace(String ee, String rname) throws TokenStreamException {
traceIndent();
System.out.print(ee + rname + ((inputState.guessing > 0)?"; [guessing]":"; "));
for (int i = 1; i <= k; i++) {
if (i != 1) {
System.out.print(", ");
}
if ( LT(i)!=null ) {
System.out.print("LA(" + i + ")==" + LT(i).getText());
}
else {
System.out.print("LA(" + i + ")==null");
}
}
System.out.println("");
}
public void traceIn(String rname) throws TokenStreamException {
traceDepth += 1;
trace("> ", rname);
}
public void traceOut(String rname) throws TokenStreamException {
trace("< ", rname);
traceDepth -= 1;
}
}
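
A usage sketch of LA()/LT()/consume() on LLkParser. The tiny TokenStream below hands out a fixed token sequence and is not an ANTLR-generated lexer; token types 4 and 5 are arbitrary.

import com.fr.third.antlr.CommonToken;
import com.fr.third.antlr.LLkParser;
import com.fr.third.antlr.Token;
import com.fr.third.antlr.TokenStream;
import com.fr.third.antlr.TokenStreamException;

class LLkParserSketch {
    public static void main(String[] args) throws TokenStreamException {
        final Token[] toks = {
            new CommonToken(4, "a"),
            new CommonToken(5, "b"),
            new CommonToken(Token.EOF_TYPE, "<eof>")
        };
        TokenStream stream = new TokenStream() {
            int i = 0;
            public Token nextToken() {
                return toks[Math.min(i++, toks.length - 1)]; // keep returning EOF at the end
            }
        };
        LLkParser p = new LLkParser(stream, 2);                // k = 2 token lookahead
        System.out.println(p.LT(1).getText() + " " + p.LA(2)); // a 5
        p.consume();                                           // step past "a"
        System.out.println(p.LT(1).getText());                 // b
    }
}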

179
fine-antlr-old/src/main/java/com/fr/third/antlr/LexerGrammar.java

@@ -0,0 +1,179 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LexerGrammar.java#2 $
*/
import java.io.IOException;
import com.fr.third.antlr.collections.impl.BitSet;
/** Lexer-specific grammar subclass */
class LexerGrammar extends Grammar {
// character set used by lexer
protected BitSet charVocabulary;
// true if the lexer generates literal testing code for nextToken
protected boolean testLiterals = true;
// true if the lexer generates case-sensitive literals testing
protected boolean caseSensitiveLiterals = true;
/** true if the lexer generates case-sensitive LA(k) testing */
protected boolean caseSensitive = true;
/** true if lexer is to ignore all unrecognized tokens */
protected boolean filterMode = false;
/** if filterMode is true, then filterRule can indicate an optional
* rule to use as the scarf language. If null, programmer used
* plain "filter=true" not "filter=rule".
*/
protected String filterRule = null;
LexerGrammar(String className_, Tool tool_, String superClass) {
super(className_, tool_, superClass);
// by default, use 0..127 for ASCII char vocabulary
BitSet cv = new BitSet();
for (int i = 0; i <= 127; i++) {
cv.add(i);
}
setCharVocabulary(cv);
// Lexer usually has no default error handling
defaultErrorHandler = false;
}
/** Top-level call to generate the code */
public void generate() throws IOException {
generator.gen(this);
}
public String getSuperClass() {
// If debugging, use debugger version of scanner
if (debuggingOutput)
return "debug.DebuggingCharScanner";
return "CharScanner";
}
// Get the testLiterals option value
public boolean getTestLiterals() {
return testLiterals;
}
/**Process command line arguments.
* -trace have all rules call traceIn/traceOut
* -traceLexer have lexical rules call traceIn/traceOut
* -debug generate debugging output for parser debugger
*/
public void processArguments(String[] args) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-trace")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-traceLexer")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-debug")) {
debuggingOutput = true;
antlrTool.setArgOK(i);
}
}
}
/** Set the character vocabulary used by the lexer */
public void setCharVocabulary(BitSet b) {
charVocabulary = b;
}
/** Set lexer options */
public boolean setOption(String key, Token value) {
String s = value.getText();
if (key.equals("buildAST")) {
antlrTool.warning("buildAST option is not valid for lexer", getFilename(), value.getLine(), value.getColumn());
return true;
}
if (key.equals("testLiterals")) {
if (s.equals("true")) {
testLiterals = true;
}
else if (s.equals("false")) {
testLiterals = false;
}
else {
antlrTool.warning("testLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("interactive")) {
if (s.equals("true")) {
interactive = true;
}
else if (s.equals("false")) {
interactive = false;
}
else {
antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("caseSensitive")) {
if (s.equals("true")) {
caseSensitive = true;
}
else if (s.equals("false")) {
caseSensitive = false;
}
else {
antlrTool.warning("caseSensitive option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("caseSensitiveLiterals")) {
if (s.equals("true")) {
caseSensitiveLiterals = true;
}
else if (s.equals("false")) {
caseSensitiveLiterals = false;
}
else {
antlrTool.warning("caseSensitiveLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("filter")) {
if (s.equals("true")) {
filterMode = true;
}
else if (s.equals("false")) {
filterMode = false;
}
else if (value.getType() == ANTLRTokenTypes.TOKEN_REF) {
filterMode = true;
filterRule = s;
}
else {
antlrTool.warning("filter option must be true, false, or a lexer rule name", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("longestPossible")) {
antlrTool.warning("longestPossible option has been deprecated; ignoring it...", getFilename(), value.getLine(), value.getColumn());
return true;
}
if (key.equals("className")) {
super.setOption(key, value);
return true;
}
if (super.setOption(key, value)) {
return true;
}
antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
return false;
}
}

80
fine-antlr-old/src/main/java/com/fr/third/antlr/LexerSharedInputState.java

@@ -0,0 +1,80 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.jGuru.com
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LexerSharedInputState.java#2 $
*/
import java.io.Reader;
import java.io.InputStream;
/** This object contains the data associated with an
* input stream of characters. Multiple lexers
* share a single LexerSharedInputState to lex
* the same input stream.
*/
public class LexerSharedInputState {
protected int column = 1;
protected int line = 1;
protected int tokenStartColumn = 1;
protected int tokenStartLine = 1;
protected InputBuffer input;
/** What file (if known) caused the problem? */
protected String filename;
public int guessing = 0;
public LexerSharedInputState(InputBuffer inbuf) {
input = inbuf;
}
public LexerSharedInputState(InputStream in) {
this(new ByteBuffer(in));
}
public LexerSharedInputState(Reader in) {
this(new CharBuffer(in));
}
public String getFilename() {
return filename;
}
public InputBuffer getInput() {
return input;
}
public int getLine()
{
return line;
}
public int getTokenStartColumn()
{
return tokenStartColumn;
}
public int getTokenStartLine()
{
return tokenStartLine;
}
public int getColumn()
{
return column;
}
public void reset() {
column = 1;
line = 1;
tokenStartColumn = 1;
tokenStartLine = 1;
guessing = 0;
filename = null;
input.reset();
}
}
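
A construction sketch for the sharing idea above: the state object wraps one InputBuffer (a CharBuffer here, via the Reader constructor), and each generated lexer would be handed the same instance so they all advance a single stream. The generated lexer classes appear only in a comment because none are defined in this diff; they are hypothetical.

import com.fr.third.antlr.LexerSharedInputState;

import java.io.StringReader;

class SharedInputStateSketch {
    public static void main(String[] args) {
        LexerSharedInputState state = new LexerSharedInputState(new StringReader("foo bar"));
        System.out.println(state.getLine() + ":" + state.getColumn()); // 1:1 before any lexing
        // new FooLexer(state); new BarLexer(state); // hypothetical generated lexers sharing the stream
    }
}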

218
fine-antlr-old/src/main/java/com/fr/third/antlr/Lookahead.java

@@ -0,0 +1,218 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Lookahead.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
import com.fr.third.antlr.collections.impl.Vector;
/**This object holds all information needed to represent
* the lookahead for any particular lookahead computation
* for a <b>single</b> lookahead depth. Final lookahead
* information is a simple bit set, but intermediate
* stages need computation cycle and FOLLOW information.
*
* <p>
* Concerning the <tt>cycle</tt> variable.
* If lookahead is computed for a RuleEnd node, then
* computation is part of a FOLLOW cycle for this rule.
* If lookahead is computed for a RuleBlock node, the
* computation is part of a FIRST cycle to this rule.
*
* <p>
* Concerning the <tt>epsilonDepth</tt> variable.
* This is not the depth relative to the rule reference
* that epsilon was encountered. That value is
* <pre>
* initial_k - epsilonDepth + 1
* </pre>
* Also, lookahead depths past rule ref for local follow are:
* <pre>
* initial_k - (initial_k - epsilonDepth)
* </pre>
* Used for rule references. If we try
* to compute look(k, ruleref) and there are fewer
* than k lookahead terminals before the end of
* the rule, epsilon will be returned (don't want to
* pass the end of the rule). We must track when
* the lookahead got stuck. For example,
* <pre>
* a : b A B E F G;
* b : C ;
* </pre>
* LOOK(5, ref-to(b)) is {<EPSILON>} with depth = 4, which
* indicates that at 2 (5-4+1) tokens ahead, end of rule was reached.
* Therefore, the token at 4=5-(5-4) past rule ref b must be
* included in the set == F.
* The situation is complicated by the fact that a computation
* may hit the end of a rule at many different depths. For example,
* <pre>
* a : b A B C ;
* b : E F // epsilon depth of 1 relative to initial k=3
* | G // epsilon depth of 2
* ;
* </pre>
* Here, LOOK(3,ref-to(b)) returns epsilon, but the depths are
* {1, 2}; i.e., 3-(3-1) and 3-(3-2). Those are the lookahead depths
* past the rule ref needed for the local follow.
*
* <p>
* This is null unless an epsilon is created.
*
* @see com.fr.third.antlr.Lookahead#combineWith(Lookahead)
*/
public class Lookahead implements Cloneable {
/** actual bitset of the lookahead */
BitSet fset;
/** is this computation part of a computation cycle? */
String cycle;
/** What k values were being computed when end of rule hit? */
BitSet epsilonDepth;
/** Does this lookahead depth include Epsilon token type? This
* is used to avoid having a bit in the set for Epsilon as it
* conflicts with parsing binary files.
*/
boolean hasEpsilon = false;
public Lookahead() {
fset = new BitSet();
}
/** create a new lookahead set with the LL(1) set to the parameter */
public Lookahead(BitSet p) {
fset = p;
}
/** create an empty lookahead set, but with cycle */
public Lookahead(String c) {
this();
cycle = c;
}
/** Make a deep copy of everything in this object */
public Object clone() {
Lookahead p = null;
try {
p = (Lookahead)super.clone();
p.fset = (BitSet)fset.clone();
p.cycle = cycle; // strings are immutable
if (epsilonDepth != null) {
p.epsilonDepth = (BitSet)epsilonDepth.clone();
}
}
catch (CloneNotSupportedException e) {
throw new InternalError();
}
return p;
}
public void combineWith(Lookahead q) {
if (cycle == null) { // track at least one cycle
cycle = q.cycle;
}
if (q.containsEpsilon()) {
hasEpsilon = true;
}
// combine epsilon depths
if (epsilonDepth != null) {
if (q.epsilonDepth != null) {
epsilonDepth.orInPlace(q.epsilonDepth);
}
}
else if (q.epsilonDepth != null) {
epsilonDepth = (BitSet)q.epsilonDepth.clone();
}
fset.orInPlace(q.fset);
}
public boolean containsEpsilon() {
return hasEpsilon;
}
/** What is the intersection of two lookahead depths?
* Only the Epsilon "bit" and bitset are considered.
*/
public Lookahead intersection(Lookahead q) {
Lookahead p = new Lookahead(fset.and(q.fset));
if (this.hasEpsilon && q.hasEpsilon) {
p.setEpsilon();
}
return p;
}
public boolean nil() {
return fset.nil() && !hasEpsilon;
}
public static Lookahead of(int el) {
Lookahead look = new Lookahead();
look.fset.add(el);
return look;
}
public void resetEpsilon() {
hasEpsilon = false;
}
public void setEpsilon() {
hasEpsilon = true;
}
public String toString() {
String e = "",b,f = "",d = "";
b = fset.toString(",");
if (containsEpsilon()) {
e = "+<epsilon>";
}
if (cycle != null) {
f = "; FOLLOW(" + cycle + ")";
}
if (epsilonDepth != null) {
d = "; depths=" + epsilonDepth.toString(",");
}
return b + e + f + d;
}
public String toString(String separator, CharFormatter formatter) {
String e = "",b,f = "",d = "";
b = fset.toString(separator, formatter);
if (containsEpsilon()) {
e = "+<epsilon>";
}
if (cycle != null) {
f = "; FOLLOW(" + cycle + ")";
}
if (epsilonDepth != null) {
d = "; depths=" + epsilonDepth.toString(",");
}
return b + e + f + d;
}
public String toString(String separator, CharFormatter formatter, Grammar g) {
if (g instanceof LexerGrammar) {
return toString(separator, formatter);
}
else {
return toString(separator, g.tokenManager.getVocabulary());
}
}
public String toString(String separator, Vector vocab) {
String b,f = "",d = "";
b = fset.toString(separator, vocab);
if (cycle != null) {
f = "; FOLLOW(" + cycle + ")";
}
if (epsilonDepth != null) {
d = "; depths=" + epsilonDepth.toString(",");
}
return b + f + d;
}
}
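
The arithmetic in the javadoc example above (a : b A B E F G ; b : C ;), checked with the numbers it quotes: LOOK(5, ref-to(b)) records epsilon depth 4, so the end of b was hit 5-4+1 = 2 tokens in, and the token 5-(5-4) = 4 positions past the rule reference (F) completes the set.

class EpsilonDepthSketch {
    public static void main(String[] args) {
        int initialK = 5;     // LOOK(5, ref-to(b)) from the example in the javadoc
        int epsilonDepth = 4; // depth recorded when the end of rule b was reached
        int endOfRuleHitAt = initialK - epsilonDepth + 1;              // 2 tokens ahead
        int followDepthPastRef = initialK - (initialK - epsilonDepth); // 4th token past the ref == F
        System.out.println(endOfRuleHitAt + " " + followDepthPastRef); // prints: 2 4
    }
}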

792
fine-antlr-old/src/main/java/com/fr/third/antlr/MakeGrammar.java

@@ -0,0 +1,792 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/MakeGrammar.java#2 $
*/
import com.fr.third.antlr.collections.Stack;
import com.fr.third.antlr.collections.impl.LList;
import com.fr.third.antlr.collections.impl.Vector;
public class MakeGrammar extends DefineGrammarSymbols {
protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
protected RuleRefElement lastRuleRef;
protected RuleEndElement ruleEnd; // used if not nested
protected RuleBlock ruleBlock; // points to block of current rule.
protected int nested = 0; // nesting inside a subrule
protected boolean grammarError = false;
ExceptionSpec currentExceptionSpec = null;
public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
super(tool_, args_, analyzer_);
}
/** Abort the processing of a grammar (due to syntax errors) */
public void abortGrammar() {
String s = "unknown grammar";
if (grammar != null) {
s = grammar.getClassName();
}
tool.error("aborting grammar '" + s + "' due to errors");
super.abortGrammar();
}
protected void addElementToCurrentAlt(AlternativeElement e) {
e.enclosingRuleName = ruleBlock.ruleName;
context().addAlternativeElement(e);
}
public void beginAlt(boolean doAutoGen_) {
super.beginAlt(doAutoGen_);
Alternative alt = new Alternative();
alt.setAutoGen(doAutoGen_);
context().block.addAlternative(alt);
}
public void beginChildList() {
super.beginChildList();
context().block.addAlternative(new Alternative());
}
/** Add an exception group to a rule (currently a no-op) */
public void beginExceptionGroup() {
super.beginExceptionGroup();
if (!(context().block instanceof RuleBlock)) {
tool.panic("beginExceptionGroup called outside of rule block");
}
}
/** Add an exception spec to an exception group or rule block */
public void beginExceptionSpec(Token label) {
// Hack the label string a bit to remove leading/trailing space.
if (label != null) {
label.setText(StringUtils.stripFront(StringUtils.stripBack(label.getText(), " \n\r\t"), " \n\r\t"));
}
super.beginExceptionSpec(label);
// Don't check for currentExceptionSpec!=null because syntax errors
// may leave it set to something.
currentExceptionSpec = new ExceptionSpec(label);
}
public void beginSubRule(Token label, Token start, boolean not) {
super.beginSubRule(label, start, not);
// we don't know what kind of subrule it is yet.
// push a dummy one that will allow us to collect the
// alternatives. Later, we'll switch to real object.
blocks.push(new BlockContext());
context().block = new AlternativeBlock(grammar, start, not);
context().altNum = 0; // reset alternative number
nested++;
// create a final node to which the last element of each
// alternative will point.
context().blockEnd = new BlockEndElement(grammar);
// make sure end node points to start of block
context().blockEnd.block = context().block;
labelElement(context().block, label);
}
public void beginTree(Token tok) throws SemanticException {
if (!(grammar instanceof TreeWalkerGrammar)) {
tool.error("Trees only allowed in TreeParser", grammar.getFilename(), tok.getLine(), tok.getColumn());
throw new SemanticException("Trees only allowed in TreeParser");
}
super.beginTree(tok);
blocks.push(new TreeBlockContext());
context().block = new TreeElement(grammar, tok);
context().altNum = 0; // reset alternative number
}
public BlockContext context() {
if (blocks.height() == 0) {
return null;
}
else {
return (BlockContext)blocks.top();
}
}
/**Used to build nextToken() for the lexer.
* This builds a rule which has every "public" rule in the given Vector of
* rules as its alternates. Each rule ref generates a Token object.
* @param g The Grammar that is being processed
* @param lexRules A vector of lexer rules that will be used to create an alternate block.
* @param rname The name of the resulting rule.
*/
public static RuleBlock createNextTokenRule(Grammar g, Vector lexRules, String rname) {
// create actual rule data structure
RuleBlock rb = new RuleBlock(g, rname);
rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
RuleEndElement ruleEnd = new RuleEndElement(g);
rb.setEndElement(ruleEnd);
ruleEnd.block = rb;
// Add an alternative for each element of the rules vector.
for (int i = 0; i < lexRules.size(); i++) {
RuleSymbol r = (RuleSymbol)lexRules.elementAt(i);
if (!r.isDefined()) {
g.antlrTool.error("Lexer rule " + r.id.substring(1) + " is not defined");
}
else {
if (r.access.equals("public")) {
Alternative alt = new Alternative(); // create alt we'll add to ref rule
RuleBlock targetRuleBlock = r.getBlock();
Vector targetRuleAlts = targetRuleBlock.getAlternatives();
// collect a sem pred if only one alt and it's at the start;
// simple, but faster to implement until real hoisting
if ( targetRuleAlts!=null && targetRuleAlts.size()==1 ) {
Alternative onlyAlt = (Alternative)targetRuleAlts.elementAt(0);
if ( onlyAlt.semPred!=null ) {
// ok, has sem pred, make this rule ref alt have a pred
alt.semPred = onlyAlt.semPred;
// REMOVE predicate from target rule??? NOPE, another
// rule other than nextToken() might invoke it.
}
}
// create a rule ref to lexer rule
// the Token is a RULE_REF not a TOKEN_REF since the
// conversion to mRulename has already taken place
RuleRefElement rr =
new RuleRefElement(g,
new CommonToken(ANTLRTokenTypes.RULE_REF, r.getId()),
GrammarElement.AUTO_GEN_NONE);
rr.setLabel("theRetToken");
rr.enclosingRuleName = "nextToken";
rr.next = ruleEnd;
alt.addElement(rr); // add rule ref to alt
alt.setAutoGen(true); // keep text of elements
rb.addAlternative(alt); // add alt to rule block
r.addReference(rr); // track ref to this rule in rule blk
}
}
}
rb.setAutoGen(true); // keep text of elements
rb.prepareForAnalysis();
//System.out.println(rb);
return rb;
}
/** Return block as if they had typed: "( rule )?" */
private AlternativeBlock createOptionalRuleRef(String rule, Token start) {
// Make the subrule
AlternativeBlock blk = new AlternativeBlock(grammar, start, false);
// Make sure rule is defined
String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
if (!grammar.isDefined(mrule)) {
grammar.define(new RuleSymbol(mrule));
}
// Make the rule ref element
// RK: fixme probably easier to abuse start token..
Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
t.setLine(start.getLine());
t.setColumn(start.getColumn());
RuleRefElement rref =
new RuleRefElement(grammar, t, GrammarElement.AUTO_GEN_NONE);
rref.enclosingRuleName = ruleBlock.ruleName;
// Make the end of block element
BlockEndElement end = new BlockEndElement(grammar);
end.block = blk; // end block points back to start of blk
// Make an alternative, putting the rule ref into it
Alternative alt = new Alternative(rref);
alt.addElement(end); // last element in alt points to end of block
// Add the alternative to this block
blk.addAlternative(alt);
// create an empty (optional) alt and add to blk
Alternative optAlt = new Alternative();
optAlt.addElement(end); // points immediately to end of block
blk.addAlternative(optAlt);
blk.prepareForAnalysis();
return blk;
}
public void defineRuleName(Token r,
String access,
boolean ruleAutoGen,
String docComment)
throws SemanticException {
// if ( Character.isUpperCase(r.getText().charAt(0)) ) {
if (r.type == ANTLRTokenTypes.TOKEN_REF) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Lexical rule " + r.getText() +
" defined outside of lexer",
grammar.getFilename(), r.getLine(), r.getColumn());
r.setText(r.getText().toLowerCase());
}
}
else {
if (grammar instanceof LexerGrammar) {
tool.error("Lexical rule names must be upper case, '" + r.getText() +
"' is not",
grammar.getFilename(), r.getLine(), r.getColumn());
r.setText(r.getText().toUpperCase());
}
}
super.defineRuleName(r, access, ruleAutoGen, docComment);
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(), ruleAutoGen);
// Lexer rules do not generate default error handling
rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());
ruleBlock = rb;
blocks.push(new BlockContext()); // enter new context
context().block = rb;
rs.setBlock(rb);
ruleEnd = new RuleEndElement(grammar);
rb.setEndElement(ruleEnd);
nested = 0;
}
public void endAlt() {
super.endAlt();
if (nested == 0) { // all rule-level alts link to ruleEnd node
addElementToCurrentAlt(ruleEnd);
}
else {
addElementToCurrentAlt(context().blockEnd);
}
context().altNum++;
}
public void endChildList() {
super.endChildList();
// create a final node to which the last element of the single
// alternative will point. Done for compatibility with analyzer.
// Does NOT point to any block like alternative blocks because the
// TreeElement is not a block. This is used only as a placeholder.
BlockEndElement be = new BlockEndElement(grammar);
be.block = context().block;
addElementToCurrentAlt(be);
}
public void endExceptionGroup() {
super.endExceptionGroup();
}
public void endExceptionSpec() {
super.endExceptionSpec();
if (currentExceptionSpec == null) {
tool.panic("exception processing internal error -- no active exception spec");
}
if (context().block instanceof RuleBlock) {
// Named rule
((RuleBlock)context().block).addExceptionSpec(currentExceptionSpec);
}
else {
// It must be a plain-old alternative block
if (context().currentAlt().exceptionSpec != null) {
tool.error("Alternative already has an exception specification", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
else {
context().currentAlt().exceptionSpec = currentExceptionSpec;
}
}
currentExceptionSpec = null;
}
/** Called at the end of processing a grammar */
public void endGrammar() {
if (grammarError) {
abortGrammar();
}
else {
super.endGrammar();
}
}
public void endRule(String rule) {
super.endRule(rule);
BlockContext ctx = (BlockContext)blocks.pop(); // remove scope
// record the start of this block in the ending node
ruleEnd.block = ctx.block;
ruleEnd.block.prepareForAnalysis();
//System.out.println(ctx.block);
}
public void endSubRule() {
super.endSubRule();
nested--;
// remove subrule context from scope stack
BlockContext ctx = (BlockContext)blocks.pop();
AlternativeBlock block = ctx.block;
// If the subrule is marked with ~, check that it is
// a valid candidate for analysis
if (
block.not &&
!(block instanceof SynPredBlock) &&
!(block instanceof ZeroOrMoreBlock) &&
!(block instanceof OneOrMoreBlock)
) {
if (!analyzer.subruleCanBeInverted(block, grammar instanceof LexerGrammar)) {
String newline = System.getProperty("line.separator");
tool.error(
"This subrule cannot be inverted. Only subrules of the form:" + newline +
" (T1|T2|T3...) or" + newline +
" ('c1'|'c2'|'c3'...)" + newline +
"may be inverted (ranges are also allowed).",
grammar.getFilename(),
block.getLine(), block.getColumn()
);
}
}
// add the subrule as element if not a syn pred
if (block instanceof SynPredBlock) {
// record a reference to the recently-recognized syn pred in the
// enclosing block.
SynPredBlock synpred = (SynPredBlock)block;
context().block.hasASynPred = true;
context().currentAlt().synPred = synpred;
grammar.hasSyntacticPredicate = true;
synpred.removeTrackingOfRuleRefs(grammar);
}
else {
addElementToCurrentAlt(block);
}
ctx.blockEnd.block.prepareForAnalysis();
}
public void endTree() {
super.endTree();
BlockContext ctx = (BlockContext)blocks.pop();
addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt.
}
/** Remember that a major error occurred in the grammar */
public void hasError() {
grammarError = true;
}
private void labelElement(AlternativeElement el, Token label) {
if (label != null) {
// Does this label already exist?
for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
AlternativeElement altEl = (AlternativeElement)ruleBlock.labeledElements.elementAt(i);
String l = altEl.getLabel();
if (l != null && l.equals(label.getText())) {
tool.error("Label '" + label.getText() + "' has already been defined", grammar.getFilename(), label.getLine(), label.getColumn());
return;
}
}
// add this node to the list of labeled elements
el.setLabel(label.getText());
ruleBlock.labeledElements.appendElement(el);
}
}
public void noAutoGenSubRule() {
context().block.setAutoGen(false);
}
public void oneOrMoreSubRule() {
if (context().block.not) {
tool.error("'~' cannot be applied to (...)* subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
// create the right kind of object now that we know what that is
// and switch the list of alternatives. Adjust the stack of blocks.
// copy any init action also.
OneOrMoreBlock b = new OneOrMoreBlock(grammar);
setBlock(b, context().block);
BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
blocks.push(new BlockContext());
context().block = b;
context().blockEnd = old.blockEnd;
context().blockEnd.block = b;
}
public void optionalSubRule() {
if (context().block.not) {
tool.error("'~' cannot be applied to (...)? subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
// convert (X)? -> (X|) so that we can ignore optional blocks altogether!
// It already thinks that we have a simple subrule, just add option block.
beginAlt(false);
endAlt();
}
public void refAction(Token action) {
super.refAction(action);
context().block.hasAnAction = true;
addElementToCurrentAlt(new ActionElement(grammar, action));
}
public void setUserExceptions(String thr) {
((RuleBlock)context().block).throwsSpec = thr;
}
// Only called for rule blocks
public void refArgAction(Token action) {
((RuleBlock)context().block).argAction = action.getText();
}
public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Character literal only valid in lexer", grammar.getFilename(), lit.getLine(), lit.getColumn());
return;
}
super.refCharLiteral(lit, label, inverted, autoGenType, lastInRule);
CharLiteralElement cl = new CharLiteralElement((LexerGrammar)grammar, lit, inverted, autoGenType);
// Generate a warning for non-lowercase ASCII when case-insensitive
if (
!((LexerGrammar)grammar).caseSensitive && cl.getType() < 128 &&
Character.toLowerCase((char)cl.getType()) != (char)cl.getType()
) {
tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
}
addElementToCurrentAlt(cl);
labelElement(cl, label);
// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
}
}
public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Character range only valid in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
return;
}
int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
if (rangeMax < rangeMin) {
tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
return;
}
// Generate a warning for non-lowercase ASCII when case-insensitive
if (!((LexerGrammar)grammar).caseSensitive) {
if (rangeMin < 128 && Character.toLowerCase((char)rangeMin) != (char)rangeMin) {
tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t1.getLine(), t1.getColumn());
}
if (rangeMax < 128 && Character.toLowerCase((char)rangeMax) != (char)rangeMax) {
tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t2.getLine(), t2.getColumn());
}
}
super.refCharRange(t1, t2, label, autoGenType, lastInRule);
CharRangeElement cr = new CharRangeElement((LexerGrammar)grammar, t1, t2, autoGenType);
addElementToCurrentAlt(cr);
labelElement(cr, label);
// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
}
}
public void refTokensSpecElementOption(Token tok,
Token option,
Token value) {
/*
System.out.println("setting tokens spec option for "+tok.getText());
System.out.println(option.getText()+","+value.getText());
*/
TokenSymbol ts = (TokenSymbol)
grammar.tokenManager.getTokenSymbol(tok.getText());
if (ts == null) {
tool.panic("cannot find " + tok.getText() + "in tokens {...}");
}
if (option.getText().equals("AST")) {
ts.setASTNodeType(value.getText());
}
else {
grammar.antlrTool.error("invalid tokens {...} element option:" +
option.getText(),
grammar.getFilename(),
option.getLine(), option.getColumn());
}
}
public void refElementOption(Token option, Token value) {
/*
System.out.println("setting option for "+context().currentElement());
System.out.println(option.getText()+","+value.getText());
*/
AlternativeElement e = context().currentElement();
if (e instanceof StringLiteralElement ||
e instanceof TokenRefElement ||
e instanceof WildcardElement) {
((GrammarAtom)e).setOption(option, value);
}
else {
tool.error("cannot use element option (" + option.getText() +
") for this kind of element",
grammar.getFilename(), option.getLine(), option.getColumn());
}
}
/** Add an exception handler to an exception spec */
public void refExceptionHandler(Token exTypeAndName, Token action) {
super.refExceptionHandler(exTypeAndName, action);
if (currentExceptionSpec == null) {
tool.panic("exception handler processing internal error");
}
currentExceptionSpec.addHandler(new ExceptionHandler(exTypeAndName, action));
}
public void refInitAction(Token action) {
super.refAction(action);
context().block.setInitAction(action.getText());
}
public void refMemberAction(Token act) {
grammar.classMemberAction = act;
}
public void refPreambleAction(Token act) {
super.refPreambleAction(act);
}
// Only called for rule blocks
public void refReturnAction(Token returnAction) {
if (grammar instanceof LexerGrammar) {
String name = CodeGenerator.encodeLexerRuleName(((RuleBlock)context().block).getRuleName());
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(name);
if (rs.access.equals("public")) {
tool.warning("public Lexical rules cannot specify return type", grammar.getFilename(), returnAction.getLine(), returnAction.getColumn());
return;
}
}
((RuleBlock)context().block).returnAction = returnAction.getText();
}
public void refRule(Token idAssign,
Token r,
Token label,
Token args,
int autoGenType) {
// Disallow parser rule references in the lexer
if (grammar instanceof LexerGrammar) {
// if (!Character.isUpperCase(r.getText().charAt(0))) {
if (r.type != ANTLRTokenTypes.TOKEN_REF) {
tool.error("Parser rule " + r.getText() + " referenced in lexer");
return;
}
if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), r.getLine(), r.getColumn());
}
}
super.refRule(idAssign, r, label, args, autoGenType);
lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
if (args != null) {
lastRuleRef.setArgs(args.getText());
}
if (idAssign != null) {
lastRuleRef.setIdAssign(idAssign.getText());
}
addElementToCurrentAlt(lastRuleRef);
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
// update symbol table so it knows what nodes reference the rule.
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
rs.addReference(lastRuleRef);
labelElement(lastRuleRef, label);
}
public void refSemPred(Token pred) {
//System.out.println("refSemPred "+pred.getText());
super.refSemPred(pred);
//System.out.println("context().block: "+context().block);
if (context().currentAlt().atStart()) {
context().currentAlt().semPred = pred.getText();
}
else {
ActionElement a = new ActionElement(grammar, pred);
a.isSemPred = true;
addElementToCurrentAlt(a);
}
//System.out.println("DONE refSemPred "+pred.getText());
}
public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
super.refStringLiteral(lit, label, autoGenType, lastInRule);
if (grammar instanceof TreeWalkerGrammar && autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("^ not allowed in here for tree-walker", grammar.getFilename(), lit.getLine(), lit.getColumn());
}
StringLiteralElement sl = new StringLiteralElement(grammar, lit, autoGenType);
// If case-insensitive, then check each char of the string literal
if (grammar instanceof LexerGrammar && !((LexerGrammar)grammar).caseSensitive) {
for (int i = 1; i < lit.getText().length() - 1; i++) {
char c = lit.getText().charAt(i);
if (c < 128 && Character.toLowerCase(c) != c) {
tool.warning("Characters of string literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
break;
}
}
}
addElementToCurrentAlt(sl);
labelElement(sl, label);
// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
}
}
public void refToken(Token idAssign, Token t, Token label, Token args,
boolean inverted, int autoGenType, boolean lastInRule) {
if (grammar instanceof LexerGrammar) {
// In lexer, token references are really rule references
if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
}
if (inverted) {
tool.error("~TOKEN is not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
}
refRule(idAssign, t, label, args, autoGenType);
// if ignore option is set, must add an optional call to the specified token rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
}
}
else {
// Cannot have token ref args or assignment outside of lexer
if (idAssign != null) {
tool.error("Assignment from token reference only allowed in lexer", grammar.getFilename(), idAssign.getLine(), idAssign.getColumn());
}
if (args != null) {
tool.error("Token reference arguments only allowed in lexer", grammar.getFilename(), args.getLine(), args.getColumn());
}
super.refToken(idAssign, t, label, args, inverted, autoGenType, lastInRule);
TokenRefElement te = new TokenRefElement(grammar, t, inverted, autoGenType);
addElementToCurrentAlt(te);
labelElement(te, label);
}
}
public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
if (grammar instanceof LexerGrammar) {
tool.error("Token range not allowed in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
return;
}
super.refTokenRange(t1, t2, label, autoGenType, lastInRule);
TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2, autoGenType);
if (tr.end < tr.begin) {
tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
return;
}
addElementToCurrentAlt(tr);
labelElement(tr, label);
}
public void refTreeSpecifier(Token treeSpec) {
context().currentAlt().treeSpecifier = treeSpec;
}
public void refWildcard(Token t, Token label, int autoGenType) {
super.refWildcard(t, label, autoGenType);
WildcardElement wc = new WildcardElement(grammar, t, autoGenType);
addElementToCurrentAlt(wc);
labelElement(wc, label);
}
/** Get ready to process a new grammar */
public void reset() {
super.reset();
blocks = new LList();
lastRuleRef = null;
ruleEnd = null;
ruleBlock = null;
nested = 0;
currentExceptionSpec = null;
grammarError = false;
}
public void setArgOfRuleRef(Token argAction) {
super.setArgOfRuleRef(argAction);
lastRuleRef.setArgs(argAction.getText());
}
public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
b.setAlternatives(src.getAlternatives());
b.initAction = src.initAction;
//b.lookaheadDepth = src.lookaheadDepth;
b.label = src.label;
b.hasASynPred = src.hasASynPred;
b.hasAnAction = src.hasAnAction;
b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
b.generateAmbigWarnings = src.generateAmbigWarnings;
b.line = src.line;
b.greedy = src.greedy;
b.greedySet = src.greedySet;
}
public void setRuleOption(Token key, Token value) {
//((RuleBlock)context().block).setOption(key, value);
ruleBlock.setOption(key, value);
}
public void setSubruleOption(Token key, Token value) {
((AlternativeBlock)context().block).setOption(key, value);
}
public void synPred() {
if (context().block.not) {
tool.error("'~' cannot be applied to syntactic predicate", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
// create the right kind of object now that we know what that is
// and switch the list of alternatives. Adjust the stack of blocks.
// copy any init action also.
SynPredBlock b = new SynPredBlock(grammar);
setBlock(b, context().block);
BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
blocks.push(new BlockContext());
context().block = b;
context().blockEnd = old.blockEnd;
context().blockEnd.block = b;
}
public void zeroOrMoreSubRule() {
if (context().block.not) {
tool.error("'~' cannot be applied to (...)+ subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
// create the right kind of object now that we know what that is
// and switch the list of alternatives. Adjust the stack of blocks.
// copy any init action also.
ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
setBlock(b, context().block);
BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
blocks.push(new BlockContext());
context().block = b;
context().blockEnd = old.blockEnd;
context().blockEnd.block = b;
}
}

146
fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedCharException.java

@@ -0,0 +1,146 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/MismatchedCharException.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
public class MismatchedCharException extends RecognitionException {
// Types of chars
public static final int CHAR = 1;
public static final int NOT_CHAR = 2;
public static final int RANGE = 3;
public static final int NOT_RANGE = 4;
public static final int SET = 5;
public static final int NOT_SET = 6;
// One of the above
public int mismatchType;
// what was found on the input stream
public int foundChar;
// For CHAR/NOT_CHAR and RANGE/NOT_RANGE
public int expecting;
// For RANGE/NOT_RANGE (expecting is lower bound of range)
public int upper;
// For SET/NOT_SET
public BitSet set;
// who knows...they may want to ask scanner questions
public CharScanner scanner;
/**
* MismatchedCharException constructor comment.
*/
public MismatchedCharException() {
super("Mismatched char");
}
// Expected range / not range
public MismatchedCharException(char c, char lower, char upper_, boolean matchNot, CharScanner scanner_) {
super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
mismatchType = matchNot ? NOT_RANGE : RANGE;
foundChar = c;
expecting = lower;
upper = upper_;
scanner = scanner_;
}
// Expected token / not token
public MismatchedCharException(char c, char expecting_, boolean matchNot, CharScanner scanner_) {
super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
mismatchType = matchNot ? NOT_CHAR : CHAR;
foundChar = c;
expecting = expecting_;
scanner = scanner_;
}
// Expected BitSet / not BitSet
public MismatchedCharException(char c, BitSet set_, boolean matchNot, CharScanner scanner_) {
super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
mismatchType = matchNot ? NOT_SET : SET;
foundChar = c;
set = set_;
scanner = scanner_;
}
/**
* Returns a clean error message (no line number/column information)
*/
public String getMessage() {
StringBuffer sb = new StringBuffer();
switch (mismatchType) {
case CHAR:
sb.append("expecting "); appendCharName(sb, expecting);
sb.append(", found "); appendCharName(sb, foundChar);
break;
case NOT_CHAR:
sb.append("expecting anything but '");
appendCharName(sb, expecting);
sb.append("'; got it anyway");
break;
case RANGE:
case NOT_RANGE:
sb.append("expecting token ");
if (mismatchType == NOT_RANGE)
sb.append("NOT ");
sb.append("in range: ");
appendCharName(sb, expecting);
sb.append("..");
appendCharName(sb, upper);
sb.append(", found ");
appendCharName(sb, foundChar);
break;
case SET:
case NOT_SET:
sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of (");
int[] elems = set.toArray();
for (int i = 0; i < elems.length; i++) {
appendCharName(sb, elems[i]);
}
sb.append("), found ");
appendCharName(sb, foundChar);
break;
default :
sb.append(super.getMessage());
break;
}
return sb.toString();
}
/** Append a char to the msg buffer. If special,
* then show escaped version
*/
private void appendCharName(StringBuffer sb, int c) {
switch (c) {
case 65535 :
// 65535 = (char) -1 = EOF
sb.append("'<EOF>'");
break;
case '\n' :
sb.append("'\\n'");
break;
case '\r' :
sb.append("'\\r'");
break;
case '\t' :
sb.append("'\\t'");
break;
default :
sb.append('\'');
sb.append((char) c);
sb.append('\'');
break;
}
}
}

173
fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedTokenException.java

@ -0,0 +1,173 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/MismatchedTokenException.java#2 $
*/
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.impl.BitSet;
public class MismatchedTokenException extends RecognitionException {
// Token names array for formatting
String[] tokenNames;
// The token that was encountered
public Token token;
// The offending AST node if tree walking
public AST node;
String tokenText = null; // taken from node or token object
// Types of tokens
public static final int TOKEN = 1;
public static final int NOT_TOKEN = 2;
public static final int RANGE = 3;
public static final int NOT_RANGE = 4;
public static final int SET = 5;
public static final int NOT_SET = 6;
// One of the above
public int mismatchType;
// For TOKEN/NOT_TOKEN and RANGE/NOT_RANGE
public int expecting;
// For RANGE/NOT_RANGE (expecting is lower bound of range)
public int upper;
// For SET/NOT_SET
public BitSet set;
/** Looking for AST wildcard, didn't find it */
public MismatchedTokenException() {
super("Mismatched Token: expecting any AST node", "<AST>", -1, -1);
}
// Expected range / not range
public MismatchedTokenException(String[] tokenNames_, AST node_, int lower, int upper_, boolean matchNot) {
super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
tokenNames = tokenNames_;
node = node_;
if (node_ == null) {
tokenText = "<empty tree>";
}
else {
tokenText = node_.toString();
}
mismatchType = matchNot ? NOT_RANGE : RANGE;
expecting = lower;
upper = upper_;
}
// Expected token / not token
public MismatchedTokenException(String[] tokenNames_, AST node_, int expecting_, boolean matchNot) {
super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
tokenNames = tokenNames_;
node = node_;
if (node_ == null) {
tokenText = "<empty tree>";
}
else {
tokenText = node_.toString();
}
mismatchType = matchNot ? NOT_TOKEN : TOKEN;
expecting = expecting_;
}
// Expected BitSet / not BitSet
public MismatchedTokenException(String[] tokenNames_, AST node_, BitSet set_, boolean matchNot) {
super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
tokenNames = tokenNames_;
node = node_;
if (node_ == null) {
tokenText = "<empty tree>";
}
else {
tokenText = node_.toString();
}
mismatchType = matchNot ? NOT_SET : SET;
set = set_;
}
// Expected range / not range
public MismatchedTokenException(String[] tokenNames_, Token token_, int lower, int upper_, boolean matchNot, String fileName_) {
super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
tokenNames = tokenNames_;
token = token_;
tokenText = token_.getText();
mismatchType = matchNot ? NOT_RANGE : RANGE;
expecting = lower;
upper = upper_;
}
// Expected token / not token
public MismatchedTokenException(String[] tokenNames_, Token token_, int expecting_, boolean matchNot, String fileName_) {
super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
tokenNames = tokenNames_;
token = token_;
tokenText = token_.getText();
mismatchType = matchNot ? NOT_TOKEN : TOKEN;
expecting = expecting_;
}
// Expected BitSet / not BitSet
public MismatchedTokenException(String[] tokenNames_, Token token_, BitSet set_, boolean matchNot, String fileName_) {
super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
tokenNames = tokenNames_;
token = token_;
tokenText = token_.getText();
mismatchType = matchNot ? NOT_SET : SET;
set = set_;
}
/**
* Returns a clean error message (no line number/column information)
*/
public String getMessage() {
StringBuffer sb = new StringBuffer();
switch (mismatchType) {
case TOKEN:
sb.append("expecting " + tokenName(expecting) + ", found '" + tokenText + "'");
break;
case NOT_TOKEN:
sb.append("expecting anything but " + tokenName(expecting) + "; got it anyway");
break;
case RANGE:
sb.append("expecting token in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'");
break;
case NOT_RANGE:
sb.append("expecting token NOT in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'");
break;
case SET:
case NOT_SET:
sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of (");
int[] elems = set.toArray();
for (int i = 0; i < elems.length; i++) {
sb.append(' ');
sb.append(tokenName(elems[i]));
}
sb.append("), found '" + tokenText + "'");
break;
default :
sb.append(super.getMessage());
break;
}
return sb.toString();
}
private String tokenName(int tokenType) {
if (tokenType == Token.INVALID_TYPE) {
return "<Set of tokens>";
}
else if (tokenType < 0 || tokenType >= tokenNames.length) {
return "<" + String.valueOf(tokenType) + ">";
}
else {
return tokenNames[tokenType];
}
}
}
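A minimal, illustrative sketch (not part of this commit) of how the formatted message reads when a token mismatch is reported; the token-name array, the token types 4/5, and the CommonToken helper used here are assumptions for the example only.
package com.fr.third.antlr;
class MismatchDemo {
    public static void main(String[] args) {
        // hypothetical vocabulary: indices 0..3 are reserved, 4 and 5 are user types
        String[] names = {"<0>", "EOF", "<2>", "NULL_TREE_LOOKAHEAD", "ID", "ASSIGN"};
        Token found = new CommonToken(4, "x");   // CommonToken assumed to live in this package
        MismatchedTokenException ex =
                new MismatchedTokenException(names, found, 5, false, "Demo.g");
        System.out.println(ex.getMessage());     // prints: expecting ASSIGN, found 'x'
    }
}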

64
fine-antlr-old/src/main/java/com/fr/third/antlr/NameSpace.java

@ -0,0 +1,64 @@
package com.fr.third.antlr;
/**
* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* Container for a C++ namespace specification. Namespaces can be
* nested, so this contains a vector of all the nested names.
*
* @author David Wagner (JPL/Caltech) 8-12-00
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/NameSpace.java#2 $
*/
import java.util.Vector;
import java.util.Enumeration;
import java.io.PrintWriter;
import java.util.StringTokenizer;
public class NameSpace {
private Vector names = new Vector();
private String _name;
public NameSpace(String name) {
_name = new String(name);
parse(name);
}
public String getName()
{
return _name;
}
/**
 * Parse a C++ namespace declaration into separate names,
 * splitting on "::". We could easily parameterize this to make
* the delimiter a language-specific parameter, or use subclasses
* to support C++ namespaces versus java packages. -DAW
*/
protected void parse(String name) {
StringTokenizer tok = new StringTokenizer(name, "::");
while (tok.hasMoreTokens())
names.addElement(tok.nextToken());
}
/**
* Method to generate the required C++ namespace declarations
*/
void emitDeclarations(PrintWriter out) {
for (Enumeration n = names.elements(); n.hasMoreElements();) {
String s = (String)n.nextElement();
out.println("ANTLR_BEGIN_NAMESPACE(" + s + ")");
}
}
/**
* Method to generate the required C++ namespace closures
*/
void emitClosures(PrintWriter out) {
for (int i = 0; i < names.size(); ++i)
out.println("ANTLR_END_NAMESPACE");
}
}
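A minimal, illustrative sketch (not part of this commit) of the NameSpace helper in use; it sits in the com.fr.third.antlr package only because emitDeclarations/emitClosures are package-private.
package com.fr.third.antlr;
import java.io.PrintWriter;
class NameSpaceDemo {
    public static void main(String[] args) {
        NameSpace ns = new NameSpace("com::fr::report");   // parsed into {com, fr, report}
        PrintWriter out = new PrintWriter(System.out, true);
        ns.emitDeclarations(out);   // ANTLR_BEGIN_NAMESPACE(com), (fr), (report)
        ns.emitClosures(out);       // ANTLR_END_NAMESPACE printed three times
    }
}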

40
fine-antlr-old/src/main/java/com/fr/third/antlr/NoViableAltException.java

@ -0,0 +1,40 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/NoViableAltException.java#2 $
*/
import com.fr.third.antlr.collections.AST;
public class NoViableAltException extends RecognitionException {
public Token token;
public AST node; // handles parsing and treeparsing
public NoViableAltException(AST t) {
super("NoViableAlt", "<AST>", t.getLine(), t.getColumn());
node = t;
}
public NoViableAltException(Token t, String fileName_) {
super("NoViableAlt", fileName_, t.getLine(), t.getColumn());
token = t;
}
/**
* Returns a clean error message (no line number/column information)
*/
public String getMessage() {
if (token != null) {
return "unexpected token: " + token.getText();
}
// must be a tree parser error if token==null
if (node == TreeParser.ASTNULL) {
return "unexpected end of subtree";
}
return "unexpected AST node: " + node.toString();
}
}

51
fine-antlr-old/src/main/java/com/fr/third/antlr/NoViableAltForCharException.java

@ -0,0 +1,51 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/NoViableAltForCharException.java#2 $
*/
public class NoViableAltForCharException extends RecognitionException {
public char foundChar;
public NoViableAltForCharException(char c, CharScanner scanner) {
super("NoViableAlt", scanner.getFilename(),
scanner.getLine(), scanner.getColumn());
foundChar = c;
}
/** @deprecated As of ANTLR 2.7.2 use {@link #NoViableAltForCharException(char, String, int, int)} */
public NoViableAltForCharException(char c, String fileName, int line) {
this(c, fileName, line, -1);
}
public NoViableAltForCharException(char c, String fileName, int line, int column) {
super("NoViableAlt", fileName, line, column);
foundChar = c;
}
/**
* Returns a clean error message (no line number/column information)
*/
public String getMessage() {
String mesg = "unexpected char: ";
// Mirror the C++ runtime's behavior. Java lacks a convenient isprint-style
// test, so only printable ASCII is shown literally; anything else
// (including Unicode outside that range) is emitted as a hex escape.
// Ideally the generation of this message would be tweakable.
if ((foundChar >= ' ') && (foundChar <= '~')) {
mesg += '\'';
mesg += foundChar;
mesg += '\'';
}
else {
mesg += "0x"+Integer.toHexString((int)foundChar).toUpperCase();
}
return mesg;
}
}

31
fine-antlr-old/src/main/java/com/fr/third/antlr/OneOrMoreBlock.java

@ -0,0 +1,31 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/OneOrMoreBlock.java#2 $
*/
class OneOrMoreBlock extends BlockWithImpliedExitPath {
public OneOrMoreBlock(Grammar g) {
super(g);
}
public OneOrMoreBlock(Grammar g, Token start) {
super(g, start);
}
public void generate() {
grammar.generator.gen(this);
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public String toString() {
return super.toString() + "+";
}
}

50
fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTree.java

@ -0,0 +1,50 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*/
import com.fr.third.antlr.collections.AST;
public abstract class ParseTree extends BaseAST {
/** Walk the parse tree and return the requested leftmost-derivation step.
 * If step <= 0, return the node text; otherwise return the sentential
 * form reached after that many leftmost replacements.
 */
public String getLeftmostDerivationStep(int step) {
if ( step<=0 ) {
return toString();
}
StringBuffer buf = new StringBuffer(2000);
getLeftmostDerivation(buf, step);
return buf.toString();
}
public String getLeftmostDerivation(int maxSteps) {
StringBuffer buf = new StringBuffer(2000);
buf.append(" "+this.toString());
buf.append('\n');
for (int d=1; d<maxSteps; d++) {
buf.append(" =>");
buf.append(getLeftmostDerivationStep(d));
buf.append('\n');
}
return buf.toString();
}
/** Get derivation and return how many replacements you did (may be less
 * than requested for subtree roots).
 */
protected abstract int getLeftmostDerivation(StringBuffer buf, int step);
// just satisfy BaseAST interface; unused as we manually create nodes
public void initialize(int i, String s) {
}
public void initialize(AST ast) {
}
public void initialize(Token token) {
}
}

70
fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTreeRule.java

@ -0,0 +1,70 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*/
import com.fr.third.antlr.collections.AST;
public class ParseTreeRule extends ParseTree {
public static final int INVALID_ALT = -1;
protected String ruleName;
protected int altNumber; // unused until I modify antlr to record this
public ParseTreeRule(String ruleName) {
this(ruleName,INVALID_ALT);
}
public ParseTreeRule(String ruleName, int altNumber) {
this.ruleName = ruleName;
this.altNumber = altNumber;
}
public String getRuleName() {
return ruleName;
}
/** Do a step-first walk, building up a buffer of tokens until
 * you've reached a particular step; print out any rule subroots
 * instead of descending.
 */
protected int getLeftmostDerivation(StringBuffer buf, int step) {
int numReplacements = 0;
if ( step<=0 ) {
buf.append(' ');
buf.append(toString());
return numReplacements;
}
AST child = getFirstChild();
numReplacements = 1;
// walk child printing them out, descending into at most one
while ( child!=null ) {
if ( numReplacements>=step || child instanceof ParseTreeToken ) {
buf.append(' ');
buf.append(child.toString());
}
else {
// descend for at least one more derivation; update count
int remainingReplacements = step-numReplacements;
int n = ((ParseTree)child).getLeftmostDerivation(buf,
remainingReplacements);
numReplacements += n;
}
child = child.getNextSibling();
}
return numReplacements;
}
public String toString() {
if ( altNumber==INVALID_ALT ) {
return '<'+ruleName+'>';
}
else {
return '<'+ruleName+"["+altNumber+"]>";
}
}
}

29
fine-antlr-old/src/main/java/com/fr/third/antlr/ParseTreeToken.java

@ -0,0 +1,29 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*/
public class ParseTreeToken extends ParseTree {
protected Token token;
public ParseTreeToken(Token token) {
this.token = token;
}
protected int getLeftmostDerivation(StringBuffer buf, int step) {
buf.append(' ');
buf.append(toString());
return step; // did no replacements
}
public String toString() {
if ( token!=null ) {
return token.getText();
}
return "<missing token>";
}
}
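A minimal, illustrative sketch (not part of this commit) that builds a tiny parse tree by hand and prints its leftmost derivation; CommonToken is assumed to be available in this package so the token nodes carry text.
package com.fr.third.antlr;
class ParseTreeDemo {
    public static void main(String[] args) {
        ParseTreeRule expr = new ParseTreeRule("expr");
        expr.addChild(new ParseTreeToken(new CommonToken(Token.MIN_USER_TYPE, "x")));
        expr.addChild(new ParseTreeToken(new CommonToken(Token.MIN_USER_TYPE, "+")));
        expr.addChild(new ParseTreeToken(new CommonToken(Token.MIN_USER_TYPE, "y")));
        // step 0 is the <expr> root itself; step 1 replaces it with its children
        System.out.println(expr.getLeftmostDerivation(2));
    }
}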

374
fine-antlr-old/src/main/java/com/fr/third/antlr/Parser.java

@ -0,0 +1,374 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Parser.java#2 $
*/
/**A generic ANTLR parser (LL(k) for k>=1) containing a bunch of
* utility routines useful at any lookahead depth. We distinguish between
* the LL(1) and LL(k) parsers because of efficiency. This may not be
* necessary in the near future.
*
* Each parser object contains the state of the parse including a lookahead
* cache (the form of which is determined by the subclass), whether or
* not the parser is in guess mode, where tokens come from, etc...
*
* <p>
* During <b>guess</b> mode, the current lookahead token(s) and token type(s)
* cache must be saved because the token stream may not have been informed
* to save the token (via <tt>mark</tt>) before the <tt>try</tt> block.
* Guessing is started by:
* <ol>
* <li>saving the lookahead cache.
* <li>marking the current position in the TokenBuffer.
* <li>increasing the guessing level.
* </ol>
*
* After guessing, the parser state is restored by:
* <ol>
* <li>restoring the lookahead cache.
* <li>rewinding the TokenBuffer.
* <li>decreasing the guessing level.
* </ol>
*
 * @see com.fr.third.antlr.Token
 * @see com.fr.third.antlr.TokenBuffer
 * @see com.fr.third.antlr.LLkParser
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.impl.BitSet;
import com.fr.third.antlr.debug.MessageListener;
import com.fr.third.antlr.debug.ParserListener;
import com.fr.third.antlr.debug.ParserMatchListener;
import com.fr.third.antlr.debug.ParserTokenListener;
import com.fr.third.antlr.debug.SemanticPredicateListener;
import com.fr.third.antlr.debug.SyntacticPredicateListener;
import com.fr.third.antlr.debug.TraceListener;
public abstract class Parser {
protected ParserSharedInputState inputState;
/** Nesting level of registered handlers */
// protected int exceptionLevel = 0;
/** Table of token type to token names */
protected String[] tokenNames;
/** AST return value for a rule is squirreled away here */
protected AST returnAST;
/** AST support code; parser delegates to this object.
* This is set during parser construction by default
* to either "new ASTFactory()" or a ctor that
* has a token type to class map for hetero nodes.
*/
protected ASTFactory astFactory = null;
/** Constructed if any AST types specified in tokens{..}.
* Maps an Integer->Class object.
*/
protected Hashtable tokenTypeToASTClassMap = null;
private boolean ignoreInvalidDebugCalls = false;
/** Used to keep track of indent depth for traceIn/Out */
protected int traceDepth = 0;
public Parser() {
this(new ParserSharedInputState());
}
public Parser(ParserSharedInputState state) {
inputState = state;
}
/** If the user specifies a tokens{} section with heterogeneous
* AST node types, then ANTLR generates code to fill
* this mapping.
*/
public Hashtable getTokenTypeToASTClassMap() {
return tokenTypeToASTClassMap;
}
public void addMessageListener(MessageListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addMessageListener() is only valid if parser built for debugging");
}
public void addParserListener(ParserListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addParserListener() is only valid if parser built for debugging");
}
public void addParserMatchListener(ParserMatchListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addParserMatchListener() is only valid if parser built for debugging");
}
public void addParserTokenListener(ParserTokenListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addParserTokenListener() is only valid if parser built for debugging");
}
public void addSemanticPredicateListener(SemanticPredicateListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addSemanticPredicateListener() is only valid if parser built for debugging");
}
public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addSyntacticPredicateListener() is only valid if parser built for debugging");
}
public void addTraceListener(TraceListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("addTraceListener() is only valid if parser built for debugging");
}
/**Get another token object from the token stream */
public abstract void consume() throws TokenStreamException;
/** Consume tokens until one matches the given token */
public void consumeUntil(int tokenType) throws TokenStreamException {
while (LA(1) != Token.EOF_TYPE && LA(1) != tokenType) {
consume();
}
}
/** Consume tokens until one matches the given token set */
public void consumeUntil(BitSet set) throws TokenStreamException {
while (LA(1) != Token.EOF_TYPE && !set.member(LA(1))) {
consume();
}
}
protected void defaultDebuggingSetup(TokenStream lexer, TokenBuffer tokBuf) {
// by default, do nothing -- we're not debugging
}
/** Get the AST return value squirreled away in the parser */
public AST getAST() {
return returnAST;
}
public ASTFactory getASTFactory() {
return astFactory;
}
public String getFilename() {
return inputState.filename;
}
public ParserSharedInputState getInputState() {
return inputState;
}
public void setInputState(ParserSharedInputState state) {
inputState = state;
}
public String getTokenName(int num) {
return tokenNames[num];
}
public String[] getTokenNames() {
return tokenNames;
}
public boolean isDebugMode() {
return false;
}
/** Return the token type of the ith token of lookahead where i=1
* is the current token being examined by the parser (i.e., it
* has not been matched yet).
*/
public abstract int LA(int i) throws TokenStreamException;
/**Return the ith token of lookahead */
public abstract Token LT(int i) throws TokenStreamException;
// Forwarded to TokenBuffer
public int mark() {
return inputState.input.mark();
}
/**Make sure current lookahead symbol matches token type <tt>t</tt>.
 * Throw an exception upon mismatch, which is caught by either the
 * error handler or by the syntactic predicate.
*/
public void match(int t) throws MismatchedTokenException, TokenStreamException {
if (LA(1) != t)
throw new MismatchedTokenException(tokenNames, LT(1), t, false, getFilename());
else
// mark token as consumed -- fetch next token deferred until LA/LT
consume();
}
/**Make sure current lookahead symbol matches the given set.
 * Throw an exception upon mismatch, which is caught by either the
 * error handler or by the syntactic predicate.
*/
public void match(BitSet b) throws MismatchedTokenException, TokenStreamException {
if (!b.member(LA(1)))
throw new MismatchedTokenException(tokenNames, LT(1), b, false, getFilename());
else
// mark token as consumed -- fetch next token deferred until LA/LT
consume();
}
public void matchNot(int t) throws MismatchedTokenException, TokenStreamException {
if (LA(1) == t)
// Throws inverted-sense exception
throw new MismatchedTokenException(tokenNames, LT(1), t, true, getFilename());
else
// mark token as consumed -- fetch next token deferred until LA/LT
consume();
}
/** @deprecated as of 2.7.2. This method calls System.exit() and writes
* directly to stderr, which is usually not appropriate when
* a parser is embedded into a larger application. Since the method is
* <code>static</code>, it cannot be overridden to avoid these problems.
* ANTLR no longer uses this method internally or in generated code.
*/
public static void panic() {
System.err.println("Parser: panic");
System.exit(1);
}
public void removeMessageListener(MessageListener l) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("removeMessageListener() is only valid if parser built for debugging");
}
public void removeParserListener(ParserListener l) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("removeParserListener() is only valid if parser built for debugging");
}
public void removeParserMatchListener(ParserMatchListener l) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("removeParserMatchListener() is only valid if parser built for debugging");
}
public void removeParserTokenListener(ParserTokenListener l) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("removeParserTokenListener() is only valid if parser built for debugging");
}
public void removeSemanticPredicateListener(SemanticPredicateListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("removeSemanticPredicateListener() is only valid if parser built for debugging");
}
public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
if (!ignoreInvalidDebugCalls)
throw new IllegalArgumentException("removeSyntacticPredicateListener() is only valid if parser built for debugging");
}
public void removeTraceListener(TraceListener l) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("removeTraceListener() is only valid if parser built for debugging");
}
/** Parser error-reporting function can be overridden in subclass */
public void reportError(RecognitionException ex) {
System.err.println(ex);
}
/** Parser error-reporting function can be overridden in subclass */
public void reportError(String s) {
if (getFilename() == null) {
System.err.println("error: " + s);
}
else {
System.err.println(getFilename() + ": error: " + s);
}
}
/** Parser warning-reporting function can be overridden in subclass */
public void reportWarning(String s) {
if (getFilename() == null) {
System.err.println("warning: " + s);
}
else {
System.err.println(getFilename() + ": warning: " + s);
}
}
public void recover(RecognitionException ex,
BitSet tokenSet) throws TokenStreamException {
consume();
consumeUntil(tokenSet);
}
public void rewind(int pos) {
inputState.input.rewind(pos);
}
/** Specify an object with support code (shared by
 * Parser and TreeParser). Normally, the programmer
 * does not play with this, using setASTNodeClass instead.
 */
public void setASTFactory(ASTFactory f) {
astFactory = f;
}
public void setASTNodeClass(String cl) {
astFactory.setASTNodeType(cl);
}
/** Specify the type of node to create during tree building; use setASTNodeClass now
* to be consistent with Token Object Type accessor.
* @deprecated since 2.7.1
*/
public void setASTNodeType(String nodeType) {
setASTNodeClass(nodeType);
}
public void setDebugMode(boolean debugMode) {
if (!ignoreInvalidDebugCalls)
throw new RuntimeException("setDebugMode() only valid if parser built for debugging");
}
public void setFilename(String f) {
inputState.filename = f;
}
public void setIgnoreInvalidDebugCalls(boolean value) {
ignoreInvalidDebugCalls = value;
}
/** Set or change the input token buffer */
public void setTokenBuffer(TokenBuffer t) {
inputState.input = t;
}
public void traceIndent() {
for (int i = 0; i < traceDepth; i++)
System.out.print(' ');
}
public void traceIn(String rname) throws TokenStreamException {
traceDepth += 1;
traceIndent();
System.out.println("> " + rname + "; LA(1)==" + LT(1).getText() +
((inputState.guessing > 0)?" [guessing]":""));
}
public void traceOut(String rname) throws TokenStreamException {
traceIndent();
System.out.println("< " + rname + "; LA(1)==" + LT(1).getText() +
((inputState.guessing > 0)?" [guessing]":""));
traceDepth -= 1;
}
}
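A minimal, illustrative sketch (not part of this commit) of the mark()/rewind()/guessing pattern the class comment above describes, written by hand against this Parser API; LLkParser and TokenStream are assumed to be the package's own classes, and the token types 4/5 are placeholders.
package com.fr.third.antlr;
class GuessingDemoParser extends LLkParser {
    GuessingDemoParser(TokenStream lexer) {
        super(lexer, 1);               // LL(1) lookahead
    }
    /** Speculatively try "ID ASSIGN"; fall back to a plain ID if it fails. */
    void statement() throws RecognitionException, TokenStreamException {
        boolean looksLikeAssignment = true;
        int start = mark();            // remember the position in the TokenBuffer
        inputState.guessing++;         // recognition errors are just signals while guessing
        try {
            match(4 /* ID */);
            match(5 /* ASSIGN */);
        }
        catch (RecognitionException pe) {
            looksLikeAssignment = false;
        }
        rewind(start);                 // restore the lookahead position
        inputState.guessing--;
        if (looksLikeAssignment) {
            // ... parse the assignment alternative for real ...
        }
        else {
            match(4 /* ID */);
        }
    }
}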

99
fine-antlr-old/src/main/java/com/fr/third/antlr/ParserGrammar.java

@ -0,0 +1,99 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ParserGrammar.java#2 $
*/
import java.io.IOException;
/** Parser-specific grammar subclass */
class ParserGrammar extends Grammar {
ParserGrammar(String className_, Tool tool_, String superClass) {
super(className_, tool_, superClass);
}
/** Top-level call to generate the code for this grammar */
public void generate() throws IOException {
generator.gen(this);
}
// Get name of class from which generated parser/lexer inherits
protected String getSuperClass() {
// if debugging, choose the debugging version of the parser
if (debuggingOutput)
return "debug.LLkDebuggingParser";
return "LLkParser";
}
/**Process command line arguments.
* -trace have all rules call traceIn/traceOut
* -traceParser have parser rules call traceIn/traceOut
* -debug generate debugging output for parser debugger
*/
public void processArguments(String[] args) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-trace")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-traceParser")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-debug")) {
debuggingOutput = true;
antlrTool.setArgOK(i);
}
}
}
/** Set parser options -- performs actions for the following options:
 *  buildAST, interactive, ASTLabelType, className; anything else is
 *  passed on to Grammar.setOption().
 */
public boolean setOption(String key, Token value) {
String s = value.getText();
if (key.equals("buildAST")) {
if (s.equals("true")) {
buildAST = true;
}
else if (s.equals("false")) {
buildAST = false;
}
else {
antlrTool.error("buildAST option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("interactive")) {
if (s.equals("true")) {
interactive = true;
}
else if (s.equals("false")) {
interactive = false;
}
else {
antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("ASTLabelType")) {
super.setOption(key, value);
return true;
}
if (key.equals("className")) {
super.setOption(key, value);
return true;
}
if (super.setOption(key, value)) {
return true;
}
antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
return false;
}
}

38
fine-antlr-old/src/main/java/com/fr/third/antlr/ParserSharedInputState.java

@ -0,0 +1,38 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ParserSharedInputState.java#2 $
*/
/** This object contains the data associated with an
* input stream of tokens. Multiple parsers
* share a single ParserSharedInputState to parse
* the same stream of tokens.
*/
public class ParserSharedInputState {
/** Where to get token objects */
protected TokenBuffer input;
/** Are we guessing (guessing>0)? */
public int guessing = 0;
/** What file (if known) caused the problem? */
protected String filename;
public void reset() {
guessing = 0;
filename = null;
input.reset();
}
public String getFilename() {
return filename;
}
public TokenBuffer getInput() {
return input;
}
}

138
fine-antlr-old/src/main/java/com/fr/third/antlr/PreservingFileWriter.java

@ -0,0 +1,138 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id:$
* @author Ric Klaren <klaren@cs.utwente.nl>
*/
import java.io.*;
/** PreservingFileWriter only overwrites target if the new file is different.
Mainly added in order to prevent big and unnecessary recompiles in C++
projects.
I/O is buffered.
*/
public class PreservingFileWriter extends FileWriter {
protected File target_file; /// the file we intend to write to
protected File tmp_file; /// the tmp file we create at first
public PreservingFileWriter(String file) throws IOException
{
super(file+".antlr.tmp");
// set up File thingy for target..
target_file = new File(file);
String parentdirname = target_file.getParent();
if( parentdirname != null )
{
File parentdir = new File(parentdirname);
if (!parentdir.exists())
throw new IOException("destination directory of '"+file+"' doesn't exist");
if (!parentdir.canWrite())
throw new IOException("destination directory of '"+file+"' isn't writeable");
}
if( target_file.exists() && ! target_file.canWrite() )
throw new IOException("cannot write to '"+file+"'");
// and for the temp file
tmp_file = new File(file+".antlr.tmp");
// have it nuked at exit
// RK: this is broken on java 1.4 and
// is not compatible with java 1.1 (which is a big problem I'm told :) )
// sigh. Any real language would do this in a destructor ;) ;)
// tmp_file.deleteOnExit();
}
/** Close the tmp file and see if the actual target is different;
 * if so, the target file is overwritten by the copy. If not, we do nothing.
 */
public void close() throws IOException
{
Reader source = null;
Writer target = null;
try {
// close the tmp file so we can access it safely...
super.close();
char[] buffer = new char[1024];
int cnt;
// target_file != tmp_file so we have to compare and move it..
if( target_file.length() == tmp_file.length() )
{
// Do expensive read'n'compare
Reader tmp;
char[] buf2 = new char[1024];
source = new BufferedReader(new FileReader(tmp_file));
tmp = new BufferedReader(new FileReader(target_file));
int cnt1, cnt2;
boolean equal = true;
while( equal )
{
cnt1 = source.read(buffer,0,1024);
cnt2 = tmp.read(buf2,0,1024);
if( cnt1 != cnt2 )
{
equal = false;
break;
}
if( cnt1 == -1 ) // EOF
break;
for( int i = 0; i < cnt1; i++ )
{
if( buffer[i] != buf2[i] )
{
equal = false;
break;
}
}
}
// clean up...
source.close();
tmp.close();
source = tmp = null;
if( equal )
return;
}
source = new BufferedReader(new FileReader(tmp_file));
target = new BufferedWriter(new FileWriter(target_file));
while(true)
{
cnt = source.read(buffer,0,1024);
if( cnt == -1 )
break;
target.write(buffer, 0, cnt );
}
}
finally {
if( source != null )
{
try { source.close(); }
catch( IOException e ) { ; }
}
if( target != null )
{
try { target.close(); }
catch( IOException e ) { ; }
}
// RK: Now if I'm correct this should be called anytime.
if( tmp_file != null && tmp_file.exists() )
{
tmp_file.delete();
tmp_file = null;
}
}
}
}
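A minimal, illustrative sketch (not part of this commit): regenerating a file through PreservingFileWriter so an unchanged result leaves the existing file (and its timestamp) untouched; the file name is a placeholder.
package com.fr.third.antlr;
import java.io.IOException;
class PreservingWriteDemo {
    public static void main(String[] args) throws IOException {
        PreservingFileWriter w = new PreservingFileWriter("Generated.txt");
        try {
            w.write("// generated content\n");
        }
        finally {
            w.close();   // compares the temp copy with Generated.txt before replacing it
        }
    }
}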

152
fine-antlr-old/src/main/java/com/fr/third/antlr/PrintWriterWithSMAP.java

@ -0,0 +1,152 @@
package com.fr.third.antlr;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
// assumes one source file for now -- may need to change if ANTLR allows
// file inclusion in the future
// TODO optimize the output using line ranges for input/output files
// currently this writes one mapping per line
public class PrintWriterWithSMAP extends PrintWriter {
private int currentOutputLine = 1;
private int currentSourceLine = 0;
private Map sourceMap = new HashMap();
private boolean lastPrintCharacterWasCR = false;
private boolean mapLines = false;
private boolean mapSingleSourceLine = false;
private boolean anythingWrittenSinceMapping = false;
public PrintWriterWithSMAP(OutputStream out) {
super(out);
}
public PrintWriterWithSMAP(OutputStream out, boolean autoFlush) {
super(out, autoFlush);
}
public PrintWriterWithSMAP(Writer out) {
super(out);
}
public PrintWriterWithSMAP(Writer out, boolean autoFlush) {
super(out, autoFlush);
}
public void startMapping(int sourceLine) {
mapLines = true;
if (sourceLine != JavaCodeGenerator.CONTINUE_LAST_MAPPING)
currentSourceLine = sourceLine;
}
public void startSingleSourceLineMapping(int sourceLine) {
mapSingleSourceLine = true;
mapLines = true;
if (sourceLine != JavaCodeGenerator.CONTINUE_LAST_MAPPING)
currentSourceLine = sourceLine;
}
public void endMapping() {
mapLine(false);
mapLines = false;
mapSingleSourceLine = false;
}
protected void mapLine(boolean incrementOutputLineCount) {
if (mapLines && anythingWrittenSinceMapping) {
Integer sourceLine = new Integer(currentSourceLine);
Integer outputLine = new Integer(currentOutputLine);
List outputLines = (List)sourceMap.get(sourceLine);
if (outputLines == null) {
outputLines = new ArrayList();
sourceMap.put(sourceLine,outputLines);
}
if (!outputLines.contains(outputLine))
outputLines.add(outputLine);
}
if (incrementOutputLineCount)
currentOutputLine++;
if (!mapSingleSourceLine)
currentSourceLine++;
anythingWrittenSinceMapping = false;
}
public void dump(PrintWriter smapWriter, String targetClassName, String grammarFile) {
smapWriter.println("SMAP");
smapWriter.println(targetClassName + ".java");
smapWriter.println('G');
smapWriter.println("*S G");
smapWriter.println("*F");
smapWriter.println("+ 0 " + grammarFile);
smapWriter.println(grammarFile);
smapWriter.println("*L");
List sortedSourceLines = new ArrayList(sourceMap.keySet());
Collections.sort(sortedSourceLines);
for (Iterator i = sortedSourceLines.iterator(); i.hasNext();) {
Integer sourceLine = (Integer)i.next();
List outputLines = (List)sourceMap.get(sourceLine);
for (Iterator j = outputLines.iterator(); j.hasNext();) {
Integer outputLine = (Integer)j.next();
smapWriter.println(sourceLine + ":" + outputLine);
}
}
smapWriter.println("*E");
smapWriter.close();
}
public void write(char[] buf, int off, int len) {
int stop = off+len;
for(int i = off; i < stop; i++) {
checkChar(buf[i]);
}
super.write(buf,off,len);
}
// after testing, may want to inline this
public void checkChar(int c) {
if (lastPrintCharacterWasCR && c != '\n')
mapLine(true);
else if (c == '\n')
mapLine(true);
else if (!Character.isWhitespace((char)c))
anythingWrittenSinceMapping = true;
lastPrintCharacterWasCR = (c == '\r');
}
public void write(int c) {
checkChar(c);
super.write(c);
}
public void write(String s, int off, int len) {
int stop = off+len;
for(int i = off; i < stop; i++) {
checkChar(s.charAt(i));
}
super.write(s,off,len);
}
// PrintWriter delegates write(char[]) to write(char[], int, int)
// PrintWriter delegates write(String) to write(String, int, int)
// println() is overridden because the current PrintWriter impl directly
// dumps the newline sequence to the target file w/o going through
// the other write methods.
public void println() {
mapLine(true);
super.println();
lastPrintCharacterWasCR = false;
}
public Map getSourceMap() {
return sourceMap;
}
public int getCurrentOutputLine() {
return currentOutputLine;
}
}
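A minimal, illustrative sketch (not part of this commit) of the mapping workflow: mark the grammar line, emit generated lines, then dump the collected mapping in SMAP form; the class and grammar names are placeholders.
package com.fr.third.antlr;
import java.io.PrintWriter;
import java.io.StringWriter;
class SmapDemo {
    public static void main(String[] args) {
        StringWriter generated = new StringWriter();
        PrintWriterWithSMAP out = new PrintWriterWithSMAP(generated);
        out.startSingleSourceLineMapping(12);   // both generated lines map back to grammar line 12
        out.println("int i = 0;");
        out.println("i++;");
        out.endMapping();
        out.flush();
        out.dump(new PrintWriter(System.out, true), "DemoParser", "demo.g");   // emits 12:1 and 12:2
    }
}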

70
fine-antlr-old/src/main/java/com/fr/third/antlr/RecognitionException.java

@ -0,0 +1,70 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/RecognitionException.java#2 $
*/
public class RecognitionException extends ANTLRException {
public String fileName; // not used by treeparsers
public int line;
public int column;
public RecognitionException() {
super("parsing error");
fileName = null;
line = -1;
column = -1;
}
/**
* RecognitionException constructor comment.
* @param s java.lang.String
*/
public RecognitionException(String s) {
super(s);
fileName = null;
line = -1;
column = -1;
}
/** @deprecated As of ANTLR 2.7.2 use {@link #RecognitionException(String, String, int, int)} */
public RecognitionException(String s, String fileName_, int line_) {
this(s, fileName_, line_, -1);
}
/**
* RecognitionException constructor comment.
* @param s java.lang.String
*/
public RecognitionException(String s, String fileName_, int line_, int column_) {
super(s);
fileName = fileName_;
line = line_;
column = column_;
}
public String getFilename() {
return fileName;
}
public int getLine() {
return line;
}
public int getColumn() {
return column;
}
/** @deprecated As of ANTLR 2.7.0 */
public String getErrorMessage() {
return getMessage();
}
public String toString() {
return FileLineFormatter.getFormatter().
getFormatString(fileName, line, column) + getMessage();
}
}

212
fine-antlr-old/src/main/java/com/fr/third/antlr/RuleBlock.java

@ -0,0 +1,212 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/RuleBlock.java#2 $
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.Vector;
/**A list of alternatives and info contained in
* the rule definition.
*/
public class RuleBlock extends AlternativeBlock {
protected String ruleName;
protected String argAction = null; // string for rule arguments [...]
protected String throwsSpec = null;
protected String returnAction = null;// string for rule return type(s) <...>
protected RuleEndElement endNode; // which node ends this rule?
// Generate literal-testing code for lexer rule?
protected boolean testLiterals = false;
Vector labeledElements; // List of labeled elements found in this rule
// This is a list of AlternativeElement (or subclass)
protected boolean[] lock; // for analysis; used to avoid infinite loops
// 1..k
protected Lookahead cache[];// Each rule can cache its lookahead computation.
// This cache contains an epsilon
// imaginary token if the FOLLOW is required. No
// FOLLOW information is cached here.
// The FIRST(rule) is stored in this cache; 1..k
// This set includes FIRST of all alts.
Hashtable exceptionSpecs; // table of String-to-ExceptionSpec.
// grammar-settable options
protected boolean defaultErrorHandler = true;
protected String ignoreRule = null;
/** Construct a named rule. */
public RuleBlock(Grammar g, String r) {
super(g);
ruleName = r;
labeledElements = new Vector();
cache = new Lookahead[g.maxk + 1];
exceptionSpecs = new Hashtable();
setAutoGen(g instanceof ParserGrammar);
}
/** Construct a named rule with line number information */
public RuleBlock(Grammar g, String r, int line, boolean doAutoGen_) {
this(g, r);
this.line = line;
setAutoGen(doAutoGen_);
}
public void addExceptionSpec(ExceptionSpec ex) {
if (findExceptionSpec(ex.label) != null) {
if (ex.label != null) {
grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler for label: " + ex.label);
}
else {
grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler");
}
}
else {
exceptionSpecs.put((ex.label == null ? "" : ex.label.getText()), ex);
}
}
public ExceptionSpec findExceptionSpec(Token label) {
return (ExceptionSpec)exceptionSpecs.get(label == null ? "" : label.getText());
}
public ExceptionSpec findExceptionSpec(String label) {
return (ExceptionSpec)exceptionSpecs.get(label == null ? "" : label);
}
public void generate() {
grammar.generator.gen(this);
}
public boolean getDefaultErrorHandler() {
return defaultErrorHandler;
}
public RuleEndElement getEndElement() {
return endNode;
}
public String getIgnoreRule() {
return ignoreRule;
}
public String getRuleName() {
return ruleName;
}
public boolean getTestLiterals() {
return testLiterals;
}
public boolean isLexerAutoGenRule() {
return ruleName.equals("nextToken");
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public void prepareForAnalysis() {
super.prepareForAnalysis();
lock = new boolean[grammar.maxk + 1];
}
// rule option values
public void setDefaultErrorHandler(boolean value) {
defaultErrorHandler = value;
}
public void setEndElement(RuleEndElement re) {
endNode = re;
}
public void setOption(Token key, Token value) {
if (key.getText().equals("defaultErrorHandler")) {
if (value.getText().equals("true")) {
defaultErrorHandler = true;
}
else if (value.getText().equals("false")) {
defaultErrorHandler = false;
}
else {
grammar.antlrTool.error("Value for defaultErrorHandler must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
else if (key.getText().equals("testLiterals")) {
if (!(grammar instanceof LexerGrammar)) {
grammar.antlrTool.error("testLiterals option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
}
else {
if (value.getText().equals("true")) {
testLiterals = true;
}
else if (value.getText().equals("false")) {
testLiterals = false;
}
else {
grammar.antlrTool.error("Value for testLiterals must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
}
else if (key.getText().equals("ignore")) {
if (!(grammar instanceof LexerGrammar)) {
grammar.antlrTool.error("ignore option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
}
else {
ignoreRule = value.getText();
}
}
else if (key.getText().equals("paraphrase")) {
if (!(grammar instanceof LexerGrammar)) {
grammar.antlrTool.error("paraphrase option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
}
else {
// find token def associated with this rule
TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ruleName);
if (ts == null) {
grammar.antlrTool.panic("cannot find token associated with rule " + ruleName);
}
ts.setParaphrase(value.getText());
}
}
else if (key.getText().equals("generateAmbigWarnings")) {
if (value.getText().equals("true")) {
generateAmbigWarnings = true;
}
else if (value.getText().equals("false")) {
generateAmbigWarnings = false;
}
else {
grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
}
}
else {
grammar.antlrTool.error("Invalid rule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn());
}
}
public String toString() {
String s = " FOLLOW={";
Lookahead cache[] = endNode.cache;
int k = grammar.maxk;
boolean allNull = true;
for (int j = 1; j <= k; j++) {
if (cache[j] == null) continue;
s += cache[j].toString(",", grammar.tokenManager.getVocabulary());
allNull = false;
if (j < k && cache[j + 1] != null) s += ";";
}
s += "}";
if (allNull) s = "";
return ruleName + ": " + super.toString() + " ;" + s;
}
}

33
fine-antlr-old/src/main/java/com/fr/third/antlr/RuleEndElement.java

@ -0,0 +1,33 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/RuleEndElement.java#2 $
*/
/**Contains a list of all places that reference
* this enclosing rule. Useful for FOLLOW computations.
*/
class RuleEndElement extends BlockEndElement {
protected Lookahead[] cache; // Each rule can cache its lookahead computation.
// The FOLLOW(rule) is stored in this cache.
// 1..k
protected boolean noFOLLOW;
public RuleEndElement(Grammar g) {
super(g);
cache = new Lookahead[g.maxk + 1];
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public String toString() {
//return " [RuleEnd]";
return "";
}
}

73
fine-antlr-old/src/main/java/com/fr/third/antlr/RuleRefElement.java

@ -0,0 +1,73 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/RuleRefElement.java#2 $
*/
class RuleRefElement extends AlternativeElement {
protected String targetRule; // which rule is being called?
protected String args = null; // were any args passed to rule?
protected String idAssign = null; // is the return type assigned to a variable?
protected String label;
public RuleRefElement(Grammar g, Token t, int autoGenType_) {
super(g, t, autoGenType_);
targetRule = t.getText();
// if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule?
if (t.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
targetRule = CodeGenerator.encodeLexerRuleName(targetRule);
}
}
// public RuleRefElement(Grammar g, String t, int line, int autoGenType_) {
// super(g, autoGenType_);
// targetRule = t;
// if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule?
// targetRule = CodeGenerator.lexerRuleName(targetRule);
// }
// this.line = line;
// }
public void generate() {
grammar.generator.gen(this);
}
public String getArgs() {
return args;
}
public String getIdAssign() {
return idAssign;
}
public String getLabel() {
return label;
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public void setArgs(String a) {
args = a;
}
public void setIdAssign(String id) {
idAssign = id;
}
public void setLabel(String label_) {
label = label_;
}
public String toString() {
if (args != null)
return " " + targetRule + args;
else
return " " + targetRule;
}
}

53
fine-antlr-old/src/main/java/com/fr/third/antlr/RuleSymbol.java

@ -0,0 +1,53 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/RuleSymbol.java#2 $
*/
import com.fr.third.antlr.collections.impl.Vector;
class RuleSymbol extends GrammarSymbol {
RuleBlock block; // list of alternatives
boolean defined; // has the rule been defined yet?
Vector references; // list of all nodes referencing this rule
// not strictly needed by generic symbol table
// but we will almost always analyze/gen code
String access; // access specifier for this rule
String comment; // A javadoc comment if any.
public RuleSymbol(String r) {
super(r);
references = new Vector();
}
public void addReference(RuleRefElement e) {
references.appendElement(e);
}
public RuleBlock getBlock() {
return block;
}
public RuleRefElement getReference(int i) {
return (RuleRefElement)references.elementAt(i);
}
public boolean isDefined() {
return defined;
}
public int numReferences() {
return references.size();
}
public void setBlock(RuleBlock rb) {
block = rb;
}
public void setDefined() {
defined = true;
}
}

23
fine-antlr-old/src/main/java/com/fr/third/antlr/SemanticException.java

@ -0,0 +1,23 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/SemanticException.java#2 $
*/
public class SemanticException extends RecognitionException {
public SemanticException(String s) {
super(s);
}
/** @deprecated As of ANTLR 2.7.2 use {@link #SemanticException(String, String, int, int)} */
public SemanticException(String s, String fileName, int line) {
this(s, fileName, line, -1);
}
public SemanticException(String s, String fileName, int line, int column) {
super(s, fileName, line, column);
}
}

143
fine-antlr-old/src/main/java/com/fr/third/antlr/SimpleTokenManager.java

@ -0,0 +1,143 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/SimpleTokenManager.java#2 $
*/
import java.util.Hashtable;
import java.util.Enumeration;
import com.fr.third.antlr.collections.impl.Vector;
class SimpleTokenManager implements TokenManager, Cloneable {
protected int maxToken = Token.MIN_USER_TYPE;
// Token vocabulary is Vector of String's
protected Vector vocabulary;
// Hash table is a mapping from Strings to TokenSymbol
private Hashtable table;
// the ANTLR tool
protected Tool antlrTool;
// Name of the token manager
protected String name;
protected boolean readOnly = false;
SimpleTokenManager(String name_, Tool tool_) {
antlrTool = tool_;
name = name_;
// Don't make a bigger vector than we need, because it will show up in output sets.
vocabulary = new Vector(1);
table = new Hashtable();
// define EOF symbol
TokenSymbol ts = new TokenSymbol("EOF");
ts.setTokenType(Token.EOF_TYPE);
define(ts);
// define <null-tree-lookahead> but only in the vocabulary vector
vocabulary.ensureCapacity(Token.NULL_TREE_LOOKAHEAD);
vocabulary.setElementAt("NULL_TREE_LOOKAHEAD", Token.NULL_TREE_LOOKAHEAD);
}
public Object clone() {
SimpleTokenManager tm;
try {
tm = (SimpleTokenManager)super.clone();
tm.vocabulary = (Vector)this.vocabulary.clone();
tm.table = (Hashtable)this.table.clone();
tm.maxToken = this.maxToken;
tm.antlrTool = this.antlrTool;
tm.name = this.name;
}
catch (CloneNotSupportedException e) {
antlrTool.panic("cannot clone token manager");
return null;
}
return tm;
}
/** define a token */
public void define(TokenSymbol ts) {
// Add the symbol to the vocabulary vector
vocabulary.ensureCapacity(ts.getTokenType());
vocabulary.setElementAt(ts.getId(), ts.getTokenType());
// add the symbol to the hash table
mapToTokenSymbol(ts.getId(), ts);
}
/** Simple token manager doesn't have a name -- must be set externally */
public String getName() {
return name;
}
/** Get a token symbol by index */
public String getTokenStringAt(int idx) {
return (String)vocabulary.elementAt(idx);
}
/** Get the TokenSymbol for a string */
public TokenSymbol getTokenSymbol(String sym) {
return (TokenSymbol)table.get(sym);
}
/** Get a token symbol by index */
public TokenSymbol getTokenSymbolAt(int idx) {
return getTokenSymbol(getTokenStringAt(idx));
}
/** Get an enumerator over the symbol table */
public Enumeration getTokenSymbolElements() {
return table.elements();
}
public Enumeration getTokenSymbolKeys() {
return table.keys();
}
/** Get the token vocabulary (read-only).
 * @return A Vector of String token ids, indexed by token type
 */
public Vector getVocabulary() {
return vocabulary;
}
/** Simple token manager is not read-only */
public boolean isReadOnly() {
return false;
}
/** Map a label or string to an existing token symbol */
public void mapToTokenSymbol(String name, TokenSymbol sym) {
// System.out.println("mapToTokenSymbol("+name+","+sym+")");
table.put(name, sym);
}
/** Get the highest token type in use */
public int maxTokenType() {
return maxToken - 1;
}
/** Get the next unused token type */
public int nextTokenType() {
return maxToken++;
}
/** Set the name of the token manager */
public void setName(String name_) {
name = name_;
}
public void setReadOnly(boolean ro) {
readOnly = ro;
}
/** Is a token symbol defined? */
public boolean tokenDefined(String symbol) {
return table.containsKey(symbol);
}
}
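A minimal, illustrative sketch (not part of this commit) of defining and retrieving a token symbol; it assumes the package's Tool no-arg constructor and TokenSymbol class, and uses a made-up token name.
package com.fr.third.antlr;
class TokenManagerDemo {
    public static void main(String[] args) {
        SimpleTokenManager tm = new SimpleTokenManager("Demo", new Tool());
        TokenSymbol plus = new TokenSymbol("PLUS");
        plus.setTokenType(tm.nextTokenType());   // first user type is Token.MIN_USER_TYPE (4)
        tm.define(plus);
        System.out.println(tm.getTokenSymbol("PLUS").getTokenType());   // 4
    }
}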

65
fine-antlr-old/src/main/java/com/fr/third/antlr/StringLiteralElement.java

@ -0,0 +1,65 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/StringLiteralElement.java#2 $
*/
class StringLiteralElement extends GrammarAtom {
// atomText with quotes stripped and escape codes processed
protected String processedAtomText;
public StringLiteralElement(Grammar g, Token t, int autoGenType) {
super(g, t, autoGenType);
if (!(g instanceof LexerGrammar)) {
// lexer does not have token types for string literals
TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atomText);
if (ts == null) {
g.antlrTool.error("Undefined literal: " + atomText, grammar.getFilename(), t.getLine(), t.getColumn());
}
else {
tokenType = ts.getTokenType();
}
}
line = t.getLine();
// process the string literal text by removing quotes and escaping chars
// If a lexical grammar, add the characters to the char vocabulary
processedAtomText = new String();
for (int i = 1; i < atomText.length() - 1; i++) {
char c = atomText.charAt(i);
if (c == '\\') {
if (i + 1 < atomText.length() - 1) {
i++;
c = atomText.charAt(i);
switch (c) {
case 'n':
c = '\n';
break;
case 'r':
c = '\r';
break;
case 't':
c = '\t';
break;
}
}
}
if (g instanceof LexerGrammar) {
((LexerGrammar)g).charVocabulary.add(c);
}
processedAtomText += c;
}
}
public void generate() {
grammar.generator.gen(this);
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
}

25
fine-antlr-old/src/main/java/com/fr/third/antlr/StringLiteralSymbol.java

@ -0,0 +1,25 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/StringLiteralSymbol.java#2 $
*/
class StringLiteralSymbol extends TokenSymbol {
protected String label; // was this string literal labeled?
public StringLiteralSymbol(String r) {
super(r);
}
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
}

85
fine-antlr-old/src/main/java/com/fr/third/antlr/StringUtils.java

@ -0,0 +1,85 @@
package com.fr.third.antlr;
public class StringUtils {
/** General-purpose utility function for removing
* characters from back of string
* @param s The string to process
* @param c The character to remove
* @return The resulting string
*/
static public String stripBack(String s, char c) {
while (s.length() > 0 && s.charAt(s.length() - 1) == c) {
s = s.substring(0, s.length() - 1);
}
return s;
}
/** General-purpose utility function for removing
* characters from back of string
* @param s The string to process
* @param remove A string containing the set of characters to remove
* @return The resulting string
*/
static public String stripBack(String s, String remove) {
boolean changed;
do {
changed = false;
for (int i = 0; i < remove.length(); i++) {
char c = remove.charAt(i);
while (s.length() > 0 && s.charAt(s.length() - 1) == c) {
changed = true;
s = s.substring(0, s.length() - 1);
}
}
} while (changed);
return s;
}
/** General-purpose utility function for removing
* characters from front of string
* @param s The string to process
* @param c The character to remove
* @return The resulting string
*/
static public String stripFront(String s, char c) {
while (s.length() > 0 && s.charAt(0) == c) {
s = s.substring(1);
}
return s;
}
/** General-purpose utility function for removing
* characters from front of string
* @param s The string to process
* @param remove A string containing the set of characters to remove
* @return The resulting string
*/
static public String stripFront(String s, String remove) {
boolean changed;
do {
changed = false;
for (int i = 0; i < remove.length(); i++) {
char c = remove.charAt(i);
while (s.length() > 0 && s.charAt(0) == c) {
changed = true;
s = s.substring(1);
}
}
} while (changed);
return s;
}
/** General-purpose utility function for removing
* characters from the front and back of string
* @param src The string to process
* @param head Exact string to strip from the front
* @param tail Exact string to strip from the back
* @return The resulting string
*/
public static String stripFrontBack(String src, String head, String tail) {
int h = src.indexOf(head);
int t = src.lastIndexOf(tail);
if (h == -1 || t == -1) return src;
return src.substring(h + 1, t);
}
}
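Illustrative usage of the helpers above (not part of the diff); the input strings are made up. Note that stripFrontBack() skips only one character past the matched head, so it is effectively intended for single-character delimiters.

import com.fr.third.antlr.StringUtils;

public class StringUtilsDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.stripBack("value;;;", ';'));         // value
        System.out.println(StringUtils.stripFront("  \tname", " \t"));      // name
        System.out.println(StringUtils.stripFrontBack("<tag>", "<", ">"));  // tag
    }
}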

31
fine-antlr-old/src/main/java/com/fr/third/antlr/SynPredBlock.java

@@ -0,0 +1,31 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/SynPredBlock.java#2 $
*/
class SynPredBlock extends AlternativeBlock {
public SynPredBlock(Grammar g) {
super(g);
}
public SynPredBlock(Grammar g, Token start) {
super(g, start, false);
}
public void generate() {
grammar.generator.gen(this);
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public String toString() {
return super.toString() + "=>";
}
}

78
fine-antlr-old/src/main/java/com/fr/third/antlr/Token.java

@@ -0,0 +1,78 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Token.java#2 $
*/
/** A token is minimally a token type. Subclasses can add the text matched
* for the token and line info.
*/
public class Token implements Cloneable {
// constants
public static final int MIN_USER_TYPE = 4;
public static final int NULL_TREE_LOOKAHEAD = 3;
public static final int INVALID_TYPE = 0;
public static final int EOF_TYPE = 1;
public static final int SKIP = -1;
// each Token has at least a token type
protected int type = INVALID_TYPE;
// the illegal token object
public static Token badToken = new Token(INVALID_TYPE, "<no text>");
public Token() {
}
public Token(int t) {
type = t;
}
public Token(int t, String txt) {
type = t;
setText(txt);
}
public int getColumn() {
return 0;
}
public int getLine() {
return 0;
}
public String getFilename() {
return null;
}
public void setFilename(String name) {
}
public String getText() {
return "<no text>";
}
public void setText(String t) {
}
public void setColumn(int c) {
}
public void setLine(int l) {
}
public int getType() {
return type;
}
public void setType(int t) {
type = t;
}
public String toString() {
return "[\"" + getText() + "\",<" + getType() + ">]";
}
}
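A small illustration (not part of the diff): the base Token carries only the type, while text, line and column are no-ops meant to be overridden by subclasses. The demo class name is made up.

import com.fr.third.antlr.Token;

public class TokenDemo {
    public static void main(String[] args) {
        Token t = new Token(Token.EOF_TYPE);
        System.out.println(t.getType());   // 1
        System.out.println(t.getText());   // <no text> -- the base class stores no text
        System.out.println(t);             // ["<no text>",<1>]
    }
}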

125
fine-antlr-old/src/main/java/com/fr/third/antlr/TokenBuffer.java

@@ -0,0 +1,125 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/TokenBuffer.java#2 $
*/
/**A Stream of Token objects fed to the parser from a Tokenizer that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input tokens. Normally,
* "k" tokens are stored in the buffer. More tokens may be stored during
* guess mode (testing a syntactic predicate), or when LT(i>k) is referenced.
* Consumption of tokens is deferred. In other words, reading the next
* token is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see antlr.Token
* @see antlr.Tokenizer
* @see antlr.TokenQueue
*/
public class TokenBuffer {
// Token source
protected TokenStream input;
// Number of active markers
int nMarkers = 0;
// Additional offset used when markers are active
int markerOffset = 0;
// Number of calls to consume() since last LA() or LT() call
int numToConsume = 0;
// Circular queue
TokenQueue queue;
/** Create a token buffer */
public TokenBuffer(TokenStream input_) {
input = input_;
queue = new TokenQueue(1);
}
/** Reset the input buffer to empty state */
public final void reset() {
nMarkers = 0;
markerOffset = 0;
numToConsume = 0;
queue.reset();
}
/** Mark another token for deferred consumption */
public final void consume() {
numToConsume++;
}
/** Ensure that the token buffer is sufficiently full */
private final void fill(int amount) throws TokenStreamException {
syncConsume();
// Fill the buffer sufficiently to hold needed tokens
while (queue.nbrEntries < amount + markerOffset) {
// Append the next token
queue.append(input.nextToken());
}
}
/** Return the TokenStream (needed by ParseView) */
public TokenStream getInput() {
return input;
}
/** Get a lookahead token value */
public final int LA(int i) throws TokenStreamException {
fill(i);
return queue.elementAt(markerOffset + i - 1).getType();
}
/** Get a lookahead token */
public final Token LT(int i) throws TokenStreamException {
fill(i);
return queue.elementAt(markerOffset + i - 1);
}
/**Return an integer marker that can be used to rewind the buffer to
* its current state.
*/
public final int mark() {
syncConsume();
//System.out.println("Marking at " + markerOffset);
//try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {}
nMarkers++;
return markerOffset;
}
/**Rewind the token buffer to a marker.
* @param mark Marker returned previously from mark()
*/
public final void rewind(int mark) {
syncConsume();
markerOffset = mark;
nMarkers--;
//System.out.println("Rewinding to " + mark);
//try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {}
}
/** Sync up deferred consumption */
private final void syncConsume() {
while (numToConsume > 0) {
if (nMarkers > 0) {
// guess mode -- leave leading tokens and bump offset.
markerOffset++;
}
else {
// normal mode -- remove first token
queue.removeFirst();
}
numToConsume--;
}
}
}
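A minimal sketch (not part of the diff) of the mark()/rewind() protocol a generated parser follows while testing a syntactic predicate; the method name and the decision it makes are hypothetical.

import com.fr.third.antlr.Token;
import com.fr.third.antlr.TokenBuffer;
import com.fr.third.antlr.TokenStream;
import com.fr.third.antlr.TokenStreamException;

class GuessModeDemo {
    static boolean looksLikeUserToken(TokenStream source) throws TokenStreamException {
        TokenBuffer buf = new TokenBuffer(source);
        int m = buf.mark();      // remember the current position
        try {
            boolean match = buf.LA(1) >= Token.MIN_USER_TYPE;
            buf.consume();       // deferred; while marked, syncConsume() only bumps markerOffset
            return match;
        } finally {
            buf.rewind(m);       // restore; the guessed tokens stay buffered for the real parse
        }
    }
}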

60
fine-antlr-old/src/main/java/com/fr/third/antlr/TokenManager.java

@@ -0,0 +1,60 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/TokenManager.java#2 $
*/
import java.util.Enumeration;
import com.fr.third.antlr.collections.impl.Vector;
/** Interface that describes the set of defined tokens */
interface TokenManager {
public Object clone();
/** define a token symbol */
public void define(TokenSymbol ts);
/** Get the name of the token manager */
public String getName();
/** Get a token string by index */
public String getTokenStringAt(int idx);
/** Get the TokenSymbol for a string */
public TokenSymbol getTokenSymbol(String sym);
public TokenSymbol getTokenSymbolAt(int idx);
/** Get an enumerator over the symbol table */
public Enumeration getTokenSymbolElements();
public Enumeration getTokenSymbolKeys();
/** Get the token vocabulary (read-only).
* @return A Vector of Strings indexed by token type */
public Vector getVocabulary();
/** Is this token manager read-only? */
public boolean isReadOnly();
public void mapToTokenSymbol(String name, TokenSymbol sym);
/** Get the highest token type in use */
public int maxTokenType();
/** Get the next unused token type */
public int nextTokenType();
public void setName(String n);
public void setReadOnly(boolean ro);
/** Is a token symbol defined? */
public boolean tokenDefined(String symbol);
}

96
fine-antlr-old/src/main/java/com/fr/third/antlr/TokenQueue.java

@@ -0,0 +1,96 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/TokenQueue.java#2 $
*/
/** A private circular buffer object used by the token buffer */
class TokenQueue {
/** Physical circular buffer of tokens */
private Token[] buffer;
/** buffer.length-1 for quick modulos */
private int sizeLessOne;
/** physical index of front token */
private int offset;
/** number of tokens in the queue */
protected int nbrEntries;
public TokenQueue(int minSize) {
// Find the first power of 2 >= the requested size
int size;
if ( minSize<0 ) {
init(16); // pick some value for them
return;
}
// check for overflow
if ( minSize>=(Integer.MAX_VALUE/2) ) {
init(Integer.MAX_VALUE); // wow that's big.
return;
}
for (size = 2; size < minSize; size *= 2) {
;
}
init(size);
}
/** Add token to end of the queue
* @param tok The token to add
*/
public final void append(Token tok) {
if (nbrEntries == buffer.length) {
expand();
}
buffer[(offset + nbrEntries) & sizeLessOne] = tok;
nbrEntries++;
}
/** Fetch a token from the queue by index
* @param idx The index of the token to fetch, where zero is the token at the front of the queue
*/
public final Token elementAt(int idx) {
return buffer[(offset + idx) & sizeLessOne];
}
/** Expand the token buffer by doubling its capacity */
private final void expand() {
Token[] newBuffer = new Token[buffer.length * 2];
// Copy the contents to the new buffer
// Note that this will store the first logical item in the
// first physical array element.
for (int i = 0; i < buffer.length; i++) {
newBuffer[i] = elementAt(i);
}
// Re-initialize with new contents, keep old nbrEntries
buffer = newBuffer;
sizeLessOne = buffer.length - 1;
offset = 0;
}
/** Initialize the queue.
* @param size The initial size of the queue
*/
private final void init(int size) {
// Allocate buffer
buffer = new Token[size];
// Other initialization
sizeLessOne = size - 1;
offset = 0;
nbrEntries = 0;
}
/** Clear the queue, leaving the previous buffer allocation in place.
*/
public final void reset() {
offset = 0;
nbrEntries = 0;
}
/** Remove token from front of queue */
public final void removeFirst() {
offset = (offset + 1) & sizeLessOne;
nbrEntries--;
}
}
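Illustrative only (not part of the diff): because the capacity is always a power of two, the bit-mask used by append() and elementAt() above is equivalent to a modulo, which keeps the circular indexing free of division. The numbers below are made up.

public class CircularIndexDemo {
    public static void main(String[] args) {
        int capacity = 8, sizeLessOne = capacity - 1;   // assumed buffer capacity
        int offset = 6, idx = 3;                        // arbitrary positions
        int physical = (offset + idx) & sizeLessOne;    // 9 & 7 == 1
        System.out.println(physical == (offset + idx) % capacity);  // true
    }
}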

50
fine-antlr-old/src/main/java/com/fr/third/antlr/TokenRangeElement.java

@@ -0,0 +1,50 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/TokenRangeElement.java#2 $
*/
class TokenRangeElement extends AlternativeElement {
String label;
protected int begin = Token.INVALID_TYPE;
protected int end = Token.INVALID_TYPE;
protected String beginText;
protected String endText;
public TokenRangeElement(Grammar g, Token t1, Token t2, int autoGenType) {
super(g, t1, autoGenType);
begin = grammar.tokenManager.getTokenSymbol(t1.getText()).getTokenType();
beginText = t1.getText();
end = grammar.tokenManager.getTokenSymbol(t2.getText()).getTokenType();
endText = t2.getText();
line = t1.getLine();
}
public void generate() {
grammar.generator.gen(this);
}
public String getLabel() {
return label;
}
public Lookahead look(int k) {
return grammar.theLLkAnalyzer.look(k, this);
}
public void setLabel(String label_) {
label = label_;
}
public String toString() {
if (label != null) {
return " " + label + ":" + beginText + ".." + endText;
}
else {
return " " + beginText + ".." + endText;
}
}
}

Some files were not shown because too many files have changed in this diff