Browse Source

KERNEL-6151 feat: fine-third适配fine-essential并加入打包

research/11.0
Cloud.Liu 4 years ago
parent
commit
2574a03062
  1. 14
      base-third-project/base-third-step1/pom.xml
  2. 3
      base-third-project/base-third-step2/pom.xml
  3. 1
      base-third-project/base-third-step3/pom.xml
  4. 1
      base-third-project/base-third-step4/pom.xml
  5. 1
      base-third-project/base-third-step5/pom.xml
  6. 2
      base-third-project/base-third-step6/pom.xml
  7. 2
      base-third-project/base-third-step7/pom.xml
  8. 1
      base-third-project/base-third-step8/pom.xml
  9. 2
      base-third-project/pom.xml
  10. 21
      build.third_step0-jdk11.gradle
  11. 27
      build.third_step0.gradle
  12. 59
      build.third_step1-jdk11.gradle
  13. 65
      build.third_step1.gradle
  14. 15
      build.third_step2-jdk11.gradle
  15. 23
      build.third_step2.gradle
  16. 12
      build.third_step3-jdk11.gradle
  17. 21
      build.third_step3.gradle
  18. 12
      build.third_step4-jdk11.gradle
  19. 20
      build.third_step4.gradle
  20. 12
      build.third_step5-jdk11.gradle
  21. 20
      build.third_step5.gradle
  22. 12
      build.third_step6-jdk11.gradle
  23. 20
      build.third_step6.gradle
  24. 16
      build.third_step7-jdk11.gradle
  25. 24
      build.third_step7.gradle
  26. 12
      build.third_step8-jdk11.gradle
  27. 20
      build.third_step8.gradle
  28. 6
      fine-antlr-old/.gitignore
  29. 2
      fine-antlr-old/README.md
  30. 18
      fine-antlr-old/pom.xml
  31. 26
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRError.java
  32. 27
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRException.java
  33. 124
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRGrammarParseBehavior.java
  34. 107
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRHashString.java
  35. 1451
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRLexer.java
  36. 2961
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRParser.java
  37. 82
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRStringBuffer.java
  38. 647
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefLexer.java
  39. 241
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParser.java
  40. 18
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParserTokenTypes.java
  41. 68
      fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokenTypes.java
  42. 411
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASTFactory.java
  43. 76
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASTIterator.java
  44. 108
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASTNULLType.java
  45. 43
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASTPair.java
  46. 14
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASTVisitor.java
  47. 35
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/ASDebugStream.java
  48. 24
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/IASDebugStream.java
  49. 22
      fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/TokenOffsetInfo.java
  50. 33
      fine-antlr-old/src/main/java/com/fr/third/antlr/ActionElement.java
  51. 23
      fine-antlr-old/src/main/java/com/fr/third/antlr/ActionTransInfo.java
  52. 73
      fine-antlr-old/src/main/java/com/fr/third/antlr/Alternative.java
  53. 226
      fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeBlock.java
  54. 43
      fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeElement.java
  55. 495
      fine-antlr-old/src/main/java/com/fr/third/antlr/BaseAST.java
  56. 32
      fine-antlr-old/src/main/java/com/fr/third/antlr/BlockContext.java
  57. 31
      fine-antlr-old/src/main/java/com/fr/third/antlr/BlockEndElement.java
  58. 24
      fine-antlr-old/src/main/java/com/fr/third/antlr/BlockWithImpliedExitPath.java
  59. 53
      fine-antlr-old/src/main/java/com/fr/third/antlr/ByteBuffer.java
  60. 53
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharBuffer.java
  61. 23
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharFormatter.java
  62. 29
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharLiteralElement.java
  63. 95
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharQueue.java
  64. 54
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharRangeElement.java
  65. 409
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharScanner.java
  66. 21
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamException.java
  67. 22
      fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamIOException.java
  68. 663
      fine-antlr-old/src/main/java/com/fr/third/antlr/CodeGenerator.java
  69. 59
      fine-antlr-old/src/main/java/com/fr/third/antlr/CommonAST.java
  70. 47
      fine-antlr-old/src/main/java/com/fr/third/antlr/CommonASTWithHiddenTokens.java
  71. 41
      fine-antlr-old/src/main/java/com/fr/third/antlr/CommonHiddenStreamToken.java
  72. 56
      fine-antlr-old/src/main/java/com/fr/third/antlr/CommonToken.java
  73. 33
      fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultFileLineFormatter.java
  74. 73
      fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultJavaCodeGeneratorPrintWriterManager.java
  75. 118
      fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultToolErrorHandler.java
  76. 811
      fine-antlr-old/src/main/java/com/fr/third/antlr/DefineGrammarSymbols.java
  77. 68
      fine-antlr-old/src/main/java/com/fr/third/antlr/DumpASTVisitor.java
  78. 22
      fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionHandler.java
  79. 29
      fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionSpec.java
  80. 14
      fine-antlr-old/src/main/java/com/fr/third/antlr/FileCopyException.java
  81. 27
      fine-antlr-old/src/main/java/com/fr/third/antlr/FileLineFormatter.java
  82. 288
      fine-antlr-old/src/main/java/com/fr/third/antlr/Grammar.java
  83. 36
      fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAnalyzer.java
  84. 68
      fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAtom.java
  85. 62
      fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarElement.java
  86. 30
      fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarSymbol.java
  87. 102
      fine-antlr-old/src/main/java/com/fr/third/antlr/ImportVocabTokenManager.java
  88. 131
      fine-antlr-old/src/main/java/com/fr/third/antlr/InputBuffer.java
  89. 34
      fine-antlr-old/src/main/java/com/fr/third/antlr/JavaBlockFinishingInfo.java
  90. 87
      fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCharFormatter.java
  91. 3746
      fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGenerator.java
  92. 21
      fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGeneratorPrintWriterManager.java
  93. 1095
      fine-antlr-old/src/main/java/com/fr/third/antlr/LLkAnalyzer.java
  94. 58
      fine-antlr-old/src/main/java/com/fr/third/antlr/LLkGrammarAnalyzer.java
  95. 85
      fine-antlr-old/src/main/java/com/fr/third/antlr/LLkParser.java
  96. 179
      fine-antlr-old/src/main/java/com/fr/third/antlr/LexerGrammar.java
  97. 80
      fine-antlr-old/src/main/java/com/fr/third/antlr/LexerSharedInputState.java
  98. 218
      fine-antlr-old/src/main/java/com/fr/third/antlr/Lookahead.java
  99. 792
      fine-antlr-old/src/main/java/com/fr/third/antlr/MakeGrammar.java
  100. 146
      fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedCharException.java
  101. Some files were not shown because too many files have changed in this diff Show More

14
base-third-project/base-third-step1/pom.xml

@@ -12,42 +12,30 @@
<artifactId>step1</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-antlr4</module>
<module>../../fine-asm</module>
<module>../../fine-aspectj</module>
<module>../../fine-antlr-old</module>
<module>../../fine-bouncycastle</module>
<module>../../fine-classmate</module>
<module>../../fine-commons-codec</module>
<module>../../fine-commons-collections4</module>
<module>../../fine-commons-io</module>
<module>../../fine-commons-lang3</module>
<module>../../fine-commons-logging</module>
<module>../../fine-commons-math3</module>
<module>../../fine-commons-pool</module>
<module>../../fine-cssparser</module>
<module>../../fine-freehep</module>
<module>../../fine-guava</module>
<module>../../fine-hsqldb</module>
<module>../../fine-iconloader</module>
<module>../../fine-icu4j</module>
<module>../../fine-imageJ</module>
<module>../../fine-j2v8</module>
<module>../../fine-jackson</module>
<!-- <module>../../fine-jai</module>-->
<module>../../fine-jboss-transaction-api</module>
<module>../../fine-jetbrains</module>
<module>../../fine-jgit</module>
<module>../../fine-jna</module>
<module>../../fine-joda</module>
<module>../../fine-jodd</module>
<module>../../fine-jpa</module>
<module>../../fine-lookandfeel</module>
<module>../../fine-lucene</module>
<module>../../fine-lz4</module>
<module>../../fine-objenesis</module>
<module>../../fine-org-dom4j</module>
<module>../../fine-roaringbitmap</module>
<module>../../fine-sense4</module>

3
base-third-project/base-third-step2/pom.xml

@@ -12,16 +12,15 @@
<artifactId>step2</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-bcprov-old</module>
<module>../../fine-byte-buddy</module>
<module>../../fine-cglib</module>
<module>../../fine-commons-fileupload</module>
<module>../../fine-httpcomponents</module>
<module>../../fine-jai</module>
<module>../../fine-kryo</module>
<module>../../fine-log4j</module>
<module>../../fine-poi</module>
<module>../../fine-poi-old</module>
<module>../../fine-redisson</module>

1
base-third-project/base-third-step3/pom.xml

@@ -12,6 +12,7 @@
<artifactId>step3</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-itext</module>

1
base-third-project/base-third-step4/pom.xml

@@ -12,6 +12,7 @@
<artifactId>step4</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-itext-old</module>

1
base-third-project/base-third-step5/pom.xml

@@ -12,6 +12,7 @@
<artifactId>step5</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-hibernate</module>

2
base-third-project/base-third-step6/pom.xml

@@ -12,7 +12,7 @@
<artifactId>step6</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-druid</module>

2
base-third-project/base-third-step7/pom.xml

@@ -12,9 +12,9 @@
<artifactId>step7</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-quartz</module>
<module>../../fine-quartz-old</module>
</modules>
<build>

1
base-third-project/base-third-step8/pom.xml

@@ -12,6 +12,7 @@
<artifactId>step8</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<modules>
<module>../../fine-ehcache</module>

2
base-third-project/pom.xml

@@ -7,10 +7,12 @@
<groupId>com.fr.third</groupId>
<artifactId>base-third</artifactId>
<version>${revision}</version>
<packaging>pom</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<classes-path>G:\Code\10.0\feature-third-maven\base-third\base-third-project\</classes-path>
<revision>10.0-FEATURE-SNAPSHOT</revision>
<essentialVersion>1.0-RELEASE-SNAPSHOT</essentialVersion>
<language-level>1.8</language-level>
</properties>

21
build.third_step0-jdk11.gradle

@@ -26,6 +26,7 @@ jar{
repositories {
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
@@ -53,9 +54,13 @@ def jar_version = version
configurations {
// thirdjar
// sigar
essential
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
dependencies {
essential "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
// thirdjar "com.fr.third:fine-third-base:10.0-BASE-SNAPSHOT"
// sigar "com.fr.third:sigar:1.6.0"
testCompile 'junit:junit:4.12'
@@ -94,9 +99,9 @@ task unpack(type:Copy) {
// from {
// zipTree(configurations.sigar.singleFile)
// }
from {
zipTree("fine-quartz/lib/c3p0-0.9.1.1.jar")
}
// from {
// zipTree("fine-quartz/lib/c3p0-0.9.1.1.jar")
// }
from {
zipTree("fine-spring/lib/aopalliance-1.0.jar")
}
@@ -114,5 +119,15 @@ task unpack(type:Copy) {
}
// essential依赖等级的jar全部解压到classes下
task unpackEssential(type:Copy) {
for (File file : configurations.essential.files) {
from zipTree(file)
}
into classesDir
}
jar.dependsOn unpackEssential
jar.dependsOn unpack

27
build.third_step0.gradle

@@ -10,9 +10,9 @@ configurations.all {
resolutionStrategy.cacheChangingModulesFor 0, 'seconds'
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
//lib下的jar到classes文件夹
@@ -25,6 +25,7 @@ jar{
repositories {
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
@@ -62,9 +63,13 @@ def jar_version = version
configurations {
// thirdjar
// sigar
essential
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
dependencies {
essential "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
// thirdjar "com.fr.third:fine-third-base:10.0-BASE-SNAPSHOT"
// sigar "com.fr.third:sigar:1.6.0"
testCompile 'junit:junit:4.12'
@@ -122,7 +127,6 @@ afterEvaluate {
}
}
task unpack(type:Copy) {
delete classesDir
destinationDir=file(classesDir)
@@ -133,9 +137,9 @@ task unpack(type:Copy) {
// from {
// zipTree(configurations.sigar.singleFile)
// }
from {
zipTree("fine-quartz/lib/c3p0-0.9.1.1.jar")
}
// from {
// zipTree("fine-quartz/lib/c3p0-0.9.1.1.jar")
// }
from {
zipTree("fine-spring/lib/aopalliance-1.0.jar")
}
@@ -150,7 +154,16 @@ task unpack(type:Copy) {
zipTree(file)
}
}
}
// essential依赖等级的jar全部解压到classes下
task unpackEssential(type:Copy) {
for (File file : configurations.essential.files) {
from zipTree(file)
}
into classesDir
}
jar.dependsOn unpackEssential
jar.dependsOn unpack

59
build.third_step1-jdk11.gradle

@@ -33,53 +33,32 @@ sourceSets{
srcDirs=[
"${srcDir}/fine-deprecated-utils/src/main/java",
"${srcDir}/fine-asm/src/main/java",
"${srcDir}/fine-antlr4/src/main/java",
"${srcDir}/fine-aspectj/src/main/java",
"${srcDir}/fine-antlr-old/src/main/java",
"${srcDir}/fine-bouncycastle/src/main/java",
"${srcDir}/fine-classmate/src/main/java",
"${srcDir}/fine-commons-codec/src/main/java",
"${srcDir}/fine-commons-collections4/src/main/java",
"${srcDir}/fine-commons-io/src/main/java",
"${srcDir}/fine-commons-lang3/src/main/java",
"${srcDir}/fine-commons-logging/src/main/java",
"${srcDir}/fine-commons-math3/src/main/java",
"${srcDir}/fine-commons-pool/src/main/java",
"${srcDir}/fine-cssparser/src/main/java",
"${srcDir}/fine-freehep/src/main/java",
"${srcDir}/fine-guava/src/main/java",
"${srcDir}/fine-hsqldb/src/main/java",
"${srcDir}/fine-iconloader/src/main/java",
"${srcDir}/fine-icu4j/src/main/java",
"${srcDir}/fine-j2v8/src/main/java",
"${srcDir}/fine-jackson/src/main/java",
// "${srcDir}/fine-jai/src/main/java",
"${srcDir}/fine-jboss-transaction-api/src/main/java",
"${srcDir}/fine-jetbrains/src/main/java",
"${srcDir}/fine-jgit/src/main/java",
"${srcDir}/fine-jna/jna/src/main/java",
"${srcDir}/fine-jna/jna-platform/src/main/java",
"${srcDir}/fine-joda/src/main/java",
"${srcDir}/fine-jodd/src/main/java",
"${srcDir}/fine-jpa/src/main/java",
//"${srcDir}/fine-lookandfeel/src/main/java",
"${srcDir}/fine-lucene/src/main/java",
"${srcDir}/fine-lz4/src/main/java",
"${srcDir}/fine-objenesis/src/main/java",
"${srcDir}/fine-org-dom4j/src/main/java",
"${srcDir}/fine-roaringbitmap/src/main/java",
"${srcDir}/fine-sense4/src/main/java",
"${srcDir}/fine-third-default/fine-mail/src/main/java",
"${srcDir}/fine-third-default/fine-javax-cdi/src/main/java",
"${srcDir}/fine-third-default/fine-javax-jms/src/main/java",
"${srcDir}/fine-third-default/fine-sun-misc/src/main/java",
"${srcDir}/fine-third-default/fine-javax-inject/src/main/java",
"${srcDir}/fine-third-default/fine-javax-interceptor/src/main/java",
"${srcDir}/fine-third-default/fine-zip-tools/src/main/java",
"${srcDir}/fine-third-default/fine-javax-transaction/src/main/java",
"${srcDir}/fine-third-default/fine-slf4j-api/src/main/java",
"${srcDir}/fine-third-default/fine-sjsxp/src/main/java",
"${srcDir}/fine-third-default/fine-stax/src/main/java",
"${srcDir}/fine-third-default/fine-sun-jpeg/src/main/java",
"${srcDir}/fine-third-jdk11/fine-activation/src/main/java",
"${srcDir}/fine-third-jdk11/fine-javax-annotation/src/main/java",
@@ -94,10 +73,6 @@ sourceSets{
}
def resourceDirs = [
"${srcDir}/fine-antlr4/src/main/java",
"${srcDir}/fine-antlr4/src/main/resources",
"${srcDir}/fine-antlr-old/src/main/java",
"${srcDir}/fine-antlr-old/src/main/resources",
"${srcDir}/fine-asm/src/main/java",
"${srcDir}/fine-asm/src/main/resources",
"${srcDir}/fine-aspectj/src/main/java",
@@ -106,16 +81,6 @@ def resourceDirs = [
"${srcDir}/fine-bouncycastle/src/main/resources",
"${srcDir}/fine-classmate/src/main/java",
"${srcDir}/fine-classmate/src/main/resources",
"${srcDir}/fine-commons-codec/src/main/java",
"${srcDir}/fine-commons-codec/src/main/resources",
"${srcDir}/fine-commons-collections4/src/main/java",
"${srcDir}/fine-commons-collections4/src/main/resources",
"${srcDir}/fine-commons-io/src/main/java",
"${srcDir}/fine-commons-io/src/main/resources",
"${srcDir}/fine-commons-lang3/src/main/java",
"${srcDir}/fine-commons-lang3/src/main/resources",
"${srcDir}/fine-commons-logging/src/main/java",
"${srcDir}/fine-commons-logging/src/main/resources",
"${srcDir}/fine-commons-math3/src/main/java",
"${srcDir}/fine-commons-math3/src/main/resources",
"${srcDir}/fine-commons-pool/src/main/java",
@@ -124,8 +89,6 @@ def resourceDirs = [
"${srcDir}/fine-cssparser/src/main/resources",
"${srcDir}/fine-freehep/src/main/java",
"${srcDir}/fine-freehep/src/main/resources",
"${srcDir}/fine-guava/src/main/java",
"${srcDir}/fine-guava/src/main/resources",
"${srcDir}/fine-hsqldb/src/main/java",
"${srcDir}/fine-hsqldb/src/main/resources",
"${srcDir}/fine-iconloader/src/main/java",
@@ -134,31 +97,21 @@ def resourceDirs = [
"${srcDir}/fine-icu4j/src/main/resources",
"${srcDir}/fine-j2v8/src/main/java",
"${srcDir}/fine-j2v8/src/main/resources",
"${srcDir}/fine-jackson/src/main/java",
"${srcDir}/fine-jackson/src/main/resources",
// "${srcDir}/fine-jai/src/main/java",
"${srcDir}/fine-jboss-transaction-api/src/main/java",
"${srcDir}/fine-jboss-transaction-api/src/main/resources",
"${srcDir}/fine-jetbrains/src/main/java",
"${srcDir}/fine-jetbrains/src/main/resources",
"${srcDir}/fine-jgit/src/main/java",
"${srcDir}/fine-jgit/src/main/resources",
"${srcDir}/fine-jna/jna/src/main/resources",
"${srcDir}/fine-jna/jna/src/main/java",
"${srcDir}/fine-jna/jna-platform/src/main/java",
"${srcDir}/fine-joda/src/main/java",
"${srcDir}/fine-joda/src/main/resources",
"${srcDir}/fine-jodd/src/main/java",
"${srcDir}/fine-jodd/src/main/resources",
"${srcDir}/fine-jpa/src/main/java",
"${srcDir}/fine-jpa/src/main/resources",
// "${srcDir}/fine-lookandfeel/src/main/java",
"${srcDir}/fine-lucene/src/main/java",
"${srcDir}/fine-lucene/src/main/resources",
"${srcDir}/fine-lz4/src/main/java",
"${srcDir}/fine-lz4/src/main/resources",
"${srcDir}/fine-objenesis/src/main/java",
"${srcDir}/fine-objenesis/src/main/resources",
"${srcDir}/fine-org-dom4j/src/main/java",
"${srcDir}/fine-org-dom4j/src/main/resources",
"${srcDir}/fine-roaringbitmap/src/main/java",
@@ -167,16 +120,8 @@ def resourceDirs = [
"${srcDir}/fine-sense4/src/main/resources",
"${srcDir}/fine-third-default/fine-mail/src/main/java",
"${srcDir}/fine-third-default/fine-mail/src/main/resources",
"${srcDir}/fine-third-default/fine-javax-cdi/src/main/java",
"${srcDir}/fine-third-default/fine-javax-jms/src/main/java",
"${srcDir}/fine-third-default/fine-sun-misc/src/main/java",
"${srcDir}/fine-third-default/fine-javax-inject/src/main/java",
"${srcDir}/fine-third-default/fine-javax-interceptor/src/main/java",
"${srcDir}/fine-third-default/fine-zip-tools/src/main/java",
"${srcDir}/fine-third-default/fine-javax-transaction/src/main/java",
"${srcDir}/fine-third-default/fine-slf4j-api/src/main/java",
"${srcDir}/fine-third-default/fine-sjsxp/src/main/java",
"${srcDir}/fine-third-default/fine-stax/src/main/java",
"${srcDir}/fine-third-default/fine-sun-jpeg/src/main/java",
"${srcDir}/fine-third-jdk11/fine-activation/src/main/java",
"${srcDir}/fine-third-jdk11/fine-javax-annotation/src/main/java",
@@ -204,9 +149,7 @@ def MVN_BRANCH = branchVariable.toUpperCase()
//
dependencies{
compile fileTree(dir:"${srcDir}/fine-antlr4/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-aspectj/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-commons-logging/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-cssparser/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-freehep/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-hsqldb/lib",include:'**/*.jar')
@@ -214,8 +157,6 @@ dependencies{
compile fileTree(dir:"${srcDir}/fine-jgit/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-org-dom4j/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-sense4/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-third-default/fine-slf4j-api/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-third-default/fine-javax-cdi/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")
compile group: "com.fr.third.server", name: "servlet-api", version: "3.0"

65
build.third_step1.gradle

@@ -34,53 +34,32 @@ sourceSets{
srcDirs=[
"${srcDir}/fine-deprecated-utils/src/main/java",
"${srcDir}/fine-asm/src/main/java",
"${srcDir}/fine-antlr4/src/main/java",
"${srcDir}/fine-aspectj/src/main/java",
"${srcDir}/fine-antlr-old/src/main/java",
"${srcDir}/fine-bouncycastle/src/main/java",
"${srcDir}/fine-classmate/src/main/java",
"${srcDir}/fine-commons-codec/src/main/java",
"${srcDir}/fine-commons-collections4/src/main/java",
"${srcDir}/fine-commons-io/src/main/java",
"${srcDir}/fine-commons-lang3/src/main/java",
"${srcDir}/fine-commons-logging/src/main/java",
"${srcDir}/fine-commons-math3/src/main/java",
"${srcDir}/fine-commons-pool/src/main/java",
"${srcDir}/fine-cssparser/src/main/java",
"${srcDir}/fine-freehep/src/main/java",
"${srcDir}/fine-guava/src/main/java",
"${srcDir}/fine-hsqldb/src/main/java",
"${srcDir}/fine-iconloader/src/main/java",
"${srcDir}/fine-icu4j/src/main/java",
"${srcDir}/fine-j2v8/src/main/java",
"${srcDir}/fine-jackson/src/main/java",
// "${srcDir}/fine-jai/src/main/java",
"${srcDir}/fine-jboss-transaction-api/src/main/java",
"${srcDir}/fine-jetbrains/src/main/java",
"${srcDir}/fine-jgit/src/main/java",
"${srcDir}/fine-jna/jna/src/main/java",
"${srcDir}/fine-jna/jna-platform/src/main/java",
"${srcDir}/fine-joda/src/main/java",
"${srcDir}/fine-jodd/src/main/java",
"${srcDir}/fine-jpa/src/main/java",
//"${srcDir}/fine-lookandfeel/src/main/java",
"${srcDir}/fine-lucene/src/main/java",
"${srcDir}/fine-lz4/src/main/java",
"${srcDir}/fine-objenesis/src/main/java",
"${srcDir}/fine-org-dom4j/src/main/java",
"${srcDir}/fine-roaringbitmap/src/main/java",
"${srcDir}/fine-sense4/src/main/java",
"${srcDir}/fine-third-default/fine-mail/src/main/java",
"${srcDir}/fine-third-default/fine-javax-cdi/src/main/java",
"${srcDir}/fine-third-default/fine-javax-jms/src/main/java",
"${srcDir}/fine-third-default/fine-sun-misc/src/main/java",
"${srcDir}/fine-third-default/fine-javax-inject/src/main/java",
"${srcDir}/fine-third-default/fine-javax-interceptor/src/main/java",
"${srcDir}/fine-third-default/fine-zip-tools/src/main/java",
"${srcDir}/fine-third-default/fine-javax-transaction/src/main/java",
"${srcDir}/fine-third-default/fine-slf4j-api/src/main/java",
"${srcDir}/fine-third-default/fine-sjsxp/src/main/java",
"${srcDir}/fine-third-default/fine-stax/src/main/java",
"${srcDir}/fine-third-default/fine-sun-jpeg/src/main/java",
"${srcDir}/fine-third-jdk8/xml-apis/src/main/java",
"${srcDir}/fine-third-jdk11/fine-activation/src/main/java",
@@ -95,7 +74,7 @@ sourceSets{
}
sourceSets.main.output.classesDir = file('build/classes/1')
sourceSets.main.java.outputDir = file('build/classes/1')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
@@ -105,13 +84,11 @@ repositories{
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
//
dependencies{
compile fileTree(dir:"${srcDir}/fine-antlr4/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-aspectj/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-commons-logging/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-cssparser/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-freehep/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-hsqldb/lib",include:'**/*.jar')
@@ -119,8 +96,6 @@ dependencies{
compile fileTree(dir:"${srcDir}/fine-jgit/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-org-dom4j/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-sense4/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-third-default/fine-slf4j-api/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-third-default/fine-javax-cdi/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")
compile group: "com.fr.third.server", name: "servlet-api", version: "3.0"
@@ -142,10 +117,6 @@ def dataContent ={def dir ->
task copyFiles(type:Copy,dependsOn:'compileJava'){
copy{
println "------------------------------------------------copyfiles"
with dataContent.call("${srcDir}/fine-antlr4/src/main/java")
with dataContent.call("${srcDir}/fine-antlr4/src/main/resources")
with dataContent.call("${srcDir}/fine-antlr-old/src/main/java")
with dataContent.call("${srcDir}/fine-antlr-old/src/main/resources")
with dataContent.call("${srcDir}/fine-asm/src/main/java")
with dataContent.call("${srcDir}/fine-asm/src/main/resources")
with dataContent.call("${srcDir}/fine-aspectj/src/main/java")
@@ -154,16 +125,6 @@ task copyFiles(type:Copy,dependsOn:'compileJava'){
with dataContent.call("${srcDir}/fine-bouncycastle/src/main/resources")
with dataContent.call("${srcDir}/fine-classmate/src/main/java")
with dataContent.call("${srcDir}/fine-classmate/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-codec/src/main/java")
with dataContent.call("${srcDir}/fine-commons-codec/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-collections4/src/main/java")
with dataContent.call("${srcDir}/fine-commons-collections4/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-io/src/main/java")
with dataContent.call("${srcDir}/fine-commons-io/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-lang3/src/main/java")
with dataContent.call("${srcDir}/fine-commons-lang3/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-logging/src/main/java")
with dataContent.call("${srcDir}/fine-commons-logging/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-math3/src/main/java")
with dataContent.call("${srcDir}/fine-commons-math3/src/main/resources")
with dataContent.call("${srcDir}/fine-commons-pool/src/main/java")
@@ -172,58 +133,36 @@ task copyFiles(type:Copy,dependsOn:'compileJava'){
with dataContent.call("${srcDir}/fine-cssparser/src/main/resources")
with dataContent.call("${srcDir}/fine-freehep/src/main/java")
with dataContent.call("${srcDir}/fine-freehep/src/main/resources")
with dataContent.call("${srcDir}/fine-guava/src/main/java")
with dataContent.call("${srcDir}/fine-guava/src/main/resources")
with dataContent.call("${srcDir}/fine-hsqldb/src/main/java")
with dataContent.call("${srcDir}/fine-hsqldb/src/main/resources")
with dataContent.call("${srcDir}/fine-icu4j/src/main/java")
with dataContent.call("${srcDir}/fine-icu4j/src/main/resources")
with dataContent.call("${srcDir}/fine-j2v8/src/main/java")
with dataContent.call("${srcDir}/fine-j2v8/src/main/resources")
with dataContent.call("${srcDir}/fine-jackson/src/main/java")
with dataContent.call("${srcDir}/fine-jackson/src/main/resources")
// with dataContent.call("${srcDir}/fine-jai/src/main/java")
with dataContent.call("${srcDir}/fine-jboss-transaction-api/src/main/java")
with dataContent.call("${srcDir}/fine-jboss-transaction-api/src/main/resources")
with dataContent.call("${srcDir}/fine-jetbrains/src/main/java")
with dataContent.call("${srcDir}/fine-jetbrains/src/main/resources")
with dataContent.call("${srcDir}/fine-jgit/src/main/java")
with dataContent.call("${srcDir}/fine-jgit/src/main/resources")
with dataContent.call("${srcDir}/fine-jna/jna/src/main/resources")
with dataContent.call("${srcDir}/fine-jna/jna/src/main/java")
with dataContent.call("${srcDir}/fine-jna/jna-platform/src/main/java")
with dataContent.call("${srcDir}/fine-jna/jna-platform/src/main/resources")
with dataContent.call("${srcDir}/fine-joda/src/main/java")
with dataContent.call("${srcDir}/fine-joda/src/main/resources")
with dataContent.call("${srcDir}/fine-jodd/src/main/java")
with dataContent.call("${srcDir}/fine-jodd/src/main/resources")
with dataContent.call("${srcDir}/fine-jpa/src/main/java")
with dataContent.call("${srcDir}/fine-jpa/src/main/resources")
// with dataContent.call("${srcDir}/fine-lookandfeel/src/main/java")
with dataContent.call("${srcDir}/fine-lucene/src/main/java")
with dataContent.call("${srcDir}/fine-lucene/src/main/resources")
with dataContent.call("${srcDir}/fine-lz4/src/main/java")
with dataContent.call("${srcDir}/fine-lz4/src/main/resources")
with dataContent.call("${srcDir}/fine-objenesis/src/main/java")
with dataContent.call("${srcDir}/fine-objenesis/src/main/resources")
with dataContent.call("${srcDir}/fine-org-dom4j/src/main/java")
with dataContent.call("${srcDir}/fine-org-dom4j/src/main/resources")
with dataContent.call("${srcDir}/fine-roaringbitmap/src/main/java")
with dataContent.call("${srcDir}/fine-roaringbitmap/src/main/resources")
with dataContent.call("${srcDir}/fine-sense4/src/main/java")
with dataContent.call("${srcDir}/fine-sense4/src/main/resources")
with dataContent.call("${srcDir}/fine-third-default/fine-mail/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-mail/src/main/resources")
with dataContent.call("${srcDir}/fine-third-default/fine-javax-cdi/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-javax-jms/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-sun-misc/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-javax-inject/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-javax-interceptor/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-zip-tools/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-javax-transaction/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-slf4j-api/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-sjsxp/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-stax/src/main/java")
with dataContent.call("${srcDir}/fine-third-default/fine-sun-jpeg/src/main/java")
with dataContent.call("${srcDir}/fine-third-jdk8/xml-apis/src/main/java")
with dataContent.call("${srcDir}/fine-third-jdk11/fine-activation/src/main/java")

15
build.third_step2-jdk11.gradle

@ -47,7 +47,6 @@ sourceSets{
"${srcDir}/fine-httpcomponents/http-core/httpcore-nio/src/main/java-deprecated",
"${srcDir}/fine-jai/src/main/java",
"${srcDir}/fine-kryo/src/main/java",
"${srcDir}/fine-log4j/src/main/java",
"${srcDir}/fine-poi/src/main/java",
"${srcDir}/fine-poi-old/src/main/java",
"${srcDir}/fine-redisson/src/main/java",
@ -87,8 +86,6 @@ def resourceDirs = [
"${srcDir}/fine-jai/src/main/resources",
"${srcDir}/fine-kryo/src/main/java",
"${srcDir}/fine-kryo/src/main/resources",
"${srcDir}/fine-log4j/src/main/java",
"${srcDir}/fine-log4j/src/main/resources",
"${srcDir}/fine-poi/src/main/java",
"${srcDir}/fine-poi/src/main/resources",
"${srcDir}/fine-poi-old/src/main/java",
@ -109,6 +106,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/2')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -117,8 +115,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-byte-buddy/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-commons-fileupload/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-httpcomponents/http-client/lib",include:'**/*.jar')

23
build.third_step2.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/2'
@ -40,7 +40,6 @@ sourceSets{
"${srcDir}/fine-httpcomponents/http-core/httpcore-nio/src/main/java-deprecated",
"${srcDir}/fine-jai/src/main/java",
"${srcDir}/fine-kryo/src/main/java",
"${srcDir}/fine-log4j/src/main/java",
"${srcDir}/fine-poi/src/main/java",
"${srcDir}/fine-poi-old/src/main/java",
"${srcDir}/fine-redisson/src/main/java",
@ -52,20 +51,32 @@ sourceSets{
}
sourceSets.main.output.classesDir = file('build/classes/2')
sourceSets.main.java.outputDir = file('build/classes/2')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-byte-buddy/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-commons-fileupload/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-httpcomponents/http-client/lib",include:'**/*.jar')
@ -124,8 +135,6 @@ task copyFiles(type:Copy,dependsOn:'compileJava'){
with dataContent.call("${srcDir}/fine-jai/src/main/resources")
with dataContent.call("${srcDir}/fine-kryo/src/main/java")
with dataContent.call("${srcDir}/fine-kryo/src/main/resources")
with dataContent.call("${srcDir}/fine-log4j/src/main/java")
with dataContent.call("${srcDir}/fine-log4j/src/main/resources")
with dataContent.call("${srcDir}/fine-poi/src/main/java")
with dataContent.call("${srcDir}/fine-poi/src/main/resources")
with dataContent.call("${srcDir}/fine-poi-old/src/main/java")

12
build.third_step3-jdk11.gradle

@ -59,6 +59,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/3')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -67,8 +68,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-jboss-logging/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")

21
build.third_step3.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/3'
@ -33,21 +33,34 @@ sourceSets{
}
}
sourceSets.main.output.classesDir = file('build/classes/3')
sourceSets.main.java.outputDir = file('build/classes/3')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-jboss-logging/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")

12
build.third_step4-jdk11.gradle

@ -57,6 +57,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/4')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -65,8 +66,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
// compile fileTree(dir:"${srcDir}/fine-hibernate/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-spring/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')

20
build.third_step4.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/4'
@ -33,21 +33,33 @@ sourceSets{
}
sourceSets.main.output.classesDir = file('build/classes/4')
sourceSets.main.java.outputDir = file('build/classes/4')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
// compile fileTree(dir:"${srcDir}/fine-hibernate/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-spring/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')

12
build.third_step5-jdk11.gradle

@ -49,6 +49,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/5')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -57,8 +58,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-hibernate/lib",include:'**/*.jar')
// compile fileTree(dir:"${srcDir}/fine-spring/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')

20
build.third_step5.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/5'
@ -33,21 +33,33 @@ sourceSets{
}
sourceSets.main.output.classesDir = file('build/classes/5')
sourceSets.main.java.outputDir = file('build/classes/5')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-hibernate/lib",include:'**/*.jar')
// compile fileTree(dir:"${srcDir}/fine-spring/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')

12
build.third_step6-jdk11.gradle

@ -47,6 +47,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/6')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -55,8 +56,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-druid/lib",include:'ojdbc7-12.1.0.jar')
compile fileTree(dir:"${srcDir}/fine-druid/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-socketio/lib",include:'**/*.jar')

20
build.third_step6.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/6'
@ -31,21 +31,33 @@ sourceSets{
}
}
sourceSets.main.output.classesDir = file('build/classes/6')
sourceSets.main.java.outputDir = file('build/classes/6')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build.*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-druid/lib",include:'ojdbc7-12.1.0.jar')
compile fileTree(dir:"${srcDir}/fine-druid/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/fine-socketio/lib",include:'**/*.jar')

16
build.third_step7-jdk11.gradle

@ -25,7 +25,6 @@ sourceSets{
main{
java{
srcDirs=[
"${srcDir}/fine-quartz/src/main/java"
]
}
}
@ -33,8 +32,6 @@ sourceSets{
}
def resourceDirs = [
"${srcDir}/fine-quartz/src/main/java",
"${srcDir}/fine-quartz/src/main/resources"
]
sourceSets.main.java.outputDir = file('build/classes/7')
@ -45,6 +42,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/7')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -53,9 +51,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compile fileTree(dir:"${srcDir}/fine-quartz/lib",include:'**/*.jar')
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")
compile group: "com.fr.third.server", name: "servlet-api", version: "3.0"

24
build.third_step7.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/7'
@ -25,29 +25,39 @@ sourceSets{
main{
java{
srcDirs=[
"${srcDir}/fine-quartz/src/main/java",
"${srcDir}/fine-quartz-old/src/main/java"
]
}
}
}
sourceSets.main.output.classesDir = file('build/classes/7')
sourceSets.main.java.outputDir = file('build/classes/7')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compile fileTree(dir:"${srcDir}/fine-quartz/lib",include:'**/*.jar')
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")
compile group: "com.fr.third.server", name: "servlet-api", version: "3.0"
@ -69,8 +79,6 @@ def dataContent ={def dir ->
task copyFiles(type:Copy,dependsOn:'compileJava'){
copy{
println "------------------------------------------------copyfiles"
with dataContent.call("${srcDir}/fine-quartz/src/main/java")
with dataContent.call("${srcDir}/fine-quartz/src/main/resources")
with dataContent.call("${srcDir}/fine-quartz-old/src/main/java")
with dataContent.call("${srcDir}/fine-quartz-old/src/main/resources")
into "${classesDir}"

12
build.third_step8-jdk11.gradle

@ -44,6 +44,7 @@ sourceSets.main.output.resourcesDir = file('build/resource/8')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
@ -52,8 +53,19 @@ def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.s
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-ehcache/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")

20
build.third_step8.gradle

@ -6,9 +6,9 @@ tasks.withType(JavaCompile){
}
//
sourceCompatibility=1.7
sourceCompatibility=1.8
//class版本
targetCompatibility=1.7
targetCompatibility=1.8
def jarname="fine-third-10.0.jar"
def classesDir='build/classes/8'
@ -31,21 +31,33 @@ sourceSets{
}
}
sourceSets.main.output.classesDir = file('build/classes/8')
sourceSets.main.java.outputDir = file('build/classes/8')
repositories{
mavenCentral()
maven { url "http://mvn.finedevelop.com/repository/maven-public/" }
maven { url "http://mvn.finedevelop.com/repository/fanruan/" }
}
//
FileTree files =fileTree(dir:'./',include:'build*.gradle')
def buildDir=files[0].path.substring(0,files[0].path.lastIndexOf (java.io.File.separator))
def branchName=buildDir.substring(buildDir.lastIndexOf (java.io.File.separator)+1)
def MVN_BRANCH = branchVariable.toUpperCase()
// def MVN_BRANCH = branchVariable.toUpperCase()
def indexV = branchName.indexOf( "%2F");
if(indexV != -1){
version= branchName.substring(indexV+3, branchName.length()).toUpperCase()
} else {
version= branchName
}
// third的feature对应essential和cbb的releasemaster
String essentialBranch = version == 'FEATURE' ? 'RELEASE' : 'MASTER'
//
dependencies{
compileOnly "com.fr.essential:fine-essential:1.0-${essentialBranch}-SNAPSHOT"
compile fileTree(dir:"${srcDir}/fine-ehcache/lib",include:'**/*.jar')
compile fileTree(dir:"${srcDir}/build/libs/",include:'**/*.jar')
compile fileTree(dir:System.getenv("JAVA_HOME"),include:"lib/tools.jar")

6
fine-antlr-old/.gitignore vendored

@ -1,6 +0,0 @@
*.iml
.idea/
.DS_Store
.project
.classpath
*.gradle

2
fine-antlr-old/README.md

@ -1,2 +0,0 @@
1.`fine-antlr-old`是为了与`fine-antlr4`区别开,<br>
2.源码地址https://cloud.finedevelop.com/projects/PF/repos/thirdtools/browse

18
fine-antlr-old/pom.xml

@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.fr.third</groupId>
<artifactId>step1</artifactId>
<version>${revision}</version>
<relativePath>../base-third-project/base-third-step1</relativePath>
</parent>
<artifactId>fine-antlr-old</artifactId>
<version>${revision}</version>
</project>

26
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRError.java

@ -1,26 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRError.java#2 $
*/
/**
 * Unrecoverable error thrown by the ANTLR translator generator.
 * Mirrors the two standard {@link Error} constructors.
 */
public class ANTLRError extends Error {

    /** Constructs an ANTLRError with no detail message. */
    public ANTLRError() {
    }

    /**
     * Constructs an ANTLRError with the supplied detail message.
     *
     * @param s the detail message
     */
    public ANTLRError(String s) {
        super(s);
    }
}

27
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRException.java

@ -1,27 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRException.java#2 $
*/
/**
 * Base checked exception for the ANTLR translator generator.
 * Provides the four conventional {@link Exception} constructors.
 */
public class ANTLRException extends Exception {

    /** Constructs an ANTLRException with no detail message. */
    public ANTLRException() {
    }

    /**
     * Constructs an ANTLRException with a detail message.
     *
     * @param s the detail message
     */
    public ANTLRException(String s) {
        super(s);
    }

    /**
     * Constructs an ANTLRException with a detail message and a cause.
     *
     * @param message the detail message
     * @param cause   the underlying cause
     */
    public ANTLRException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Constructs an ANTLRException wrapping a cause only.
     *
     * @param cause the underlying cause
     */
    public ANTLRException(Throwable cause) {
        super(cause);
    }
}

124
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRGrammarParseBehavior.java

@ -1,124 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRGrammarParseBehavior.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
/**
 * Callback contract driven by the ANTLR grammar parser: as a grammar file is
 * parsed, each recognized construct (rules, alternatives, subrules, token and
 * rule references, options, exception specs, tree specifiers) is reported to
 * an implementation of this interface, which builds the internal grammar
 * representation.
 *
 * <p>All members are implicitly {@code public abstract}; the redundant
 * modifiers of the original source have been dropped.
 */
public interface ANTLRGrammarParseBehavior {

    void abortGrammar();

    void beginAlt(boolean doAST_);

    void beginChildList();

    // Exception handling
    void beginExceptionGroup();

    void beginExceptionSpec(Token label);

    void beginSubRule(Token label, Token start, boolean not);

    // Trees
    void beginTree(Token tok) throws SemanticException;

    void defineRuleName(Token r, String access, boolean ruleAST, String docComment) throws SemanticException;

    void defineToken(Token tokname, Token tokliteral);

    void endAlt();

    void endChildList();

    void endExceptionGroup();

    void endExceptionSpec();

    void endGrammar();

    void endOptions();

    void endRule(String r);

    void endSubRule();

    void endTree();

    void hasError();

    void noASTSubRule();

    void oneOrMoreSubRule();

    void optionalSubRule();

    void refAction(Token action);

    void refArgAction(Token action);

    void setUserExceptions(String thr);

    void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule);

    void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);

    void refElementOption(Token option, Token value);

    void refTokensSpecElementOption(Token tok, Token option, Token value);

    void refExceptionHandler(Token exTypeAndName, Token action);

    void refHeaderAction(Token name, Token act);

    void refInitAction(Token action);

    void refMemberAction(Token act);

    void refPreambleAction(Token act);

    void refReturnAction(Token returnAction);

    void refRule(Token idAssign, Token r, Token label, Token arg, int autoGenType);

    void refSemPred(Token pred);

    void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule);

    void refToken(Token assignId, Token t, Token label, Token args,
                  boolean inverted, int autoGenType, boolean lastInRule);

    void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);

    // Tree specifiers
    void refTreeSpecifier(Token treeSpec);

    void refWildcard(Token t, Token label, int autoGenType);

    void setArgOfRuleRef(Token argaction);

    void setCharVocabulary(BitSet b);

    // Options
    void setFileOption(Token key, Token value, String filename);

    void setGrammarOption(Token key, Token value);

    void setRuleOption(Token key, Token value);

    void setSubruleOption(Token key, Token value);

    void startLexer(String file, Token name, String superClass, String doc);

    // Flow control for grammars
    void startParser(String file, Token name, String superClass, String doc);

    void startTreeWalker(String file, Token name, String superClass, String doc);

    void synPred();

    void zeroOrMoreSubRule();
}

107
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRHashString.java

@ -1,107 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRHashString.java#2 $
*/
// class implements a String-like object whose sole purpose is to be
// entered into a lexer HashTable. It uses a lexer object to get
// information about case sensitivity.
/**
 * A String-like key object whose sole purpose is to be entered into a lexer
 * hash table.  It consults the owning {@code CharScanner} to decide whether
 * comparisons and hashing are case sensitive, so a single table can serve
 * both case-sensitive and case-insensitive grammars.
 */
public class ANTLRHashString {
    // Exactly one of 'str' / 'chars' is non-null at any time.
    private String str;
    private char[] chars;
    private int charCount;          // valid length of 'chars' when in buffer mode
    private CharScanner lexer;      // supplies case-sensitivity and toLower()
    private static final int HASH_PRIME = 151;

    public ANTLRHashString(char[] buf, int length, CharScanner lexer) {
        this.lexer = lexer;
        setBuffer(buf, length);
    }

    /** Instances built this way are unusable until setBuffer or setString is called. */
    public ANTLRHashString(CharScanner lexer) {
        this.lexer = lexer;
    }

    public ANTLRHashString(String s, CharScanner lexer) {
        this.lexer = lexer;
        setString(s);
    }

    // Reads from whichever backing store is active.
    private char charAt(int index) {
        if (str != null) {
            return str.charAt(index);
        }
        return chars[index];
    }

    /**
     * True when {@code o} is an ANTLRHashString (or a plain String) with the
     * same characters, compared per the lexer's case-sensitivity setting.
     */
    public boolean equals(Object o) {
        ANTLRHashString other;
        if (o instanceof ANTLRHashString) {
            other = (ANTLRHashString) o;
        } else if (o instanceof String) {
            other = new ANTLRHashString((String) o, lexer);
        } else {
            return false;
        }
        int n = length();
        if (other.length() != n) {
            return false;
        }
        boolean caseSensitive = lexer.getCaseSensitiveLiterals();
        for (int i = 0; i < n; i++) {
            char a = charAt(i);
            char b = other.charAt(i);
            if (caseSensitive ? a != b
                              : lexer.toLower(a) != lexer.toLower(b)) {
                return false;
            }
        }
        return true;
    }

    /** Hash consistent with equals: case-folded when the lexer is insensitive. */
    public int hashCode() {
        int h = 0;
        int n = length();
        boolean caseSensitive = lexer.getCaseSensitiveLiterals();
        for (int i = 0; i < n; i++) {
            char c = charAt(i);
            h = h * HASH_PRIME + (caseSensitive ? c : lexer.toLower(c));
        }
        return h;
    }

    private int length() {
        return (str != null) ? str.length() : charCount;
    }

    /** Switches to char-array mode; the String reference is dropped. */
    public void setBuffer(char[] buf, int length) {
        chars = buf;
        charCount = length;
        str = null;
    }

    /** Switches to String mode; the char-array reference is dropped. */
    public void setString(String s) {
        str = s;
        chars = null;
    }
}

1451
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRLexer.java

File diff suppressed because it is too large Load Diff

2961
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRParser.java

File diff suppressed because it is too large Load Diff

82
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRStringBuffer.java

@ -1,82 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ANTLRStringBuffer.java#2 $
*/
// Implementation of a StringBuffer-like object that does not have the
// unfortunate side-effect of creating Strings with very large buffers.
// Implementation of a StringBuffer-like object that does not have the
// unfortunate side-effect of creating Strings with very large buffers.
public class ANTLRStringBuffer {
    protected char[] buffer = null;
    protected int length = 0; // length and also where to store next char

    /** Creates a buffer with a default initial capacity of 50 chars. */
    public ANTLRStringBuffer() {
        buffer = new char[50];
    }

    /**
     * Creates a buffer with the given initial capacity.
     *
     * @param n initial capacity in chars; 0 is accepted and the buffer
     *          grows on the first append
     */
    public ANTLRStringBuffer(int n) {
        buffer = new char[n];
    }

    /**
     * Appends a single character, growing the buffer if required.
     *
     * @param c the character to append
     */
    public final void append(char c) {
        // This would normally be an "ensureCapacity" method, but inlined
        // here for speed.
        if (length >= buffer.length) {
            // Compute a new capacity that is at least double the old one.
            // Seeding with max(old, 1) fixes an infinite loop in the
            // original code when the buffer was constructed with capacity
            // 0: newSize started at 0 and 0 * 2 stays 0 forever.
            int newSize = Math.max(buffer.length, 1);
            while (length >= newSize) {
                newSize *= 2;
            }
            // Allocate-and-copy in one call instead of the original
            // hand-written element-by-element loop.
            buffer = java.util.Arrays.copyOf(buffer, newSize);
        }
        buffer[length] = c;
        length++;
    }

    /**
     * Appends every character of the given string.
     *
     * @param s the string to append (must be non-null)
     */
    public final void append(String s) {
        for (int i = 0; i < s.length(); i++) {
            append(s.charAt(i));
        }
    }

    /**
     * Returns the character at {@code index}; only the backing array's own
     * bounds are checked, so indices in [length(), capacity) are not rejected.
     */
    public final char charAt(int index) {
        return buffer[index];
    }

    /** Exposes the backing array directly; it may be longer than length(). */
    final public char[] getBuffer() {
        return buffer;
    }

    /** Number of characters currently stored. */
    public final int length() {
        return length;
    }

    /** Overwrites the character at {@code index}. */
    public final void setCharAt(int index, char ch) {
        buffer[index] = ch;
    }

    /**
     * Truncates to {@code newLength}, or pads with '\0' characters when
     * {@code newLength} exceeds the current length.
     */
    public final void setLength(int newLength) {
        if (newLength < length) {
            length = newLength;
        }
        else {
            while (newLength > length) {
                append('\0');
            }
        }
    }

    /** Returns the current contents as a String of exactly length() chars. */
    @Override
    public final String toString() {
        return new String(buffer, 0, length);
    }
}

647
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefLexer.java

@ -1,647 +0,0 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefLexer.java"$
package com.fr.third.antlr;
import java.io.InputStream;
import java.io.Reader;
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
/**
 * Lexer for ANTLR token-vocabulary definition files, generated by ANTLR 2.x
 * from "tokdef.g" (see the $ANTLR header above).  Do not hand-edit the state
 * machine: regenerate from the grammar instead.  Recognised tokens:
 * whitespace, single-/multi-line comments (both skipped), '(', ')', '=',
 * double-quoted strings with escapes, identifiers and integers.
 */
public class ANTLRTokdefLexer extends com.fr.third.antlr.CharScanner implements ANTLRTokdefParserTokenTypes, TokenStream
{
public ANTLRTokdefLexer(InputStream in) {
	this(new ByteBuffer(in));
}
public ANTLRTokdefLexer(Reader in) {
	this(new CharBuffer(in));
}
public ANTLRTokdefLexer(InputBuffer ib) {
	this(new LexerSharedInputState(ib));
}
public ANTLRTokdefLexer(LexerSharedInputState state) {
	super(state);
	caseSensitiveLiterals = true;
	setCaseSensitive(true);
	literals = new Hashtable();
}
/**
 * Main scanner loop: dispatch on the lookahead character to the matching
 * mXXX rule, retry on SKIP tokens, and convert char-stream errors into
 * TokenStream exceptions.
 */
public Token nextToken() throws TokenStreamException {
	Token theRetToken=null;
tryAgain:
	for (;;) {
		Token _token = null;
		int _ttype = Token.INVALID_TYPE;
		resetText();
		try {   // for char stream error handling
			try {   // for lexical error handling
				switch ( LA(1)) {
				case '\t':  case '\n':  case '\r':  case ' ':
				{
					mWS(true);
					theRetToken=_returnToken;
					break;
				}
				case '(':
				{
					mLPAREN(true);
					theRetToken=_returnToken;
					break;
				}
				case ')':
				{
					mRPAREN(true);
					theRetToken=_returnToken;
					break;
				}
				case '=':
				{
					mASSIGN(true);
					theRetToken=_returnToken;
					break;
				}
				case '"':
				{
					mSTRING(true);
					theRetToken=_returnToken;
					break;
				}
				case 'A':  case 'B':  case 'C':  case 'D':
				case 'E':  case 'F':  case 'G':  case 'H':
				case 'I':  case 'J':  case 'K':  case 'L':
				case 'M':  case 'N':  case 'O':  case 'P':
				case 'Q':  case 'R':  case 'S':  case 'T':
				case 'U':  case 'V':  case 'W':  case 'X':
				case 'Y':  case 'Z':  case 'a':  case 'b':
				case 'c':  case 'd':  case 'e':  case 'f':
				case 'g':  case 'h':  case 'i':  case 'j':
				case 'k':  case 'l':  case 'm':  case 'n':
				case 'o':  case 'p':  case 'q':  case 'r':
				case 's':  case 't':  case 'u':  case 'v':
				case 'w':  case 'x':  case 'y':  case 'z':
				{
					mID(true);
					theRetToken=_returnToken;
					break;
				}
				case '0':  case '1':  case '2':  case '3':
				case '4':  case '5':  case '6':  case '7':
				case '8':  case '9':
				{
					mINT(true);
					theRetToken=_returnToken;
					break;
				}
				default:
					// Comments need two characters of lookahead to decide.
					if ((LA(1)=='/') && (LA(2)=='/')) {
						mSL_COMMENT(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='/') && (LA(2)=='*')) {
						mML_COMMENT(true);
						theRetToken=_returnToken;
					}
				else {
					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
				}
				}
				if ( _returnToken==null ) continue tryAgain; // found SKIP token
				_ttype = _returnToken.getType();
				_returnToken.setType(_ttype);
				return _returnToken;
			}
			catch (RecognitionException e) {
				throw new TokenStreamRecognitionException(e);
			}
		}
		catch (CharStreamException cse) {
			if ( cse instanceof CharStreamIOException ) {
				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
			}
			else {
				throw new TokenStreamException(cse.getMessage());
			}
		}
	}
}
/** WS: space, tab, CR (optionally followed by LF) or LF; always skipped. */
public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = WS;
	int _saveIndex;
	{
	switch ( LA(1)) {
	case ' ':
	{
		match(' ');
		break;
	}
	case '\t':
	{
		match('\t');
		break;
	}
	case '\r':
	{
		match('\r');
		{
		if ((LA(1)=='\n')) {
			match('\n');
		}
		else {
		}
		}
		newline();
		break;
	}
	case '\n':
	{
		match('\n');
		newline();
		break;
	}
	default:
	{
		throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
	}
	}
	}
	_ttype = Token.SKIP;
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** SL_COMMENT: "//" to end of line; skipped, line counter updated. */
public final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = SL_COMMENT;
	int _saveIndex;
	match("//");
	{
	_loop234:
	do {
		if ((_tokenSet_0.member(LA(1)))) {
			{
			match(_tokenSet_0);
			}
		}
		else {
			break _loop234;
		}
	} while (true);
	}
	{
	switch ( LA(1)) {
	case '\n':
	{
		match('\n');
		break;
	}
	case '\r':
	{
		match('\r');
		{
		if ((LA(1)=='\n')) {
			match('\n');
		}
		else {
		}
		}
		break;
	}
	default:
	{
		throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
	}
	}
	}
	_ttype = Token.SKIP; newline();
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** ML_COMMENT: C-style block comment; skipped, embedded newlines counted. */
public final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = ML_COMMENT;
	int _saveIndex;
	match("/*");
	{
	_loop239:
	do {
		if ((LA(1)=='*') && (_tokenSet_1.member(LA(2)))) {
			match('*');
			matchNot('/');
		}
		else if ((LA(1)=='\n')) {
			match('\n');
			newline();
		}
		else if ((_tokenSet_2.member(LA(1)))) {
			matchNot('*');
		}
		else {
			break _loop239;
		}
	} while (true);
	}
	match("*/");
	_ttype = Token.SKIP;
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** LPAREN: '(' */
public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = LPAREN;
	int _saveIndex;
	match('(');
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** RPAREN: ')' */
public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = RPAREN;
	int _saveIndex;
	match(')');
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** ASSIGN: '=' */
public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = ASSIGN;
	int _saveIndex;
	match('=');
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** STRING: double-quoted, with backslash escapes handled by mESC. */
public final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = STRING;
	int _saveIndex;
	match('"');
	{
	_loop245:
	do {
		if ((LA(1)=='\\')) {
			mESC(false);
		}
		else if ((_tokenSet_3.member(LA(1)))) {
			matchNot('"');
		}
		else {
			break _loop245;
		}
	} while (true);
	}
	match('"');
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/**
 * ESC (helper, never emits a token itself): standard escapes \n \r \t \b
 * \f \" \' \\, octal escapes (1-3 digits, leading digit restricted per
 * the usual octal rules), and \\uXXXX unicode escapes.
 */
protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = ESC;
	int _saveIndex;
	match('\\');
	{
	switch ( LA(1)) {
	case 'n':
	{
		match('n');
		break;
	}
	case 'r':
	{
		match('r');
		break;
	}
	case 't':
	{
		match('t');
		break;
	}
	case 'b':
	{
		match('b');
		break;
	}
	case 'f':
	{
		match('f');
		break;
	}
	case '"':
	{
		match('"');
		break;
	}
	case '\'':
	{
		match('\'');
		break;
	}
	case '\\':
	{
		match('\\');
		break;
	}
	case '0':  case '1':  case '2':  case '3':
	{
		{
		matchRange('0','3');
		}
		{
		if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
			mDIGIT(false);
			{
			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
				mDIGIT(false);
			}
			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
			}
			else {
				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
			}
			}
		}
		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
		}
		else {
			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
		}
		}
		break;
	}
	case '4':  case '5':  case '6':  case '7':
	{
		{
		matchRange('4','7');
		}
		{
		if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
			mDIGIT(false);
		}
		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
		}
		else {
			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
		}
		}
		break;
	}
	case 'u':
	{
		match('u');
		mXDIGIT(false);
		mXDIGIT(false);
		mXDIGIT(false);
		mXDIGIT(false);
		break;
	}
	default:
	{
		throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
	}
	}
	}
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** DIGIT (helper): a single decimal digit. */
protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = DIGIT;
	int _saveIndex;
	matchRange('0','9');
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** XDIGIT (helper): a single hexadecimal digit, either case. */
protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = XDIGIT;
	int _saveIndex;
	switch ( LA(1)) {
	case '0':  case '1':  case '2':  case '3':
	case '4':  case '5':  case '6':  case '7':
	case '8':  case '9':
	{
		matchRange('0','9');
		break;
	}
	case 'a':  case 'b':  case 'c':  case 'd':
	case 'e':  case 'f':
	{
		matchRange('a','f');
		break;
	}
	case 'A':  case 'B':  case 'C':  case 'D':
	case 'E':  case 'F':
	{
		matchRange('A','F');
		break;
	}
	default:
	{
		throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
	}
	}
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** ID: letter followed by letters, digits or underscores. */
public final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = ID;
	int _saveIndex;
	{
	switch ( LA(1)) {
	case 'a':  case 'b':  case 'c':  case 'd':
	case 'e':  case 'f':  case 'g':  case 'h':
	case 'i':  case 'j':  case 'k':  case 'l':
	case 'm':  case 'n':  case 'o':  case 'p':
	case 'q':  case 'r':  case 's':  case 't':
	case 'u':  case 'v':  case 'w':  case 'x':
	case 'y':  case 'z':
	{
		matchRange('a','z');
		break;
	}
	case 'A':  case 'B':  case 'C':  case 'D':
	case 'E':  case 'F':  case 'G':  case 'H':
	case 'I':  case 'J':  case 'K':  case 'L':
	case 'M':  case 'N':  case 'O':  case 'P':
	case 'Q':  case 'R':  case 'S':  case 'T':
	case 'U':  case 'V':  case 'W':  case 'X':
	case 'Y':  case 'Z':
	{
		matchRange('A','Z');
		break;
	}
	default:
	{
		throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
	}
	}
	}
	{
	_loop258:
	do {
		switch ( LA(1)) {
		case 'a':  case 'b':  case 'c':  case 'd':
		case 'e':  case 'f':  case 'g':  case 'h':
		case 'i':  case 'j':  case 'k':  case 'l':
		case 'm':  case 'n':  case 'o':  case 'p':
		case 'q':  case 'r':  case 's':  case 't':
		case 'u':  case 'v':  case 'w':  case 'x':
		case 'y':  case 'z':
		{
			matchRange('a','z');
			break;
		}
		case 'A':  case 'B':  case 'C':  case 'D':
		case 'E':  case 'F':  case 'G':  case 'H':
		case 'I':  case 'J':  case 'K':  case 'L':
		case 'M':  case 'N':  case 'O':  case 'P':
		case 'Q':  case 'R':  case 'S':  case 'T':
		case 'U':  case 'V':  case 'W':  case 'X':
		case 'Y':  case 'Z':
		{
			matchRange('A','Z');
			break;
		}
		case '_':
		{
			match('_');
			break;
		}
		case '0':  case '1':  case '2':  case '3':
		case '4':  case '5':  case '6':  case '7':
		case '8':  case '9':
		{
			matchRange('0','9');
			break;
		}
		default:
		{
			break _loop258;
		}
		}
	} while (true);
	}
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
/** INT: one or more decimal digits. */
public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
	int _ttype; Token _token=null; int _begin=text.length();
	_ttype = INT;
	int _saveIndex;
	{
	int _cnt261=0;
	_loop261:
	do {
		if (((LA(1) >= '0' && LA(1) <= '9'))) {
			mDIGIT(false);
		}
		else {
			if ( _cnt261>=1 ) { break _loop261; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
		}
		_cnt261++;
	} while (true);
	}
	if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
		_token = makeToken(_ttype);
		_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
	}
	_returnToken = _token;
}
// Generated 256-bit character-class bitsets used by the rules above.
private static final long[] mk_tokenSet_0() {
	long[] data = new long[8];
	data[0]=-9224L;
	for (int i = 1; i<=3; i++) { data[i]=-1L; }
	return data;
}
public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
private static final long[] mk_tokenSet_1() {
	long[] data = new long[8];
	data[0]=-140737488355336L;
	for (int i = 1; i<=3; i++) { data[i]=-1L; }
	return data;
}
public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
private static final long[] mk_tokenSet_2() {
	long[] data = new long[8];
	data[0]=-4398046512136L;
	for (int i = 1; i<=3; i++) { data[i]=-1L; }
	return data;
}
public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
private static final long[] mk_tokenSet_3() {
	long[] data = new long[8];
	data[0]=-17179869192L;
	data[1]=-268435457L;
	for (int i = 2; i<=3; i++) { data[i]=-1L; }
	return data;
}
public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
}

241
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParser.java

@@ -1,241 +0,0 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
package com.fr.third.antlr;
import com.fr.third.antlr.collections.impl.BitSet;
/** Simple lexer/parser for reading token definition files
in support of the import/export vocab option for grammars.
*/
public class ANTLRTokdefParser extends com.fr.third.antlr.LLkParser implements ANTLRTokdefParserTokenTypes
{
	// This chunk of error reporting code provided by Brian Smith
	// Optional back-reference to the driving Tool, used to route
	// error/warning messages; may stay null (see reportError below).
	private com.fr.third.antlr.Tool antlrTool;
	/** In order to make it so existing subclasses don't break, we won't require
	 * that the antlr.Tool instance be passed as a constructor element. Instead,
	 * the antlr.Tool instance should register itself via {@link #initTool(com.fr.third.antlr.Tool)}
	 * @throws IllegalStateException if a tool has already been registered
	 * @since 2.7.2
	 */
	public void setTool(com.fr.third.antlr.Tool tool) {
		if (antlrTool == null) {
			antlrTool = tool;
		}
		else {
			throw new IllegalStateException("antlr.Tool already registered");
		}
	}
	/** @since 2.7.2 */
	protected com.fr.third.antlr.Tool getTool() {
		return antlrTool;
	}
	/** Delegates the error message to the tool if any was registered via
	 * {@link #initTool(com.fr.third.antlr.Tool)}
	 * @since 2.7.2
	 */
	public void reportError(String s) {
		if (getTool() != null) {
			getTool().error(s, getFilename(), -1, -1);
		}
		else {
			super.reportError(s);
		}
	}
	/** Delegates the error message to the tool if any was registered via
	 * {@link #initTool(com.fr.third.antlr.Tool)}
	 * @since 2.7.2
	 */
	public void reportError(RecognitionException e) {
		if (getTool() != null) {
			getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn());
		}
		else {
			super.reportError(e);
		}
	}
	/** Delegates the warning message to the tool if any was registered via
	 * {@link #initTool(com.fr.third.antlr.Tool)}
	 * @since 2.7.2
	 */
	public void reportWarning(String s) {
		if (getTool() != null) {
			getTool().warning(s, getFilename(), -1, -1);
		}
		else {
			super.reportWarning(s);
		}
	}
	// Generated constructors; the public ones fix lookahead depth k=3.
	protected ANTLRTokdefParser(TokenBuffer tokenBuf, int k) {
		super(tokenBuf,k);
		tokenNames = _tokenNames;
	}
	public ANTLRTokdefParser(TokenBuffer tokenBuf) {
		this(tokenBuf,3);
	}
	protected ANTLRTokdefParser(TokenStream lexer, int k) {
		super(lexer,k);
		tokenNames = _tokenNames;
	}
	public ANTLRTokdefParser(TokenStream lexer) {
		this(lexer,3);
	}
	public ANTLRTokdefParser(ParserSharedInputState state) {
		super(state,3);
		tokenNames = _tokenNames;
	}
	/**
	 * Rule "file": a leading ID (the vocabulary name) followed by zero or
	 * more "line" entries, each of which registers a token in {@code tm}.
	 */
	public final void file(
		ImportVocabTokenManager tm
	) throws RecognitionException, TokenStreamException {
		Token name = null;
		try {      // for error handling
			name = LT(1);
			match(ID);
			{
			_loop225:
			do {
				if ((LA(1)==ID||LA(1)==STRING)) {
					line(tm);
				}
				else {
					break _loop225;
				}
			} while (true);
			}
		}
		catch (RecognitionException ex) {
			reportError(ex);
			consume();
			consumeUntil(_tokenSet_0);
		}
	}
	/**
	 * Rule "line": one token definition.  Accepted shapes (per the LA()
	 * lookahead below): STRING=INT, ID=STRING=INT, ID(STRING)=INT and
	 * ID=INT.  The parsed name/value (and optional label or paraphrase)
	 * are recorded in the supplied token manager {@code tm}.
	 */
	public final void line(
		ImportVocabTokenManager tm
	) throws RecognitionException, TokenStreamException {
		Token s1 = null;
		Token lab = null;
		Token s2 = null;
		Token id = null;
		Token para = null;
		Token id2 = null;
		Token i = null;
		Token t=null; Token s=null;
		try {      // for error handling
			{
			if ((LA(1)==STRING)) {
				s1 = LT(1);
				match(STRING);
				s = s1;
			}
			else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==STRING)) {
				lab = LT(1);
				match(ID);
				t = lab;
				match(ASSIGN);
				s2 = LT(1);
				match(STRING);
				s = s2;
			}
			else if ((LA(1)==ID) && (LA(2)==LPAREN)) {
				id = LT(1);
				match(ID);
				t=id;
				match(LPAREN);
				para = LT(1);
				match(STRING);
				match(RPAREN);
			}
			else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==INT)) {
				id2 = LT(1);
				match(ID);
				t=id2;
			}
			else {
				throw new NoViableAltException(LT(1), getFilename());
			}
			}
			match(ASSIGN);
			i = LT(1);
			match(INT);
			Integer value = Integer.valueOf(i.getText());
			// if literal found, define as a string literal
			if ( s!=null ) {
				tm.define(s.getText(), value.intValue());
				// if label, then label the string and map label to token symbol also
				if ( t!=null ) {
					StringLiteralSymbol sl =
						(StringLiteralSymbol) tm.getTokenSymbol(s.getText());
					sl.setLabel(t.getText());
					tm.mapToTokenSymbol(t.getText(), sl);
				}
			}
			// define token (not a literal)
			else if ( t!=null ) {
				tm.define(t.getText(), value.intValue());
				if ( para!=null ) {
					TokenSymbol ts = tm.getTokenSymbol(t.getText());
					ts.setParaphrase(
						para.getText()
					);
				}
			}
		}
		catch (RecognitionException ex) {
			reportError(ex);
			consume();
			consumeUntil(_tokenSet_1);
		}
	}
	// Generated token-name table; index == token type constant.
	public static final String[] _tokenNames = {
		"<0>",
		"EOF",
		"<2>",
		"NULL_TREE_LOOKAHEAD",
		"ID",
		"STRING",
		"ASSIGN",
		"LPAREN",
		"RPAREN",
		"INT",
		"WS",
		"SL_COMMENT",
		"ML_COMMENT",
		"ESC",
		"DIGIT",
		"XDIGIT"
	};
	// Generated follow-sets used for error recovery (consumeUntil).
	private static final long[] mk_tokenSet_0() {
		long[] data = { 2L, 0L};
		return data;
	}
	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
	private static final long[] mk_tokenSet_1() {
		long[] data = { 50L, 0L};
		return data;
	}
	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
}

18
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokdefParserTokenTypes.java

@@ -1,18 +0,0 @@
// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
package com.fr.third.antlr;
/**
 * Token type constants shared by ANTLRTokdefLexer and ANTLRTokdefParser,
 * generated by ANTLR from "tokdef.g" (see header comment).  Values must
 * stay in sync with ANTLRTokdefParser._tokenNames; do not renumber.
 */
public interface ANTLRTokdefParserTokenTypes {
	int EOF = 1;
	int NULL_TREE_LOOKAHEAD = 3;
	int ID = 4;
	int STRING = 5;
	int ASSIGN = 6;
	int LPAREN = 7;
	int RPAREN = 8;
	int INT = 9;
	int WS = 10;
	int SL_COMMENT = 11;
	int ML_COMMENT = 12;
	int ESC = 13;
	int DIGIT = 14;
	int XDIGIT = 15;
}

68
fine-antlr-old/src/main/java/com/fr/third/antlr/ANTLRTokenTypes.java

@@ -1,68 +0,0 @@
// $ANTLR 2.7.3rc3: "antlr.g" -> "ANTLRLexer.java"$
package com.fr.third.antlr;
/**
 * Token type constants for the ANTLR meta-grammar itself, generated by
 * ANTLR 2.7.3rc3 from "antlr.g" (see header comment).  Do not renumber:
 * the generated lexer/parser switch statements depend on these values.
 */
public interface ANTLRTokenTypes {
	int EOF = 1;
	int NULL_TREE_LOOKAHEAD = 3;
	int LITERAL_tokens = 4;
	int LITERAL_header = 5;
	int STRING_LITERAL = 6;
	int ACTION = 7;
	int DOC_COMMENT = 8;
	int LITERAL_lexclass = 9;
	int LITERAL_class = 10;
	int LITERAL_extends = 11;
	int LITERAL_Lexer = 12;
	int LITERAL_TreeParser = 13;
	int OPTIONS = 14;
	int ASSIGN = 15;
	int SEMI = 16;
	int RCURLY = 17;
	int LITERAL_charVocabulary = 18;
	int CHAR_LITERAL = 19;
	int INT = 20;
	int OR = 21;
	int RANGE = 22;
	int TOKENS = 23;
	int TOKEN_REF = 24;
	int OPEN_ELEMENT_OPTION = 25;
	int CLOSE_ELEMENT_OPTION = 26;
	int LPAREN = 27;
	int RPAREN = 28;
	int LITERAL_Parser = 29;
	int LITERAL_protected = 30;
	int LITERAL_public = 31;
	int LITERAL_private = 32;
	int BANG = 33;
	int ARG_ACTION = 34;
	int LITERAL_returns = 35;
	int COLON = 36;
	int LITERAL_throws = 37;
	int COMMA = 38;
	int LITERAL_exception = 39;
	int LITERAL_catch = 40;
	int RULE_REF = 41;
	int NOT_OP = 42;
	int SEMPRED = 43;
	int TREE_BEGIN = 44;
	int QUESTION = 45;
	int STAR = 46;
	int PLUS = 47;
	int IMPLIES = 48;
	int CARET = 49;
	int WILDCARD = 50;
	int LITERAL_options = 51;
	int WS = 52;
	int COMMENT = 53;
	int SL_COMMENT = 54;
	int ML_COMMENT = 55;
	int ESC = 56;
	int DIGIT = 57;
	int XDIGIT = 58;
	int NESTED_ARG_ACTION = 59;
	int NESTED_ACTION = 60;
	int WS_LOOP = 61;
	int INTERNAL_RULE_REF = 62;
	int WS_OPT = 63;
}

411
fine-antlr-old/src/main/java/com/fr/third/antlr/ASTFactory.java

@@ -1,411 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTFactory.java#2 $
*/
import java.lang.reflect.Constructor;
import java.util.Hashtable;
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.impl.ASTArray;
/** AST Support code shared by TreeParser and Parser.
* We use delegation to share code (and have only one
* bit of code to maintain) rather than subclassing
* or superclassing (forces AST support code to be
* loaded even when you don't want to do AST stuff).
*
* Typically, setASTNodeType is used to specify the
* homogeneous type of node to create, but you can override
* create to make heterogeneous nodes etc...
*/
public class ASTFactory {
	/** Name of AST class to create during tree construction.
	 * Null implies that the create method should create
	 * a default AST type such as CommonAST.  This is for
	 * homogeneous nodes.
	 */
	protected String theASTNodeType = null;
	protected Class theASTNodeTypeClass = null;
	/** How to specify the classname to create for a particular
	 * token type.  Note that ANTLR allows you to say, for example,
	 *
     tokens {
	     PLUS<AST=PLUSNode>;
         ...
     }
	 *
	 * and it tracks everything statically.  #[PLUS] will make you
	 * a PLUSNode w/o use of this table.
	 *
	 * For tokens that ANTLR cannot track statically like #[i],
	 * you can use this table to map PLUS (Integer) -> PLUSNode (Class)
	 * etc... ANTLR sets the class map from the tokens {...} section
	 * via the ASTFactory(Hashtable) ctor in antlr.Parser.
	 */
	protected Hashtable tokenTypeToASTClassMap = null;
	public ASTFactory() {
	}
	/** Create factory with a specific mapping from token type
	 *  to Java AST node type.  Your subclasses of ASTFactory
	 *  can override and reuse the map stuff.
	 */
	public ASTFactory(Hashtable tokenTypeToClassMap) {
		setTokenTypeToASTClassMap(tokenTypeToClassMap);
	}
	/** Specify an "override" for the Java AST object created for a
	 *  specific token.  It is provided as a convenience so
	 *  you can specify node types dynamically.  ANTLR sets
	 *  the token type mapping automatically from the tokens{...}
	 *  section, but you can change that mapping with this method.
	 *  ANTLR does it's best to statically determine the node
	 *  type for generating parsers, but it cannot deal with
	 *  dynamic values like #[LT(1)].  In this case, it relies
	 *  on the mapping.  Beware differences in the tokens{...}
	 *  section and what you set via this method.  Make sure
	 *  they are the same.
	 *
	 *  Set className to null to remove the mapping.
	 *
	 *  @since 2.7.2
	 */
	public void setTokenTypeASTNodeType(int tokenType, String className)
		throws IllegalArgumentException
	{
		if ( tokenTypeToASTClassMap==null ) {
			tokenTypeToASTClassMap = new Hashtable();
		}
		if ( className==null ) {
			// Integer.valueOf instead of the deprecated new Integer(...):
			// equals()-based Hashtable lookup is unaffected.
			tokenTypeToASTClassMap.remove(Integer.valueOf(tokenType));
			return;
		}
		Class c = null;
		try {
			c = Utils.loadClass(className);
			tokenTypeToASTClassMap.put(Integer.valueOf(tokenType), c);
		}
		catch (Exception e) {
			throw new IllegalArgumentException("Invalid class, "+className);
		}
	}
	/** For a given token type, what is the AST node object type to create
	 *  for it?
	 *  @since 2.7.2
	 */
	public Class getASTNodeType(int tokenType) {
		// try node specific class
		if ( tokenTypeToASTClassMap!=null ) {
			Class c = (Class)tokenTypeToASTClassMap.get(Integer.valueOf(tokenType));
			if ( c!=null ) {
				return c;
			}
		}
		// try a global specified class
		if (theASTNodeTypeClass != null) {
			return theASTNodeTypeClass;
		}
		// default to the common type
		return CommonAST.class;
	}
	/** Add a child to the current AST */
	public void addASTChild(ASTPair currentAST, AST child) {
		if (child != null) {
			if (currentAST.root == null) {
				// Make new child the current root
				currentAST.root = child;
			}
			else {
				if (currentAST.child == null) {
					// Add new child to current root
					currentAST.root.setFirstChild(child);
				}
				else {
					currentAST.child.setNextSibling(child);
				}
			}
			// Make new child the current child
			currentAST.child = child;
			currentAST.advanceChildToEnd();
		}
	}
	/** Create a new empty AST node; if the user did not specify
	 *  an AST node type, then create a default one: CommonAST.
	 */
	public AST create() {
		return create(Token.INVALID_TYPE);
	}
	public AST create(int type) {
		Class c = getASTNodeType(type);
		AST t = create(c);
		if ( t!=null ) {
			t.initialize(type, "");
		}
		return t;
	}
	public AST create(int type, String txt) {
		AST t = create(type);
		if ( t!=null ) {
			t.initialize(type, txt);
		}
		return t;
	}
	/** Create an AST node with the token type and text passed in, but
	 *  with a specific Java object type. Typically called when you
	 *  say @[PLUS,"+",PLUSNode] in an antlr action.
	 *  @since 2.7.2
	 */
	public AST create(int type, String txt, String className) {
		AST t = create(className);
		if ( t!=null ) {
			t.initialize(type, txt);
		}
		return t;
	}
	/** Create a new empty AST node; if the user did not specify
	 *  an AST node type, then create a default one: CommonAST.
	 */
	public AST create(AST tr) {
		if (tr == null) return null;	// create(null) == null
		AST t = create(tr.getType());
		if ( t!=null ) {
			t.initialize(tr);
		}
		return t;
	}
	public AST create(Token tok) {
		AST t = create(tok.getType());
		if ( t!=null ) {
			t.initialize(tok);
		}
		return t;
	}
	/** ANTLR generates reference to this when you reference a token
	 *  that has a specified heterogeneous AST node type.  This is
	 *  also a special case node creation routine for backward
	 *  compatibility.  Before, ANTLR generated "new T(tokenObject)"
	 *  and so I must call the appropriate constructor not T().
	 *
	 *  @since 2.7.2
	 */
	public AST create(Token tok, String className) {
		AST t = createUsingCtor(tok,className);
		return t;
	}
	/**
	 *  @since 2.7.2
	 */
	public AST create(String className) {
		Class c = null;
		try {
			c = Utils.loadClass(className);
		}
		catch (Exception e) {
			throw new IllegalArgumentException("Invalid class, "+className);
		}
		return create(c);
	}
	/** Instantiate className via its (Token) constructor when it has one,
	 *  falling back to the no-arg constructor + initialize(token).
	 *  @since 2.7.2
	 */
	protected AST createUsingCtor(Token token, String className) {
		Class c = null;
		AST t = null;
		try {
			c = Utils.loadClass(className);
			Class[] tokenArgType = new Class[] { com.fr.third.antlr.Token.class };
			try {
				Constructor ctor = c.getConstructor(tokenArgType);
				t = (AST)ctor.newInstance(new Object[]{token}); // make a new one
			}
			catch (NoSuchMethodException e){
				// just do the regular thing if you can't find the ctor
				// Your AST must have default ctor to use this.
				t = create(c);
				if ( t!=null ) {
					t.initialize(token);
				}
			}
		}
		catch (Exception e) {
			throw new IllegalArgumentException("Invalid class or can't make instance, "+className);
		}
		return t;
	}
	/** Instantiate an AST node of class c via its no-arg constructor;
	 *  reports via error() and returns null on any failure.
	 *  @since 2.7.2
	 */
	protected AST create(Class c) {
		AST t = null;
		try {
			// Class.newInstance() is deprecated since Java 9; go through the
			// no-arg constructor explicitly.  Any reflective failure
			// (missing/inaccessible ctor, ctor exception) is still caught
			// by the Exception handler below, as before.
			t = (AST)c.getDeclaredConstructor().newInstance(); // make a new one
		}
		catch (Exception e) {
			error("Can't create AST Node " + c.getName());
			return null;
		}
		return t;
	}
	/** Copy a single node with same Java AST object type.
	 *  Ignore the tokenType->Class mapping since you know
	 *  the type of the node, t.getClass(), and doing a dup.
	 *
	 *  clone() is not used because we want all AST creation
	 *  to go thru the factory so creation can be
	 *  tracked.  Returns null if t is null.
	 */
	public AST dup(AST t) {
		if ( t==null ) {
			return null;
		}
		AST dup_t = create(t.getClass());
		dup_t.initialize(t);
		return dup_t;
	}
	/** Duplicate tree including siblings of root. */
	public AST dupList(AST t) {
		AST result = dupTree(t);            // if t == null, then result==null
		AST nt = result;
		while (t != null) {						// for each sibling of the root
			t = t.getNextSibling();
			nt.setNextSibling(dupTree(t));	// dup each subtree, building new tree
			nt = nt.getNextSibling();
		}
		return result;
	}
	/**Duplicate a tree, assuming this is a root node of a tree--
	 * duplicate that node and what's below; ignore siblings of root node.
	 */
	public AST dupTree(AST t) {
		AST result = dup(t);		// make copy of root
		// copy all children of root.
		if (t != null) {
			result.setFirstChild(dupList(t.getFirstChild()));
		}
		return result;
	}
	/** Make a tree from a list of nodes.  The first element in the
	 *  array is the root.  If the root is null, then the tree is
	 *  a simple list not a tree.  Handles null children nodes correctly.
	 *  For example, build(a, b, null, c) yields tree (a b c).  build(null,a,b)
	 *  yields tree (nil a b).
	 */
	public AST make(AST[] nodes) {
		if (nodes == null || nodes.length == 0) return null;
		AST root = nodes[0];
		AST tail = null;
		if (root != null) {
			root.setFirstChild(null);	// don't leave any old pointers set
		}
		// link in children;
		for (int i = 1; i < nodes.length; i++) {
			if (nodes[i] == null) continue;	// ignore null nodes
			if (root == null) {
				// Set the root and set it up for a flat list
				root = tail = nodes[i];
			}
			else if (tail == null) {
				root.setFirstChild(nodes[i]);
				tail = root.getFirstChild();
			}
			else {
				tail.setNextSibling(nodes[i]);
				tail = tail.getNextSibling();
			}
			// Chase tail to last sibling
			while (tail.getNextSibling() != null) {
				tail = tail.getNextSibling();
			}
		}
		return root;
	}
	/** Make a tree from a list of nodes, where the nodes are contained
	 *  in an ASTArray object
	 */
	public AST make(ASTArray nodes) {
		return make(nodes.array);
	}
	/** Make an AST the root of current AST */
	public void makeASTRoot(ASTPair currentAST, AST root) {
		if (root != null) {
			// Add the current root as a child of new root
			root.addChild(currentAST.root);
			// The new current child is the last sibling of the old root
			currentAST.child = currentAST.root;
			currentAST.advanceChildToEnd();
			// Set the new root
			currentAST.root = root;
		}
	}
	public void setASTNodeClass(Class c) {
		if ( c!=null ) {
			theASTNodeTypeClass = c;
			theASTNodeType = c.getName();
		}
	}
	public void setASTNodeClass(String t) {
		theASTNodeType = t;
		try {
			theASTNodeTypeClass = Utils.loadClass(t); // get class def
		}
		catch (Exception e) {
			// either class not found,
			// class is interface/abstract, or
			// class or initializer is not accessible.
			error("Can't find/access AST Node type" + t);
		}
	}
	/** Specify the type of node to create during tree building.
	 * 	@deprecated since 2.7.1
	 */
	public void setASTNodeType(String t) {
		setASTNodeClass(t);
	}
	public Hashtable getTokenTypeToASTClassMap() {
		return tokenTypeToASTClassMap;
	}
	public void setTokenTypeToASTClassMap(Hashtable tokenTypeToClassMap) {
		this.tokenTypeToASTClassMap = tokenTypeToClassMap;
	}
	/** To change where error messages go, can subclass/override this method
	 *  and then setASTFactory in Parser and TreeParser.  This method removes
	 *  a prior dependency on class antlr.Tool.
	 */
	public void error(String e) {
		System.err.println(e);
	}
}

76
fine-antlr-old/src/main/java/com/fr/third/antlr/ASTIterator.java

@@ -1,76 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTIterator.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/**
 * Walks the sibling list starting at a given AST node, finding successive
 * subtrees whose structure and token types match a template tree.
 */
public class ASTIterator {
	// Current position in the sibling list; null once exhausted.
	protected AST cursor = null;
	// Starting node, kept for reference (never read back in this class).
	protected AST original = null;
	public ASTIterator(AST t) {
		original = cursor = t;
	}
	/** Is 'sub' a subtree of 't' beginning at the root? */
	public boolean isSubtree(AST t, AST sub) {
		AST sibling;
		// the empty tree is always a subset of any tree.
		if (sub == null) {
			return true;
		}
		// if the tree is empty, return true if the subtree template is too.
		if (t == null) {
			// if (sub != null) return false;
			// return true;
			return false;
		}
		// Otherwise, start walking sibling lists.  First mismatch, return false.
		for (sibling = t;
			 sibling != null && sub != null;
			 sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
			// as a quick optimization, check roots first.
			if (sibling.getType() != sub.getType()) return false;
			// if roots match, do full match test on children.
			if (sibling.getFirstChild() != null) {
				if (!isSubtree(sibling.getFirstChild(), sub.getFirstChild())) return false;
			}
		}
		return true;
	}
	/** Find the next subtree with structure and token types equal to
	 * those of 'template'.
	 * NOTE(review): a match is only reported when the candidate node has
	 * children (getFirstChild() != null), and 'cursor' is not advanced past
	 * a returned match -- a repeated call restarts at the same node.  This
	 * mirrors the upstream ANTLR 2.7.7 behavior; confirm before changing.
	 */
	public AST next(AST template) {
		AST t = null;
		AST sibling = null;
		if (cursor == null) {	// do nothing if no tree to work on
			return null;
		}
		// Start walking sibling list looking for subtree matches.
		for (; cursor != null; cursor = cursor.getNextSibling()) {
			// as a quick optimization, check roots first.
			if (cursor.getType() == template.getType()) {
				// if roots match, do full match test on children.
				if (cursor.getFirstChild() != null) {
					if (isSubtree(cursor.getFirstChild(), template.getFirstChild())) {
						return cursor;
					}
				}
			}
		}
		return t;
	}
}

108
fine-antlr-old/src/main/java/com/fr/third/antlr/ASTNULLType.java

@ -1,108 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTNULLType.java#2 $
*/
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.ASTEnumeration;
/**
 * Null-object implementation of the AST interface; a single shared
 * instance stands in for "no tree". All mutators are no-ops, every
 * equality test reports false, and the child/sibling accessors return
 * this same node.
 */
public class ASTNULLType implements AST {

    public String getText() {
        return "<ASTNULL>";
    }

    public int getType() {
        return Token.NULL_TREE_LOOKAHEAD;
    }

    public int getLine() {
        return 0;
    }

    public int getColumn() {
        return 0;
    }

    public int getNumberOfChildren() {
        return 0;
    }

    /** Returns this same null node rather than a real child. */
    public AST getFirstChild() {
        return this;
    }

    /** Returns this same null node rather than a real sibling. */
    public AST getNextSibling() {
        return this;
    }

    // The null node never compares equal to anything.
    public boolean equals(AST t) {
        return false;
    }

    public boolean equalsList(AST t) {
        return false;
    }

    public boolean equalsListPartial(AST t) {
        return false;
    }

    public boolean equalsTree(AST t) {
        return false;
    }

    public boolean equalsTreePartial(AST t) {
        return false;
    }

    // Searches over the null node find nothing.
    public ASTEnumeration findAll(AST tree) {
        return null;
    }

    public ASTEnumeration findAllPartial(AST subtree) {
        return null;
    }

    // All mutators are deliberate no-ops.
    public void addChild(AST c) {
    }

    public void initialize(int t, String txt) {
    }

    public void initialize(AST t) {
    }

    public void initialize(Token t) {
    }

    public void setFirstChild(AST c) {
    }

    public void setNextSibling(AST n) {
    }

    public void setText(String text) {
    }

    public void setType(int ttype) {
    }

    // All string renderings collapse to the marker text.
    public String toString() {
        return getText();
    }

    public String toStringList() {
        return getText();
    }

    public String toStringTree() {
        return getText();
    }
}

43
fine-antlr-old/src/main/java/com/fr/third/antlr/ASTPair.java

@ -1,43 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTPair.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/**
 * Utility holder for a pair of ASTs: the current AST root and the
 * current AST sibling. Exists to compensate for the lack of pointer /
 * 'var' arguments in Java.
 */
public class ASTPair {
    public AST root;  // current root of tree
    public AST child; // current child to which siblings are added

    /** Advance 'child' until it is the last sibling in its list. */
    public final void advanceChildToEnd() {
        if (child == null) {
            return;
        }
        while (child.getNextSibling() != null) {
            child = child.getNextSibling();
        }
    }

    /** Shallow copy. Named copy() rather than clone() for type-safety. */
    public ASTPair copy() {
        ASTPair duplicate = new ASTPair();
        duplicate.root = root;
        duplicate.child = child;
        return duplicate;
    }

    /** Renders as "[rootText,childText]", with "null" for missing nodes. */
    public String toString() {
        StringBuffer buf = new StringBuffer();
        buf.append('[');
        buf.append(root == null ? "null" : root.getText());
        buf.append(',');
        buf.append(child == null ? "null" : child.getText());
        buf.append(']');
        return buf.toString();
    }
}

14
fine-antlr-old/src/main/java/com/fr/third/antlr/ASTVisitor.java

@ -1,14 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ASTVisitor.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** Visitor over AST nodes; implementors receive one node per call. */
public interface ASTVisitor {
    void visit(AST node);
}

35
fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/ASDebugStream.java

@ -1,35 +0,0 @@
package com.fr.third.antlr.ASdebug;
import com.fr.third.antlr.Token;
import com.fr.third.antlr.TokenStream;
/**
 * Static helpers that apply <code>IASDebugStream</code> queries to any
 * <code>TokenStream</code>, returning <code>null</code> when the stream
 * does not implement that interface.
 *
 * @author Prashant Deva
 */
public final class ASDebugStream
{
    /** Entire lexer input text, or null if the stream is not debuggable. */
    public static String getEntireText(TokenStream stream)
    {
        if (!(stream instanceof IASDebugStream))
        {
            return null;
        }
        return ((IASDebugStream) stream).getEntireText();
    }

    /** Offset info for 'token', or null if the stream is not debuggable. */
    public static TokenOffsetInfo getOffsetInfo(TokenStream stream, Token token)
    {
        if (!(stream instanceof IASDebugStream))
        {
            return null;
        }
        return ((IASDebugStream) stream).getOffsetInfo(token);
    }
}

24
fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/IASDebugStream.java

@ -1,24 +0,0 @@
package com.fr.third.antlr.ASdebug;
import com.fr.third.antlr.Token;
/**
 * Implemented by streams that can supply the information used by the
 * 'Input Text' view of Antlr Studio.
 *
 * @author Prashant Deva
 */
public interface IASDebugStream
{
    /**
     * Returns the entire text input to the lexer.
     *
     * @return the entire text, or <code>null</code> if an error occurred
     *         or System.in was used
     */
    String getEntireText();

    /**
     * Returns the offset information for the given token.
     *
     * @param token the token whose information needs to be retrieved
     * @return offset info, or <code>null</code>
     */
    TokenOffsetInfo getOffsetInfo(Token token);
}

22
fine-antlr-old/src/main/java/com/fr/third/antlr/ASdebug/TokenOffsetInfo.java

@ -1,22 +0,0 @@
package com.fr.third.antlr.ASdebug;
/**
 * Offset information for a single token, as used by the 'Input Text'
 * view of Antlr Studio. All offsets are 0-based.
 *
 * @author Prashant Deva
 */
public class TokenOffsetInfo
{
    /** Offset of the token's first character. */
    public final int beginOffset;
    /** Number of characters in the token. */
    public final int length;

    public TokenOffsetInfo(int offset, int length)
    {
        this.beginOffset = offset;
        this.length = length;
    }

    /** Offset of the token's last character (inclusive). */
    public int getEndOffset()
    {
        return beginOffset + length - 1;
    }
}

33
fine-antlr-old/src/main/java/com/fr/third/antlr/ActionElement.java

@ -1,33 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ActionElement.java#2 $
*/
/** An embedded action {...} inside an alternative; may be a semantic predicate. */
class ActionElement extends AlternativeElement {
    protected String actionText;          // verbatim action code from the grammar
    protected boolean isSemPred = false;  // true when the action is a semantic predicate

    public ActionElement(Grammar g, Token t) {
        super(g);
        actionText = t.getText();
        line = t.getLine();
        column = t.getColumn();
    }

    /** Emit code for this action via the grammar's generator. */
    public void generate() {
        grammar.generator.gen(this);
    }

    public Lookahead look(int k) {
        return grammar.theLLkAnalyzer.look(k, this);
    }

    /** Renders as the action text, suffixed with '?' for predicates. */
    public String toString() {
        String suffix = isSemPred ? "?" : "";
        return " " + actionText + suffix;
    }
}

23
fine-antlr-old/src/main/java/com/fr/third/antlr/ActionTransInfo.java

@ -1,23 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ActionTransInfo.java#2 $
*/
/**
 * Records how an action was translated (using the AST conversion rules):
 * whether the rule root was assigned, the translated variable name for a
 * referenced #rule, and the name of any referenced $FOLLOW set.
 */
public class ActionTransInfo {
    public boolean assignToRoot = false; // somebody did a "#rule = "
    public String refRuleRoot = null;    // translated var for a #rule reference
    public String followSetName = null;  // name of the referenced $FOLLOW lookahead set

    public String toString() {
        StringBuffer buf = new StringBuffer();
        buf.append("assignToRoot:").append(assignToRoot);
        buf.append(", refRuleRoot:").append(refRuleRoot);
        buf.append(", FOLLOW Set:").append(followSetName);
        return buf.toString();
    }
}

73
fine-antlr-old/src/main/java/com/fr/third/antlr/Alternative.java

@ -1,73 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Alternative.java#2 $
*/
/** Intermediate data class holding information about one alternative. */
class Alternative {
    // Linked list of elements making up this alternative.
    AlternativeElement head; // head of alt element list
    AlternativeElement tail; // last element added

    protected SynPredBlock synPred;        // syntactic predicate block, if any
    protected String semPred;              // semantic predicate action, if any
    protected ExceptionSpec exceptionSpec; // exception specification, if any

    // Lookahead cache for this alt: filled in by deterministic() only and
    // consumed by code generation and by deterministic() itself for
    // (...)*, (...)+ and (...)? blocks. Entries 1..k are meaningful.
    protected Lookahead[] cache;
    // Lookahead depth for this alt; may be NONDETERMINISTIC.
    protected int lookaheadDepth;
    // If non-null, tree specification ala -> A B C (not implemented).
    protected Token treeSpecifier = null;
    // True if AST generation is on for this alt.
    private boolean doAutoGen;

    public Alternative() {
    }

    public Alternative(AlternativeElement firstElement) {
        addElement(firstElement);
    }

    /** Append an element to this alternative's element list. */
    public void addElement(AlternativeElement e) {
        if (head == null) {
            head = e;
        } else {
            tail.next = e;
        }
        tail = e;
    }

    public boolean atStart() {
        return head == null;
    }

    /** AST building is suppressed when a tree-rewrite specifier is present. */
    public boolean getAutoGen() {
        if (treeSpecifier != null) {
            return false;
        }
        return doAutoGen;
    }

    public Token getTreeSpecifier() {
        return treeSpecifier;
    }

    public void setAutoGen(boolean doAutoGen_) {
        doAutoGen = doAutoGen_;
    }
}

226
fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeBlock.java

@ -1,226 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/AlternativeBlock.java#2 $
*/
import com.fr.third.antlr.collections.impl.Vector;
/** A list of alternatives: the block behind (...|...|...) subrules. */
class AlternativeBlock extends AlternativeElement {
    protected String initAction = null; // string for init action {...}
    protected Vector alternatives; // contains Alternative objects
    protected String label; // can label a looping block to break out of it.
    protected int alti, altj; // which alts are being compared at the moment with deterministic()?
    protected int analysisAlt; // which alt are we computing look on? Must be alti or altj
    protected boolean hasAnAction = false; // does any alt have an action?
    protected boolean hasASynPred = false; // does any alt have a syntactic predicate?
    protected int ID = 0; // used to generate unique variables
    protected static int nblks; // how many blocks have we allocated?
    boolean not = false; // true if block is inverted.
    boolean greedy = true; // blocks are greedy by default
    boolean greedySet = false; // but, if not explicitly greedy, a warning might be generated
    protected boolean doAutoGen = true; // false if no AST (or text) to be generated for block
    protected boolean warnWhenFollowAmbig = true; // warn when an empty path or exit path conflicts with an alt of the subrule
    protected boolean generateAmbigWarnings = true; // general warning "shut-up" switch; turning this off suppresses e.g. the if-then-else ambiguity

    public AlternativeBlock(Grammar g) {
        super(g);
        alternatives = new Vector(5);
        this.not = false;
        // give each block a unique ID for generated-variable naming
        nblks++;
        ID = nblks;
    }

    public AlternativeBlock(Grammar g, Token start, boolean not) {
        super(g, start);
        alternatives = new Vector(5);
        this.not = not;
        nblks++;
        ID = nblks;
    }

    public void addAlternative(Alternative alt) {
        alternatives.appendElement(alt);
    }

    /** Emit code for this block via the grammar's generator. */
    public void generate() {
        grammar.generator.gen(this);
    }

    public Alternative getAlternativeAt(int i) {
        return (Alternative)alternatives.elementAt(i);
    }

    public Vector getAlternatives() {
        return alternatives;
    }

    public boolean getAutoGen() {
        return doAutoGen;
    }

    public String getInitAction() {
        return initAction;
    }

    public String getLabel() {
        return label;
    }

    public Lookahead look(int k) {
        return grammar.theLLkAnalyzer.look(k, this);
    }

    /** Reset each alternative's lookahead cache and depth before analysis. */
    public void prepareForAnalysis() {
        for (int i = 0; i < alternatives.size(); i++) {
            // deterministic() uses an alternative cache and sets lookahead depth
            Alternative a = (Alternative)alternatives.elementAt(i);
            a.cache = new Lookahead[grammar.maxk + 1];
            a.lookaheadDepth = GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT;
        }
    }

    /** Walk the syntactic predicate and, for a rule ref R, remove
     * the ref from the list of FOLLOW references for R (stored
     * in the symbol table).
     */
    public void removeTrackingOfRuleRefs(Grammar g) {
        for (int i = 0; i < alternatives.size(); i++) {
            Alternative alt = getAlternativeAt(i);
            AlternativeElement elem = alt.head;
            while (elem != null) {
                if (elem instanceof RuleRefElement) {
                    RuleRefElement rr = (RuleRefElement)elem;
                    RuleSymbol rs = (RuleSymbol)g.getSymbol(rr.targetRule);
                    if (rs == null) {
                        grammar.antlrTool.error("rule " + rr.targetRule + " referenced in (...)=>, but not defined");
                    }
                    else {
                        rs.references.removeElement(rr);
                    }
                }
                else if (elem instanceof AlternativeBlock) {// recurse into subrules
                    ((AlternativeBlock)elem).removeTrackingOfRuleRefs(g);
                }
                elem = elem.next;
            }
        }
    }

    public void setAlternatives(Vector v) {
        alternatives = v;
    }

    public void setAutoGen(boolean doAutoGen_) {
        doAutoGen = doAutoGen_;
    }

    public void setInitAction(String initAction_) {
        initAction = initAction_;
    }

    public void setLabel(String label_) {
        label = label_;
    }

    /** Apply a subrule option; valid keys are warnWhenFollowAmbig,
     * generateAmbigWarnings and greedy, each taking true/false.
     * Anything else is reported as an error via the tool.
     */
    public void setOption(Token key, Token value) {
        if (key.getText().equals("warnWhenFollowAmbig")) {
            if (value.getText().equals("true")) {
                warnWhenFollowAmbig = true;
            }
            else if (value.getText().equals("false")) {
                warnWhenFollowAmbig = false;
            }
            else {
                grammar.antlrTool.error("Value for warnWhenFollowAmbig must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
            }
        }
        else if (key.getText().equals("generateAmbigWarnings")) {
            if (value.getText().equals("true")) {
                generateAmbigWarnings = true;
            }
            else if (value.getText().equals("false")) {
                generateAmbigWarnings = false;
            }
            else {
                grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
            }
        }
        else if (key.getText().equals("greedy")) {
            if (value.getText().equals("true")) {
                greedy = true;
                greedySet = true;
            }
            else if (value.getText().equals("false")) {
                greedy = false;
                greedySet = true;
            }
            else {
                grammar.antlrTool.error("Value for greedy must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
            }
        }
        else {
            grammar.antlrTool.error("Invalid subrule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn());
        }
    }

    /** Render the block, including each alt's lookahead-cache annotation
     * and semantic predicate, for diagnostics.
     */
    public String toString() {
        String s = " (";
        if (initAction != null) {
            s += initAction;
        }
        for (int i = 0; i < alternatives.size(); i++) {
            Alternative alt = getAlternativeAt(i);
            Lookahead cache[] = alt.cache;
            int k = alt.lookaheadDepth;
            // dump lookahead set
            if (k == GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT) {
                // analysis has not run; print nothing
            }
            else if (k == GrammarAnalyzer.NONDETERMINISTIC) {
                s += "{?}:";
            }
            else {
                s += " {";
                for (int j = 1; j <= k; j++) {
                    s += cache[j].toString(",", grammar.tokenManager.getVocabulary());
                    if (j < k && cache[j + 1] != null) s += ";";
                }
                s += "}:";
            }
            // dump alternative including pred (if any)
            AlternativeElement p = alt.head;
            String pred = alt.semPred;
            if (pred != null) {
                s += pred;
            }
            while (p != null) {
                s += p;
                p = p.next;
            }
            if (i < (alternatives.size() - 1)) {
                s += " |";
            }
        }
        s += " )";
        return s;
    }
}

43
fine-antlr-old/src/main/java/com/fr/third/antlr/AlternativeElement.java

@ -1,43 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/AlternativeElement.java#2 $
*/
/** Base class for anything that can appear inside an alternative. */
abstract class AlternativeElement extends GrammarElement {
    AlternativeElement next;                   // sibling link within the alternative
    protected int autoGenType = AUTO_GEN_NONE; // AST auto-generation mode for this element
    protected String enclosingRuleName;        // name of the rule this element lives in

    public AlternativeElement(Grammar g) {
        super(g);
    }

    public AlternativeElement(Grammar g, Token start) {
        super(g, start);
    }

    public AlternativeElement(Grammar g, Token start, int autoGenType_) {
        super(g, start);
        autoGenType = autoGenType_;
    }

    public int getAutoGenType() {
        return autoGenType;
    }

    public void setAutoGenType(int a) {
        autoGenType = a;
    }

    /** Elements are unlabeled by default; labelable subclasses override. */
    public String getLabel() {
        return null;
    }

    /** No-op by default; labelable subclasses override. */
    public void setLabel(String label) {
    }
}

495
fine-antlr-old/src/main/java/com/fr/third/antlr/BaseAST.java

@ -1,495 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BaseAST.java#2 $
*/
import java.io.Serializable;
import java.io.IOException;
import java.io.Writer;
import com.fr.third.antlr.collections.AST;
import com.fr.third.antlr.collections.ASTEnumeration;
import com.fr.third.antlr.collections.impl.ASTEnumerator;
import com.fr.third.antlr.collections.impl.Vector;
/**
 * A Child-Sibling Tree.
 *
 * A tree with PLUS at the root and with two children 3 and 4 is
 * structured as:
 *
 *     PLUS
 *       |
 *       3 -- 4
 *
 * and can be specified easily in LISP notation as (PLUS 3 4),
 * where every '(' starts a new subtree.
 *
 * These trees are particularly useful for translators because of
 * the flexibility of the children lists. They are also very easy
 * to walk automatically, whereas trees with specific children
 * reference fields can't easily be walked automatically.
 *
 * This class contains the basic support for an AST.
 * Most people will create ASTs that are subclasses of
 * BaseAST or of CommonAST.
 */
public abstract class BaseAST implements AST, Serializable {
    protected BaseAST down;  // first child
    protected BaseAST right; // next sibling

    // When enabled via setVerboseStringConversion(), toString() decorates
    // node text with the token-type name looked up in tokenNames.
    private static boolean verboseStringConversion = false;
    private static String[] tokenNames = null;

    /** Add a node to the end of the child list for this node. */
    public void addChild(AST node) {
        if (node == null) return;
        BaseAST t = this.down;
        if (t != null) {
            while (t.right != null) {
                t = t.right;
            }
            t.right = (BaseAST)node;
        }
        else {
            this.down = (BaseAST)node;
        }
    }

    /** How many children does this node have? */
    public int getNumberOfChildren() {
        BaseAST t = this.down;
        int n = 0;
        if (t != null) {
            n = 1;
            while (t.right != null) {
                t = t.right;
                n++;
            }
            return n;
        }
        return n;
    }

    /** Recursive worker for findAll/findAllPartial: append to 'v' every
     * node in the sibling list of 'nodeToSearch' (and all descendants)
     * that matches 'target'.
     */
    private static void doWorkForFindAll(AST nodeToSearch,
                                         Vector v,
                                         AST target,
                                         boolean partialMatch) {
        // Start walking sibling lists, looking for matches.
        for (AST sibling = nodeToSearch; sibling != null; sibling = sibling
            .getNextSibling()) {
            if ((partialMatch && sibling.equalsTreePartial(target))
                || (!partialMatch && sibling.equalsTree(target))) {
                v.appendElement(sibling);
            }
            // regardless of match or not, check any children for matches
            if (sibling.getFirstChild() != null) {
                doWorkForFindAll(sibling.getFirstChild(), v, target, partialMatch);
            }
        }
    }

    /** Is node t equal to this in terms of token type and text?
     * Note: this overloads (does not override) Object.equals(Object).
     */
    public boolean equals(AST t) {
        if (t == null) return false;
        if ( (this.getText()==null && t.getText()!=null) ||
             (this.getText()!=null && t.getText()==null) )
        {
            return false;
        }
        if ( this.getText()==null && t.getText()==null ) {
            return this.getType() == t.getType();
        }
        return this.getText().equals(t.getText()) &&
            this.getType() == t.getType();
    }

    /** Is t an exact structural and equals() match of this tree. The
     * 'this' reference is considered the start of a sibling list.
     */
    public boolean equalsList(AST t) {
        AST sibling;
        // the empty tree is not a match of any non-null tree.
        if (t == null) {
            return false;
        }
        // Otherwise, start walking sibling lists. First mismatch, return false.
        for (sibling = this;
             sibling != null && t != null;
             sibling = sibling.getNextSibling(), t = t.getNextSibling())
        {
            // as a quick optimization, check roots first.
            if (!sibling.equals(t)) {
                return false;
            }
            // if roots match, do full list match test on children.
            if (sibling.getFirstChild() != null) {
                if (!sibling.getFirstChild().equalsList(t.getFirstChild())) {
                    return false;
                }
            }
            // sibling has no kids, make sure t doesn't either
            else if (t.getFirstChild() != null) {
                return false;
            }
        }
        if (sibling == null && t == null) {
            return true;
        }
        // one sibling list has more than the other
        return false;
    }

    /** Is 'sub' a subtree of this list?
     * The siblings of the root are NOT ignored.
     */
    public boolean equalsListPartial(AST sub) {
        AST sibling;
        // the empty tree is always a subset of any tree.
        if (sub == null) {
            return true;
        }
        // Otherwise, start walking sibling lists. First mismatch, return false.
        for (sibling = this;
             sibling != null && sub != null;
             sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
            // as a quick optimization, check roots first.
            if (!sibling.equals(sub)) return false;
            // if roots match, do partial list match test on children.
            if (sibling.getFirstChild() != null) {
                if (!sibling.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
            }
        }
        if (sibling == null && sub != null) {
            // nothing left to match in this tree, but subtree has more
            return false;
        }
        // either both are null or sibling has more, but subtree doesn't
        return true;
    }

    /** Is tree rooted at 'this' equal to 't'? The siblings
     * of 'this' are ignored.
     */
    public boolean equalsTree(AST t) {
        // check roots first.
        if (!this.equals(t)) return false;
        // if roots match, do full list match test on children.
        if (this.getFirstChild() != null) {
            if (!this.getFirstChild().equalsList(t.getFirstChild())) return false;
        }
        // sibling has no kids, make sure t doesn't either
        else if (t.getFirstChild() != null) {
            return false;
        }
        return true;
    }

    /** Is 'sub' a subtree of the tree rooted at 'this'? The siblings
     * of 'this' are ignored.
     */
    public boolean equalsTreePartial(AST sub) {
        // the empty tree is always a subset of any tree.
        if (sub == null) {
            return true;
        }
        // check roots first.
        if (!this.equals(sub)) return false;
        // if roots match, do full list partial match test on children.
        if (this.getFirstChild() != null) {
            if (!this.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
        }
        return true;
    }

    /** Walk the tree looking for all exact subtree matches. Return
     * an ASTEnumerator that lets the caller walk the list
     * of subtree roots found herein.
     * (Fixed: removed an unused 'sibling' local.)
     */
    public ASTEnumeration findAll(AST target) {
        Vector roots = new Vector(10);
        // the empty tree cannot result in an enumeration
        if (target == null) {
            return null;
        }
        doWorkForFindAll(this, roots, target, false); // find all matches recursively
        return new ASTEnumerator(roots);
    }

    /** Walk the tree looking for all partial subtree matches. Return
     * an ASTEnumerator that lets the caller walk the list
     * of subtree roots found herein.
     * (Fixed: removed an unused 'sibling' local.)
     */
    public ASTEnumeration findAllPartial(AST sub) {
        Vector roots = new Vector(10);
        // the empty tree cannot result in an enumeration
        if (sub == null) {
            return null;
        }
        doWorkForFindAll(this, roots, sub, true); // find all matches recursively
        return new ASTEnumerator(roots);
    }

    /** Get the first child of this node; null if no children. */
    public AST getFirstChild() {
        return down;
    }

    /** Get the next sibling in line after this one. */
    public AST getNextSibling() {
        return right;
    }

    /** Get the token text for this node. */
    public String getText() {
        return "";
    }

    /** Get the token type for this node. */
    public int getType() {
        return 0;
    }

    public int getLine() {
        return 0;
    }

    public int getColumn() {
        return 0;
    }

    public abstract void initialize(int t, String txt);

    public abstract void initialize(AST t);

    public abstract void initialize(Token t);

    /** Remove all children. */
    public void removeChildren() {
        down = null;
    }

    public void setFirstChild(AST c) {
        down = (BaseAST)c;
    }

    public void setNextSibling(AST n) {
        right = (BaseAST)n;
    }

    /** Set the token text for this node; no-op in the base class. */
    public void setText(String text) {
    }

    /** Set the token type for this node; no-op in the base class. */
    public void setType(int ttype) {
    }

    public static void setVerboseStringConversion(boolean verbose, String[] names) {
        verboseStringConversion = verbose;
        tokenNames = names;
    }

    /** Return an array of strings that maps token ID to it's text. @since 2.7.3 */
    public static String[] getTokenNames() {
        return tokenNames;
    }

    public String toString() {
        StringBuffer b = new StringBuffer();
        // if verbose and type name not same as text (keyword probably)
        if (verboseStringConversion &&
            getText() != null &&
            !getText().equalsIgnoreCase(tokenNames[getType()]) &&
            !getText().equalsIgnoreCase(StringUtils.stripFrontBack(tokenNames[getType()], "\"", "\""))) {
            b.append('[');
            b.append(getText());
            b.append(",<");
            b.append(tokenNames[getType()]);
            b.append(">]");
            return b.toString();
        }
        return getText();
    }

    /** Print out a child-sibling tree in LISP notation. */
    public String toStringList() {
        AST t = this;
        String ts = "";
        if (t.getFirstChild() != null) ts += " (";
        ts += " " + this.toString();
        if (t.getFirstChild() != null) {
            ts += ((BaseAST)t.getFirstChild()).toStringList();
        }
        if (t.getFirstChild() != null) ts += " )";
        if (t.getNextSibling() != null) {
            ts += ((BaseAST)t.getNextSibling()).toStringList();
        }
        return ts;
    }

    public String toStringTree() {
        AST t = this;
        String ts = "";
        if (t.getFirstChild() != null) ts += " (";
        ts += " " + this.toString();
        if (t.getFirstChild() != null) {
            ts += ((BaseAST)t.getFirstChild()).toStringList();
        }
        if (t.getFirstChild() != null) ts += " )";
        return ts;
    }

    /** Inverse of {@link #encode}: expand the five predefined XML
     * entities (&amp;amp; &amp;lt; &amp;gt; &amp;quot; &amp;apos;) back
     * into their characters.
     *
     * Fixed two defects in the original: (1) every branch advanced the
     * index one position too far, silently dropping the character that
     * followed each decoded entity (e.g. decode of "&amp;amp;x" yielded
     * "&" instead of "&x"); (2) a '&' near the end of the input caused
     * StringIndexOutOfBoundsException from the unchecked charAt() reads.
     * A '&' that starts no known entity is now kept literally.
     */
    public static String decode(String text) {
        StringBuffer n = new StringBuffer();
        for (int i = 0; i < text.length(); i++) {
            char c = text.charAt(i);
            if (c != '&') {
                n.append(c);
            }
            // startsWith(prefix, offset) is bounds-safe near end of input.
            // Each branch advances i to the entity's final ';' so the
            // loop increment moves to the next undecoded character.
            else if (text.startsWith("&amp;", i)) {
                n.append('&');
                i += 4;
            }
            else if (text.startsWith("&lt;", i)) {
                n.append('<');
                i += 3;
            }
            else if (text.startsWith("&gt;", i)) {
                n.append('>');
                i += 3;
            }
            else if (text.startsWith("&quot;", i)) {
                n.append('"');
                i += 5;
            }
            else if (text.startsWith("&apos;", i)) {
                n.append('\'');
                i += 5;
            }
            else {
                n.append('&');
            }
        }
        return n.toString();
    }

    /** Escape the five XML-special characters in 'text' with their
     * predefined entities.
     */
    public static String encode(String text) {
        char c;
        StringBuffer n = new StringBuffer();
        for (int i = 0; i < text.length(); i++) {
            c = text.charAt(i);
            switch (c) {
                case '&':
                {
                    n.append("&amp;");
                    break;
                }
                case '<':
                {
                    n.append("&lt;");
                    break;
                }
                case '>':
                {
                    n.append("&gt;");
                    break;
                }
                case '"':
                {
                    n.append("&quot;");
                    break;
                }
                case '\'':
                {
                    n.append("&apos;");
                    break;
                }
                default :
                {
                    n.append(c);
                    break;
                }
            }
        }
        return new String(n);
    }

    /** Write this single node as a self-closing XML element. */
    public void xmlSerializeNode(Writer out)
        throws IOException {
        StringBuffer buf = new StringBuffer(100);
        buf.append('<');
        buf.append(getClass().getName() + " ");
        buf.append("text=\"" + encode(getText()) + "\" type=\"" +
                   getType() + "\"/>");
        out.write(buf.toString());
    }

    /** Write the opening XML tag for this node (it has children). */
    public void xmlSerializeRootOpen(Writer out)
        throws IOException {
        StringBuffer buf = new StringBuffer(100);
        buf.append('<');
        buf.append(getClass().getName() + " ");
        buf.append("text=\"" + encode(getText()) + "\" type=\"" +
                   getType() + "\">\n");
        out.write(buf.toString());
    }

    /** Write the closing XML tag for this node. */
    public void xmlSerializeRootClose(Writer out)
        throws IOException {
        out.write("</" + getClass().getName() + ">\n");
    }

    /** Serialize this node and all its siblings (and their subtrees) as XML. */
    public void xmlSerialize(Writer out) throws IOException {
        // print out this node and all siblings
        for (AST node = this;
             node != null;
             node = node.getNextSibling()) {
            if (node.getFirstChild() == null) {
                // print guts (class name, attributes)
                ((BaseAST)node).xmlSerializeNode(out);
            }
            else {
                ((BaseAST)node).xmlSerializeRootOpen(out);
                // print children
                ((BaseAST)node.getFirstChild()).xmlSerialize(out);
                // print end tag
                ((BaseAST)node).xmlSerializeRootClose(out);
            }
        }
    }
}

32
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockContext.java

@ -1,32 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockContext.java#2 $
*/
/**
 * BlockContext stores the information needed while building one block of
 * alternatives. Entering a subrule requires saving this state, since every
 * block of alternatives needs its own "tail of current alternative".
 */
class BlockContext {
    AlternativeBlock block;   // current block of alternatives
    int altNum;               // index of the alt being accepted, 0..n-1
    BlockEndElement blockEnd; // used if nested

    /** Append an element to the alternative currently being built. */
    public void addAlternativeElement(AlternativeElement e) {
        currentAlt().addElement(e);
    }

    /** The alternative currently being built. */
    public Alternative currentAlt() {
        return (Alternative) block.alternatives.elementAt(altNum);
    }

    /** Last element added to the current alternative. */
    public AlternativeElement currentElement() {
        return currentAlt().tail;
    }
}

31
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockEndElement.java

@ -1,31 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockEndElement.java#2 $
*/
/**
 * Terminator element for alternative blocks: all alternative blocks end in
 * a BlockEndElement unless they are rule blocks (which use RuleEndElement).
 */
class BlockEndElement extends AlternativeElement {
    protected boolean[] lock;         // analysis guard used to avoid infinite loops
    protected AlternativeBlock block; // the block this element terminates

    public BlockEndElement(Grammar g) {
        super(g);
        lock = new boolean[g.maxk + 1];
    }

    public Lookahead look(int k) {
        return grammar.theLLkAnalyzer.look(k, this);
    }

    /** Block ends render as nothing in grammar dumps. */
    public String toString() {
        return "";
    }
}

24
fine-antlr-old/src/main/java/com/fr/third/antlr/BlockWithImpliedExitPath.java

@ -1,24 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/BlockWithImpliedExitPath.java#2 $
*/
/**
 * Base class for alternative blocks that have an implied path bypassing
 * the block body (optional/looping subrules).
 */
abstract class BlockWithImpliedExitPath extends AlternativeBlock {
    protected int exitLookaheadDepth; // lookahead needed to handle optional path
    /** lookahead to bypass block; set
     * by deterministic(). 1..k of Lookahead
     * NOTE(review): this field initializer reads 'grammar', which must be
     * assigned before instance initializers run after the super(...) call --
     * presumably the superclass constructor sets it (not visible here);
     * confirm before reordering construction.
     */
    protected Lookahead[] exitCache = new Lookahead[grammar.maxk + 1];
    public BlockWithImpliedExitPath(Grammar g) {
        super(g);
    }
    public BlockWithImpliedExitPath(Grammar g, Token start) {
        // 'false' = block is not inverted
        super(g, start, false);
    }
}

53
fine-antlr-old/src/main/java/com/fr/third/antlr/ByteBuffer.java

@ -1,53 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ByteBuffer.java#2 $
*/
/**A Stream of characters fed to the lexer from a InputStream that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
 * character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see antlr.CharQueue
*/
// SAS: added this class to handle Binary input w/ FileInputStream
import java.io.IOException;
import java.io.InputStream;
/**
 * Character stream for the lexer fed from a binary InputStream,
 * rewindable via the inherited mark()/rewind(). Characters are buffered
 * in a dynamic array and consumption is deferred until LA/LT need them.
 */
public class ByteBuffer extends InputBuffer {
    /** Underlying byte source. */
    public transient InputStream input;

    /** Create a character buffer over the given stream. */
    public ByteBuffer(InputStream input_) {
        super();
        input = input_;
    }

    /**
     * Ensure the buffer holds at least 'amount' characters past the marker.
     *
     * NOTE(review): at end of stream input.read() returns -1, which the
     * (char) cast turns into '\uFFFF' -- presumably the scanner's EOF
     * sentinel; confirm against the scanner before changing.
     *
     * @throws CharStreamException wrapping any underlying IOException
     */
    public void fill(int amount) throws CharStreamException {
        try {
            syncConsume();
            // Keep appending until enough characters are buffered.
            while (queue.nbrEntries < amount + markerOffset) {
                queue.append((char) input.read());
            }
        }
        catch (IOException ioe) {
            throw new CharStreamIOException(ioe);
        }
    }
}

53
fine-antlr-old/src/main/java/com/fr/third/antlr/CharBuffer.java

@ -1,53 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharBuffer.java#2 $
*/
/**A Stream of characters fed to the lexer from a InputStream that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
 * character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see antlr.CharQueue
*/
import java.io.IOException;
import java.io.Reader;
// SAS: Move most functionality into InputBuffer -- just the file-specific
// stuff is in here
public class CharBuffer extends InputBuffer {
    /** Underlying character source feeding this buffer. */
    public transient Reader input;

    /** Create a buffer over the given reader. */
    public CharBuffer(Reader in) { // SAS: for proper text i/o
        super();
        input = in;
    }

    /** Top up the queue so at least {@code amount} lookahead characters
     * (beyond any active mark) are buffered.
     * @throws CharStreamException if the underlying reader fails
     */
    public void fill(int amount) throws CharStreamException {
        try {
            syncConsume();
            // Read one char per iteration; end-of-stream (-1) is widened
            // to the (char)-1 EOF sentinel by the cast.
            final int needed = amount + markerOffset;
            while (queue.nbrEntries < needed) {
                queue.append((char)input.read());
            }
        }
        catch (IOException ioe) {
            throw new CharStreamIOException(ioe);
        }
    }
}

23
fine-antlr-old/src/main/java/com/fr/third/antlr/CharFormatter.java

@ -1,23 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharFormatter.java#2 $
*/
/** Interface used by BitSet to format elements of the set when
 * converting to string
 */
public interface CharFormatter {
    /** Escape a single character code; {@code forCharLiteral} selects
     * character-literal escaping rules rather than string-literal rules. */
    public String escapeChar(int c, boolean forCharLiteral);
    /** Escape an entire string for the target representation. */
    public String escapeString(String s);
    /** Format a character code as a character literal in the target representation. */
    public String literalChar(int c);
    /** Format a string as a string literal in the target representation. */
    public String literalString(String s);
}

29
fine-antlr-old/src/main/java/com/fr/third/antlr/CharLiteralElement.java

@ -1,29 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharLiteralElement.java#2 $
*/
class CharLiteralElement extends GrammarAtom {
    /** Grammar atom representing a single character literal in a lexer rule.
     * @param g enclosing lexer grammar
     * @param t the character-literal token
     * @param inverted true for a ~'c' (negated) match
     * @param autoGenType AST auto-generation mode for this atom
     */
    public CharLiteralElement(LexerGrammar g, Token t, boolean inverted, int autoGenType) {
        super(g, t, AUTO_GEN_NONE);
        line = t.getLine();
        not = inverted;
        this.autoGenType = autoGenType;
        // Resolve the literal text to its character code and record it in
        // the lexer's character vocabulary.
        tokenType = ANTLRLexer.tokenTypeForCharLiteral(t.getText());
        g.charVocabulary.add(tokenType);
    }

    /** Emit code for this atom via the grammar's code generator. */
    public void generate() {
        grammar.generator.gen(this);
    }

    /** Compute k-lookahead for this atom via the grammar's analyzer. */
    public Lookahead look(int k) {
        return grammar.theLLkAnalyzer.look(k, this);
    }
}

95
fine-antlr-old/src/main/java/com/fr/third/antlr/CharQueue.java

@ -1,95 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharQueue.java#2 $
*/
/** A circular buffer object used by CharBuffer */
public class CharQueue {
    /** Physical circular buffer of tokens */
    protected char[] buffer;
    /** buffer.length-1 for quick modulos; only valid while the capacity
     * is a power of two, because it is used as a bit mask. */
    private int sizeLessOne;
    /** physical index of front token */
    private int offset;
    /** number of tokens in the queue */
    protected int nbrEntries;

    /** Build a queue whose capacity is the first power of 2 >= minSize.
     * Negative sizes get a default capacity of 16; absurdly large requests
     * are clamped to the largest power of two an int can represent.
     * @param minSize requested minimum capacity
     */
    public CharQueue(int minSize) {
        // Find first power of 2 >= to requested size
        int size;
        if ( minSize<0 ) {
            init(16); // pick some value for them
            return;
        }
        if ( minSize>=(Integer.MAX_VALUE/2) ) {
            // Clamp to the largest power of two an int can hold.  The
            // original code used Integer.MAX_VALUE here, which is NOT a
            // power of two and would corrupt the (offset + i) & sizeLessOne
            // index arithmetic (besides being unallocatable in practice).
            init(1 << 30);
            return;
        }
        for (size = 2; size < minSize; size *= 2) {
        }
        init(size);
    }

    /** Add token to end of the queue, doubling capacity when full.
     * @param tok The token to add
     */
    public final void append(char tok) {
        if (nbrEntries == buffer.length) {
            expand();
        }
        buffer[(offset + nbrEntries) & sizeLessOne] = tok;
        nbrEntries++;
    }

    /** Fetch a token from the queue by index
     * @param idx The index of the token to fetch, where zero is the token at the front of the queue
     */
    public final char elementAt(int idx) {
        return buffer[(offset + idx) & sizeLessOne];
    }

    /** Expand the token buffer by doubling its capacity */
    private void expand() {
        char[] newBuffer = new char[buffer.length * 2];
        // Copy the contents to the new buffer
        // Note that this will store the first logical item in the
        // first physical array element.
        for (int i = 0; i < buffer.length; i++) {
            newBuffer[i] = elementAt(i);
        }
        // Re-initialize with new contents, keep old nbrEntries
        buffer = newBuffer;
        sizeLessOne = buffer.length - 1;
        offset = 0;
    }

    /** Initialize the queue.
     * @param size The initial size of the queue (must be a power of 2)
     */
    public void init(int size) {
        // Allocate buffer
        buffer = new char[size];
        // Other initialization
        sizeLessOne = size - 1;
        offset = 0;
        nbrEntries = 0;
    }

    /** Clear the queue. Leaving the previous buffer alone.
     */
    public final void reset() {
        offset = 0;
        nbrEntries = 0;
    }

    /** Remove char from front of queue */
    public final void removeFirst() {
        offset = (offset + 1) & sizeLessOne;
        nbrEntries--;
    }
}

54
fine-antlr-old/src/main/java/com/fr/third/antlr/CharRangeElement.java

@ -1,54 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharRangeElement.java#2 $
*/
class CharRangeElement extends AlternativeElement {
    String label;
    protected char begin = 0;
    protected char end = 0;
    protected String beginText;
    protected String endText;

    /** Grammar element for a lexer character range 'a'..'z'.
     * Registers every character of the inclusive range in the lexer's
     * character vocabulary.
     */
    public CharRangeElement(LexerGrammar g, Token t1, Token t2, int autoGenType) {
        super(g);
        beginText = t1.getText();
        endText = t2.getText();
        begin = (char)ANTLRLexer.tokenTypeForCharLiteral(beginText);
        end = (char)ANTLRLexer.tokenTypeForCharLiteral(endText);
        line = t1.getLine();
        // track which characters are referenced in the grammar
        for (int ch = begin; ch <= end; ch++) {
            g.charVocabulary.add(ch);
        }
        this.autoGenType = autoGenType;
    }

    /** Emit code for this range via the grammar's code generator. */
    public void generate() {
        grammar.generator.gen(this);
    }

    public String getLabel() {
        return label;
    }

    /** Compute k-lookahead for this range via the grammar's analyzer. */
    public Lookahead look(int k) {
        return grammar.theLLkAnalyzer.look(k, this);
    }

    public void setLabel(String label_) {
        label = label_;
    }

    public String toString() {
        String range = beginText + ".." + endText;
        return (label == null) ? " " + range : " " + label + ":" + range;
    }
}

409
fine-antlr-old/src/main/java/com/fr/third/antlr/CharScanner.java

@ -1,409 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharScanner.java#2 $
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
/**
 * Abstract superclass of every ANTLR-generated lexer.  Holds the text of
 * the token being built, case-sensitivity switches, the literals table,
 * and line/column bookkeeping; characters themselves come from the shared
 * LexerSharedInputState.  Subclasses supply TokenStream.nextToken().
 */
public abstract class CharScanner implements TokenStream {
    static final char NO_CHAR = 0;
    // (char)-1 == 0xFFFF: sentinel for end of input; EOF is NOT a real character.
    public static final char EOF_CHAR = (char)-1;
    protected ANTLRStringBuffer text; // text of current token
    protected boolean saveConsumedInput = true; // does consume() save characters?
    protected Class tokenObjectClass; // what kind of tokens to create?
    protected boolean caseSensitive = true;
    protected boolean caseSensitiveLiterals = true;
    protected Hashtable literals; // set by subclass
    /** Tab chars are handled by tab() according to this value; override
     * method to do anything weird with tabs.
     */
    protected int tabsize = 8;
    protected Token _returnToken = null; // used to return tokens w/o using return val.
    // Hash string used so we don't new one every time to check literals table
    protected ANTLRHashString hashString;
    protected LexerSharedInputState inputState;
    /** Used during filter mode to indicate that path is desired.
     * A subsequent scan error will report an error as usual if
     * acceptPath=true;
     */
    protected boolean commitToPath = false;
    /** Used to keep track of indentdepth for traceIn/Out */
    protected int traceDepth = 0;

    /** Set up the text buffer, literals hash key, and the repackaged
     * CommonToken as the default token class. */
    public CharScanner() {
        text = new ANTLRStringBuffer();
        hashString = new ANTLRHashString(this);
        //TODO alex mod
        // setTokenObjectClass("antlr.CommonToken");
        setTokenObjectClass("com.fr.third.antlr.CommonToken");
    }

    public CharScanner(InputBuffer cb) { // SAS: use generic buffer
        this();
        inputState = new LexerSharedInputState(cb);
    }

    public CharScanner(LexerSharedInputState sharedState) {
        this();
        inputState = sharedState;
    }

    /** Append c to the current token text if input saving is on. */
    public void append(char c) {
        if (saveConsumedInput) {
            text.append(c);
        }
    }

    /** Append s to the current token text if input saving is on. */
    public void append(String s) {
        if (saveConsumedInput) {
            text.append(s);
        }
    }

    /** Tell the input buffer that buffered characters will not be rewound. */
    public void commit() {
        inputState.input.commit();
    }

    /** Consume one character: record it (in original case), update
     * column/line bookkeeping, and advance the input buffer.  Text is
     * only recorded when not inside a syntactic-predicate guess
     * (guessing == 0).
     */
    public void consume() throws CharStreamException {
        if (inputState.guessing == 0) {
            char c = LA(1);
            if (caseSensitive) {
                append(c);
            }
            else {
                // use input.LA(), not LA(), to get original case
                // CharScanner.LA() would toLower it.
                append(inputState.input.LA(1));
            }
            if (c == '\t') {
                tab();
            }
            else {
                inputState.column++;
            }
        }
        inputState.input.consume();
    }

    /** Consume chars until one matches the given char */
    public void consumeUntil(int c) throws CharStreamException {
        while (LA(1) != EOF_CHAR && LA(1) != c) {
            consume();
        }
    }

    /** Consume chars until one matches the given set */
    public void consumeUntil(BitSet set) throws CharStreamException {
        while (LA(1) != EOF_CHAR && !set.member(LA(1))) {
            consume();
        }
    }

    public boolean getCaseSensitive() {
        return caseSensitive;
    }

    public final boolean getCaseSensitiveLiterals() {
        return caseSensitiveLiterals;
    }

    public int getColumn() {
        return inputState.column;
    }

    public void setColumn(int c) {
        inputState.column = c;
    }

    public boolean getCommitToPath() {
        return commitToPath;
    }

    public String getFilename() {
        return inputState.filename;
    }

    public InputBuffer getInputBuffer() {
        return inputState.input;
    }

    public LexerSharedInputState getInputState() {
        return inputState;
    }

    public void setInputState(LexerSharedInputState state) {
        inputState = state;
    }

    public int getLine() {
        return inputState.line;
    }

    /** return a copy of the current text buffer */
    public String getText() {
        return text.toString();
    }

    public Token getTokenObject() {
        return _returnToken;
    }

    /** Lookahead character i; lower-cased when the scanner is case-insensitive. */
    public char LA(int i) throws CharStreamException {
        if (caseSensitive) {
            return inputState.input.LA(i);
        }
        else {
            return toLower(inputState.input.LA(i));
        }
    }

    /** Instantiate a token of the configured class with type t and the
     * recorded token-start position; falls back to Token.badToken (after
     * calling panic) when reflective construction fails.
     */
    protected Token makeToken(int t) {
        try {
            Token tok = (Token)tokenObjectClass.newInstance();
            tok.setType(t);
            tok.setColumn(inputState.tokenStartColumn);
            tok.setLine(inputState.tokenStartLine);
            // tracking real start line now: tok.setLine(inputState.line);
            return tok;
        }
        catch (InstantiationException ie) {
            panic("can't instantiate token: " + tokenObjectClass);
        }
        catch (IllegalAccessException iae) {
            // NOTE(review): message lacks a separating space before the class name.
            panic("Token class is not accessible" + tokenObjectClass);
        }
        return Token.badToken;
    }

    public int mark() {
        return inputState.input.mark();
    }

    /** Match exactly c or throw; consumes on success. */
    public void match(char c) throws MismatchedCharException, CharStreamException {
        if (LA(1) != c) {
            throw new MismatchedCharException(LA(1), c, false, this);
        }
        consume();
    }

    /** Match any char in b or throw; consumes on success. */
    public void match(BitSet b) throws MismatchedCharException, CharStreamException {
        if (!b.member(LA(1))) {
            throw new MismatchedCharException(LA(1), b, false, this);
        }
        else {
            consume();
        }
    }

    /** Match the whole string s char by char; throws at the first mismatch,
     * leaving earlier chars already consumed. */
    public void match(String s) throws MismatchedCharException, CharStreamException {
        int len = s.length();
        for (int i = 0; i < len; i++) {
            if (LA(1) != s.charAt(i)) {
                throw new MismatchedCharException(LA(1), s.charAt(i), false, this);
            }
            consume();
        }
    }

    /** Match any char except c or throw; consumes on success. */
    public void matchNot(char c) throws MismatchedCharException, CharStreamException {
        if (LA(1) == c) {
            throw new MismatchedCharException(LA(1), c, true, this);
        }
        consume();
    }

    /** Match a char in the inclusive range [c1..c2] or throw; consumes on success. */
    public void matchRange(char c1, char c2) throws MismatchedCharException, CharStreamException {
        if (LA(1) < c1 || LA(1) > c2) throw new MismatchedCharException(LA(1), c1, c2, false, this);
        consume();
    }

    /** Bump the line counter and reset column to 1. */
    public void newline() {
        inputState.line++;
        inputState.column = 1;
    }

    /** advance the current column number by an appropriate amount
     * according to tab size. This method is called from consume().
     */
    public void tab() {
        int c = getColumn();
        int nc = ( ((c-1)/tabsize) + 1) * tabsize + 1; // calculate tab stop
        setColumn( nc );
    }

    public void setTabSize( int size ) {
        tabsize = size;
    }

    public int getTabSize() {
        return tabsize;
    }

    /** @see #panic(String)
     */
    public void panic() {
        System.err.println("CharScanner: panic");
        Utils.error("");
    }

    /** This method is executed by ANTLR internally when it detected an illegal
     * state that cannot be recovered from.
     * The default implementation of this method calls
     * {@link java.lang.System.exit(int)} and writes directly to
     * {@link java.lang.System.err)} , which is usually not appropriate when
     * a translator is embedded into a larger application. <em>It is highly
     * recommended that this method be overridden to handle the error in a
     * way appropriate for your application (e.g. throw an unchecked
     * exception)</em>.
     */
    public void panic(String s) {
        System.err.println("CharScanner; panic: " + s);
        Utils.error(s);
    }

    /** Parser error-reporting function can be overridden in subclass */
    public void reportError(RecognitionException ex) {
        System.err.println(ex);
    }

    /** Parser error-reporting function can be overridden in subclass */
    public void reportError(String s) {
        if (getFilename() == null) {
            System.err.println("error: " + s);
        }
        else {
            System.err.println(getFilename() + ": error: " + s);
        }
    }

    /** Parser warning-reporting function can be overridden in subclass */
    public void reportWarning(String s) {
        if (getFilename() == null) {
            System.err.println("warning: " + s);
        }
        else {
            System.err.println(getFilename() + ": warning: " + s);
        }
    }

    /** Clear the token text and latch the current position as the token start. */
    public void resetText() {
        text.setLength(0);
        inputState.tokenStartColumn = inputState.column;
        inputState.tokenStartLine = inputState.line;
    }

    public void rewind(int pos) {
        inputState.input.rewind(pos);
        // RK: should not be here, it is messing up column calculation
        // setColumn(inputState.tokenStartColumn);
    }

    public void setCaseSensitive(boolean t) {
        caseSensitive = t;
    }

    public void setCommitToPath(boolean commit) {
        commitToPath = commit;
    }

    public void setFilename(String f) {
        inputState.filename = f;
    }

    public void setLine(int line) {
        inputState.line = line;
    }

    /** Replace the current token text with s (also re-latches the token start). */
    public void setText(String s) {
        resetText();
        text.append(s);
    }

    /** Load the token class by name; panics (does not throw) when missing. */
    public void setTokenObjectClass(String cl) {
        try {
            tokenObjectClass = Utils.loadClass(cl);
        }
        catch (ClassNotFoundException ce) {
            panic("ClassNotFoundException: " + cl);
        }
    }

    // Test the token text against the literals table
    // Override this method to perform a different literals test
    public int testLiteralsTable(int ttype) {
        hashString.setBuffer(text.getBuffer(), text.length());
        Integer literalsIndex = (Integer)literals.get(hashString);
        if (literalsIndex != null) {
            ttype = literalsIndex.intValue();
        }
        return ttype;
    }

    /** Test the text passed in against the literals table
     * Override this method to perform a different literals test
     * This is used primarily when you want to test a portion of
     * a token.
     */
    public int testLiteralsTable(String text, int ttype) {
        ANTLRHashString s = new ANTLRHashString(text, this);
        Integer literalsIndex = (Integer)literals.get(s);
        if (literalsIndex != null) {
            ttype = literalsIndex.intValue();
        }
        return ttype;
    }

    // Override this method to get more specific case handling
    public char toLower(char c) {
        return Character.toLowerCase(c);
    }

    /** Print traceDepth spaces; used by traceIn/traceOut. */
    public void traceIndent() {
        for (int i = 0; i < traceDepth; i++)
            System.out.print(' ');
    }

    public void traceIn(String rname) throws CharStreamException {
        traceDepth += 1;
        traceIndent();
        System.out.println("> lexer " + rname + "; c==" + LA(1));
    }

    public void traceOut(String rname) throws CharStreamException {
        traceIndent();
        System.out.println("< lexer " + rname + "; c==" + LA(1));
        traceDepth -= 1;
    }

    /** This method is called by YourLexer.nextToken() when the lexer has
     * hit EOF condition. EOF is NOT a character.
     * This method is not called if EOF is reached during
     * syntactic predicate evaluation or during evaluation
     * of normal lexical rules, which presumably would be
     * an IOException. This traps the "normal" EOF condition.
     *
     * uponEOF() is called after the complete evaluation of
     * the previous token and only if your parser asks
     * for another token beyond that last non-EOF token.
     *
     * You might want to throw token or char stream exceptions
     * like: "Heh, premature eof" or a retry stream exception
     * ("I found the end of this file, go back to referencing file").
     */
    public void uponEOF() throws TokenStreamException, CharStreamException {
    }
}

21
fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamException.java

@ -1,21 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharStreamException.java#2 $
*/
/**
 * Anything that goes wrong while generating a stream of characters
 */
public class CharStreamException extends ANTLRException {
    /**
     * Create a character-stream exception carrying an error message.
     * @param s descriptive error message
     */
    public CharStreamException(String s) {
        super(s);
    }
}

22
fine-antlr-old/src/main/java/com/fr/third/antlr/CharStreamIOException.java

@ -1,22 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CharStreamIOException.java#2 $
*/
import java.io.IOException;
/**
 * Wrap an IOException in a CharStreamException
 */
public class CharStreamIOException extends CharStreamException {
    // The underlying I/O failure, kept public for callers that unwrap it.
    public IOException io;

    /**
     * Wrap {@code io}, reusing its message as this exception's message.
     * @param io the original I/O error
     */
    public CharStreamIOException(IOException io) {
        super(io.getMessage());
        this.io = io;
    }
}

663
fine-antlr-old/src/main/java/com/fr/third/antlr/CodeGenerator.java

@ -1,663 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CodeGenerator.java#2 $
*/
import java.io.IOException;
import java.io.PrintWriter;
import com.fr.third.antlr.collections.impl.BitSet;
import com.fr.third.antlr.collections.impl.Vector;
/**A generic ANTLR code generator. All code generators
* Derive from this class.
*
* <p>
* A CodeGenerator knows about a Grammar data structure and
* a grammar analyzer. The Grammar is walked to generate the
* appropriate code for both a parser and lexer (if present).
* This interface may change slightly so that the lexer is
* itself living inside of a Grammar object (in which case,
* this class generates only one recognizer). The main method
* to call is <tt>gen()</tt>, which initiates all code gen.
*
* <p>
* The interaction of the code generator with the analyzer is
* simple: each subrule block calls deterministic() before generating
* code for the block. Method deterministic() sets lookahead caches
* in each Alternative object. Technically, a code generator
* doesn't need the grammar analyzer if all lookahead analysis
* is done at runtime, but this would result in a slower parser.
*
* <p>
* This class provides a set of support utilities to handle argument
* list parsing and so on.
*
* @author Terence Parr, John Lilley
* @version 2.00a
* @see com.fr.third.antlr.JavaCodeGenerator
* @see com.fr.third.antlr.DiagnosticCodeGenerator
* @see com.fr.third.antlr.LLkAnalyzer
* @see com.fr.third.antlr.Grammar
* @see com.fr.third.antlr.AlternativeElement
* @see com.fr.third.antlr.Lookahead
*/
public abstract class CodeGenerator {
protected com.fr.third.antlr.Tool antlrTool;
/** Current tab indentation for code output */
protected int tabs = 0;
/** Current output Stream */
transient protected PrintWriter currentOutput; // SAS: for proper text i/o
/** The grammar for which we generate code */
protected Grammar grammar = null;
/** List of all bitsets that must be dumped. These are Vectors of BitSet. */
protected Vector bitsetsUsed;
/** The grammar behavior */
protected DefineGrammarSymbols behavior;
/** The LLk analyzer */
protected LLkGrammarAnalyzer analyzer;
/** Object used to format characters in the target language.
 * subclass must initialize this to the language-specific formatter
 */
protected CharFormatter charFormatter;
/** Use option "codeGenDebug" to generate debugging output */
protected boolean DEBUG_CODE_GENERATOR = false;
/** Default values for code-generation thresholds */
protected static final int DEFAULT_MAKE_SWITCH_THRESHOLD = 2;
protected static final int DEFAULT_BITSET_TEST_THRESHOLD = 4;
/** If there are more than 8 long words to init in a bitset,
 * try to optimize it; e.g., detect runs of -1L and 0L.
 */
protected static final int BITSET_OPTIMIZE_INIT_THRESHOLD = 8;
/** This is a hint for the language-specific code generator.
 * A switch() or language-specific equivalent will be generated instead
 * of a series of if/else statements for blocks with number of alternates
 * greater than or equal to this number of non-predicated LL(1) alternates.
 * This is modified by the grammar option "codeGenMakeSwitchThreshold"
 */
protected int makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
/** This is a hint for the language-specific code generator.
 * A bitset membership test will be generated instead of an
 * ORed series of LA(k) comparisions for lookahead sets with
 * degree greater than or equal to this value.
 * This is modified by the grammar option "codeGenBitsetTestThreshold"
 */
protected int bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
// NOTE(review): not referenced in the visible portion of this class — verify before removing.
private static boolean OLD_ACTION_TRANSLATOR = true;
/** Suffix and extension for the generated token-types interchange file. */
public static String TokenTypesFileSuffix = "TokenTypes";
public static String TokenTypesFileExt = ".txt";
/** Construct code generator base class */
public CodeGenerator() {
}
/** Write {@code s} verbatim to the current output stream; null is a no-op.
 * @param s the text to emit, or null
 */
protected void _print(String s) {
    if (s == null) {
        return;
    }
    currentOutput.print(s);
}
/** Print an action without leading tabs, attempting to
 * preserve the current indentation level for multi-line actions
 * Ignored if string is null.  Leading/trailing whitespace is trimmed,
 * each embedded newline (\n, \r, or \r\n) restarts the current tab
 * indent, and whitespace right after a newline is swallowed.
 * @param s The action string to output
 */
protected void _printAction(String s) {
    if (s == null) {
        return;
    }
    // Skip leading newlines, tabs and spaces
    int start = 0;
    while (start < s.length() && Character.isSpaceChar(s.charAt(start))) {
        start++;
    }
    // Skip trailing newlines, tabs and spaces
    int end = s.length() - 1;
    while (end > start && Character.isSpaceChar(s.charAt(end))) {
        end--;
    }
    char c = 0;
    for (int i = start; i <= end;) {
        c = s.charAt(i);
        i++;
        boolean newline = false;
        switch (c) {
            case '\n':
                newline = true;
                break;
            case '\r':
                // Treat \r\n as a single line break.
                if (i <= end && s.charAt(i) == '\n') {
                    i++;
                }
                newline = true;
                break;
            default:
                currentOutput.print(c);
                break;
        }
        if (newline) {
            currentOutput.println();
            printTabs();
            // Absorb leading whitespace
            while (i <= end && Character.isSpaceChar(s.charAt(i))) {
                i++;
            }
            newline = false;
        }
    }
    currentOutput.println();
}
/** Write {@code s} plus a newline to the current output stream; null is a no-op.
 * @param s the text to emit, or null
 */
protected void _println(String s) {
    if (s == null) {
        return;
    }
    currentOutput.println(s);
}
/** Decide whether a sorted set-element array denotes one contiguous run.
 * @param elems element array, usually from BitSet.toArray()
 * @return true only for three or more strictly consecutive values
 */
public static boolean elementsAreRange(int[] elems) {
    final int n = elems.length;
    if (n == 0) {
        return false;
    }
    final int first = elems[0];
    final int last = elems[n - 1];
    if (n <= 2) {
        // A "range" here means at least three consecutive values.
        return false;
    }
    if (last - first + 1 > n) {
        // Span is wider than the element count: gaps must exist.
        return false;
    }
    // Verify each interior element continues the consecutive run.
    int expected = first + 1;
    for (int i = 1; i < n - 1; i++) {
        if (elems[i] != expected) {
            return false;
        }
        expected++;
    }
    return true;
}
/** Get the identifier portion of an argument-action token.
 * The ID of an action is assumed to be a trailing identifier.
 * Specific code-generators may want to override this
 * if the language has unusual declaration syntax.
 * @param t The action token
 * @return A string containing the text of the identifier
 */
protected String extractIdOfAction(Token t) {
    // Delegate to the string overload, using the token position for diagnostics.
    return extractIdOfAction(t.getText(), t.getLine(), t.getColumn());
}
/** Get the identifier portion of an argument-action.
 * The ID of an action is assumed to be a trailing identifier.
 * Specific code-generators may want to override this
 * if the language has unusual declaration syntax.
 * @param s The action text
 * @param line Line used for error reporting.
 * @param column Column used for error reporting.
 * @return A string containing the text of the identifier
 */
protected String extractIdOfAction(String s, int line, int column) {
    s = removeAssignmentFromDeclaration(s);
    // Scan backwards; the first non-identifier character marks where the
    // trailing identifier begins.
    for (int i = s.length() - 2; i >= 0; i--) {
        // TODO: make this work for language-independent identifiers?
        char ch = s.charAt(i);
        if (!(Character.isLetterOrDigit(ch) || ch == '_')) {
            // Found end of type part
            return s.substring(i + 1);
        }
    }
    // Something is bogus, but we cannot parse the language-specific
    // actions any better. The compiler will have to catch the problem.
    antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
    return "";
}
/** Get the type string out of an argument-action token.
 * The type of an action is assumed to precede a trailing identifier
 * Specific code-generators may want to override this
 * if the language has unusual declaration syntax.
 * @param t The action token
 * @return A string containing the text of the type
 */
protected String extractTypeOfAction(Token t) {
    // Delegate to the string overload, using the token position for diagnostics.
    return extractTypeOfAction(t.getText(), t.getLine(), t.getColumn());
}
/** Get the type portion of an argument-action.
 * The type of an action is assumed to precede a trailing identifier
 * Specific code-generators may want to override this
 * if the language has unusual declaration syntax.
 * @param s The action text
 * @param line Line used for error reporting.
 * @param column Column used for error reporting.
 * @return A string containing the text of the type
 */
protected String extractTypeOfAction(String s, int line, int column) {
    s = removeAssignmentFromDeclaration(s);
    // Scan backwards; everything up to and including the first
    // non-identifier character is the type portion.
    for (int i = s.length() - 2; i >= 0; i--) {
        // TODO: make this work for language-independent identifiers?
        char ch = s.charAt(i);
        if (!(Character.isLetterOrDigit(ch) || ch == '_')) {
            // Found end of type part
            return s.substring(0, i + 1);
        }
    }
    // Something is bogus, but we cannot parse the language-specific
    // actions any better. The compiler will have to catch the problem.
    antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
    return "";
}
/** Generate the code for all grammars
 */
public abstract void gen();
/** Generate code for the given grammar element.
 * @param action The {...} action to generate
 */
public abstract void gen(ActionElement action);
/** Generate code for the given grammar element.
 * @param blk The "x|y|z|..." block to generate
 */
public abstract void gen(AlternativeBlock blk);
/** Generate code for the given grammar element.
 * @param end The block-end element to generate. Block-end
 * elements are synthesized by the grammar parser to represent
 * the end of a block.
 */
public abstract void gen(BlockEndElement end);
/** Generate code for the given grammar element.
 * @param atom The character literal reference to generate
 */
public abstract void gen(CharLiteralElement atom);
/** Generate code for the given grammar element.
 * @param r The character-range reference to generate
 */
public abstract void gen(CharRangeElement r);
/** Generate the code for a lexer */
public abstract void gen(LexerGrammar g) throws IOException;
/** Generate code for the given grammar element.
 * @param blk The (...)+ block to generate
 */
public abstract void gen(OneOrMoreBlock blk);
/** Generate the code for a parser */
public abstract void gen(ParserGrammar g) throws IOException;
/** Generate code for the given grammar element.
 * @param rr The rule-reference to generate
 */
public abstract void gen(RuleRefElement rr);
/** Generate code for the given grammar element.
 * @param atom The string-literal reference to generate
 */
public abstract void gen(StringLiteralElement atom);
/** Generate code for the given grammar element.
 * @param r The token-range reference to generate
 */
public abstract void gen(TokenRangeElement r);
/** Generate code for the given grammar element.
 * @param atom The token-reference to generate
 */
public abstract void gen(TokenRefElement atom);
/** Generate code for the given grammar element.
 * @param t The tree to generate code for.
 */
public abstract void gen(TreeElement t);
/** Generate the code for a tree walker */
public abstract void gen(TreeWalkerGrammar g) throws IOException;
/** Generate code for the given grammar element.
 * @param wc The wildcard element to generate
 */
public abstract void gen(WildcardElement wc);
/** Generate code for the given grammar element.
 * @param blk The (...)* block to generate
 */
public abstract void gen(ZeroOrMoreBlock blk);
/** Generate the token types as a text file for persistence across shared lexer/parser.
 * Output format: a "// $ANTLR ..." banner, the vocab name, then one
 * NAME=type (or label= and "literal"=type) line per user-defined token.
 * @param tm token manager holding the vocabulary to dump
 * @throws IOException if the output file cannot be written
 */
protected void genTokenInterchange(TokenManager tm) throws IOException {
    // Open the token output Java file and set the currentOutput stream
    String fName = tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt;
    currentOutput = antlrTool.openOutputFile(fName);
    println("// $ANTLR " + Tool.version + ": " +
            antlrTool.fileMinusPath(antlrTool.grammarFile) +
            " -> " +
            fName +
            "$");
    tabs = 0;
    // Header
    println(tm.getName() + " // output token vocab name");
    // Generate a definition for each token type
    Vector v = tm.getVocabulary();
    for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
        String s = (String)v.elementAt(i);
        if (DEBUG_CODE_GENERATOR) {
            System.out.println("gen persistence file entry for: " + s);
        }
        // Skip null entries and internal "<...>" placeholder names.
        if (s != null && !s.startsWith("<")) {
            // if literal, find label
            if (s.startsWith("\"")) {
                StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
                if (sl != null && sl.label != null) {
                    print(sl.label + "=");
                }
                println(s + "=" + i);
            }
            else {
                print(s);
                // check for a paraphrase
                TokenSymbol ts = (TokenSymbol)tm.getTokenSymbol(s);
                if (ts == null) {
                    antlrTool.warning("undefined token symbol: " + s);
                }
                else {
                    if (ts.getParaphrase() != null) {
                        print("(" + ts.getParaphrase() + ")");
                    }
                }
                println("=" + i);
            }
        }
    }
    // Close the tokens output file
    currentOutput.close();
    currentOutput = null;
}
/** Process a string for a simple expression for use in xx/action.g
 * it is used to cast simple tokens/references to the right type for
 * the generated language.  This base implementation returns the string
 * unchanged; language-specific generators may override.
 * @param str A String.
 */
public String processStringForASTConstructor(String str) {
    return str;
}
/** Get a string for an expression to generate creation of an AST subtree.
 * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
 */
public abstract String getASTCreateString(Vector v);
/** Get a string for an expression to generate creating of an AST node
 * @param atom The grammar atom whose AST node is being created
 * @param str The text of the arguments to the AST construction
 */
public abstract String getASTCreateString(GrammarAtom atom, String str);
/** Build the deterministic name for the bitset stored at {@code index}
 * in the generation list; list positions never change, so names are stable.
 * Specific code-generators may want to override this
 * if the language does not allow '_' or numerals in identifiers.
 * @param index The index of the bitset in the bitset list.
 */
protected String getBitsetName(int index) {
    return "_tokenSet_".concat(Integer.toString(index));
}
/** Lexer rule {@code id} is generated as a method named {@code m<id>}. */
public static String encodeLexerRuleName(String id) {
    StringBuilder name = new StringBuilder("m");
    name.append(id); // append(null) renders "null", matching string concatenation
    return name.toString();
}
/** Inverse of {@link #encodeLexerRuleName}: strip the leading 'm'.
 * @return the rule name, or null when {@code id} is null
 */
public static String decodeLexerRuleName(String id) {
    return (id == null) ? null : id.substring(1);
}
/** Map an identifier to its corresponding tree-node variable.
 * This is context-sensitive, depending on the rule and alternative
 * being generated
 * @param id The identifier name to map
 * @param tInfo Translation bookkeeping for the current action
 * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
 */
public abstract String mapTreeId(String id, ActionTransInfo tInfo);
/** Add a bitset to the list of bitsets to be generated.
 * if the bitset is already in the list, ignore the request.
 * Always adds the bitset to the end of the list, so the
 * caller can rely on the position of bitsets in the list.
 * The returned position can be used to format the bitset
 * name, since it is invariant.
 * @param p Bit set to mark for code generation
 * @return The position of the bitset in the list.
 */
protected int markBitsetForGen(BitSet p) {
    // Is the bitset (or an identical one) already marked for gen?
    for (int i = 0; i < bitsetsUsed.size(); i++) {
        BitSet set = (BitSet)bitsetsUsed.elementAt(i);
        if (p.equals(set)) {
            // Use the identical one already stored
            return i;
        }
    }
    // Add the new bitset; a clone is stored so later mutation of p
    // cannot change what gets generated.
    bitsetsUsed.appendElement(p.clone());
    return bitsetsUsed.size() - 1;
}
/** Emit the current tab indent followed by {@code s} to the
 *  currentOutput stream; a null string is silently ignored.
 * @param s The string to output.
 */
protected void print(String s) {
    if (s == null) {
        return;
    }
    printTabs();
    currentOutput.print(s);
}
/** Emit an action with leading tabs, attempting to preserve the current
 *  indentation level across multi-line actions; a null string is ignored.
 * @param s The action string to output
 */
protected void printAction(String s) {
    if (s == null) {
        return;
    }
    printTabs();
    _printAction(s);
}
/** Emit the current tab indent, {@code s}, and a newline to the
 *  currentOutput stream; a null string is silently ignored.
 * @param s The string to output
 */
protected void println(String s) {
    if (s == null) {
        return;
    }
    printTabs();
    currentOutput.println(s);
}
/** Write the current indentation: one tab character per unit of the
 *  {@code tabs} counter, to the currentOutput stream.
 */
protected void printTabs() {
    for (int n = 0; n < tabs; n++) {
        currentOutput.print('\t');
    }
}
/** Lexically process $ and # references within the action.
 * This will replace #id and #(...) with the appropriate
 * function calls and/or variables etc...
 * @param actionStr  the raw action text from the grammar
 * @param line       source line of the action (for error reporting)
 * @param currentRule the rule the action appears in, or null
 * @param tInfo      receives translation bookkeeping for the caller
 * @return the rewritten action text in the target language
 */
protected abstract String processActionForSpecialSymbols(String actionStr,
                                                         int line,
                                                         RuleBlock currentRule,
                                                         ActionTransInfo tInfo);
/** Compute FOLLOW(k) of the named rule, register its bitset for
 *  generation, and return the generated bitset's name.
 * @return the bitset name, or null when {@code ruleName} is not a rule
 */
public String getFOLLOWBitSet(String ruleName, int k) {
    GrammarSymbol sym = grammar.getSymbol(ruleName);
    if (sym instanceof RuleSymbol) {
        RuleBlock body = ((RuleSymbol)sym).getBlock();
        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(k, body.endNode);
        return getBitsetName(markBitsetForGen(follow.fset));
    }
    return null;
}
/** Compute FIRST(k) of the named rule, register its bitset for
 *  generation, and return the generated bitset's name.
 * @return the bitset name, or null when {@code ruleName} is not a rule
 */
public String getFIRSTBitSet(String ruleName, int k) {
    GrammarSymbol sym = grammar.getSymbol(ruleName);
    if (sym instanceof RuleSymbol) {
        RuleBlock body = ((RuleSymbol)sym).getBlock();
        Lookahead first = grammar.theLLkAnalyzer.look(k, body);
        return getBitsetName(markBitsetForGen(first.fset));
    }
    return null;
}
/**
 * Remove the assignment portion of a declaration, if any.
 * A declaration containing '=' carries an initializer; only the text
 * before the first '=' is kept (trimmed).
 * @param d the declaration
 * @return the declaration without any assignment portion
 */
protected String removeAssignmentFromDeclaration(String d) {
    // Compute the index once instead of twice.
    int eq = d.indexOf('=');
    if (eq >= 0) {
        d = d.substring(0, eq).trim();
    }
    return d;
}
/** Set all fields back like one just created, so a single generator
 *  instance can process several grammars in sequence. */
private void reset() {
    tabs = 0;
    // Allocate a fresh list of bitsets tagged for code generation
    bitsetsUsed = new Vector();
    currentOutput = null;
    grammar = null;
    DEBUG_CODE_GENERATOR = false;
    // thresholds revert to their compile-time defaults; setGrammar()
    // re-reads them from grammar options afterwards
    makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
    bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
}
/** Strip the leading 'm' from a mangled lexer rule name; same operation
 *  as {@link #decodeLexerRuleName} but without the null guard.
 * @param id the encoded rule name (must be non-null and non-empty)
 */
public static String reverseLexerRuleName(String id) {
    // substring(1) is equivalent to the former substring(1, id.length())
    return id.substring(1);
}
/** Install the LL(k) grammar analyzer this generator should consult. */
public void setAnalyzer(LLkGrammarAnalyzer analyzer_) {
    this.analyzer = analyzer_;
}
/** Install the symbol-definition behavior this generator should consult. */
public void setBehavior(DefineGrammarSymbols behavior_) {
    this.behavior = behavior_;
}
/** Set a grammar for the code generator to use.
 *  Resets all generator state first, then re-reads the per-grammar
 *  generic options that tune code generation:
 *  codeGenMakeSwitchThreshold, codeGenBitsetTestThreshold, codeGenDebug.
 *  Malformed option values are reported through the tool's error channel
 *  and the compile-time default is kept.
 */
protected void setGrammar(Grammar g) {
    reset();
    grammar = g;
    // Lookup make-switch threshold in the grammar generic options
    if (grammar.hasOption("codeGenMakeSwitchThreshold")) {
        try {
            makeSwitchThreshold = grammar.getIntegerOption("codeGenMakeSwitchThreshold");
        }
        catch (NumberFormatException e) {
            // report at the option token's position; keep the default value
            Token tok = grammar.getOption("codeGenMakeSwitchThreshold");
            antlrTool.error(
                "option 'codeGenMakeSwitchThreshold' must be an integer",
                grammar.getClassName(),
                tok.getLine(), tok.getColumn()
            );
        }
    }
    // Lookup bitset-test threshold in the grammar generic options
    if (grammar.hasOption("codeGenBitsetTestThreshold")) {
        try {
            bitsetTestThreshold = grammar.getIntegerOption("codeGenBitsetTestThreshold");
        }
        catch (NumberFormatException e) {
            // report at the option token's position; keep the default value
            Token tok = grammar.getOption("codeGenBitsetTestThreshold");
            antlrTool.error(
                "option 'codeGenBitsetTestThreshold' must be an integer",
                grammar.getClassName(),
                tok.getLine(), tok.getColumn()
            );
        }
    }
    // Lookup debug code-gen in the grammar generic options; only the exact
    // literals "true"/"false" are accepted
    if (grammar.hasOption("codeGenDebug")) {
        Token t = grammar.getOption("codeGenDebug");
        if (t.getText().equals("true")) {
            DEBUG_CODE_GENERATOR = true;
        }
        else if (t.getText().equals("false")) {
            DEBUG_CODE_GENERATOR = false;
        }
        else {
            antlrTool.error("option 'codeGenDebug' must be true or false", grammar.getClassName(), t.getLine(), t.getColumn());
        }
    }
}
/** Install the Tool instance used for error reporting and file output. */
public void setTool(Tool tool) {
    this.antlrTool = tool;
}
}

59
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonAST.java

@ -1,59 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonAST.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** Common AST node implementation */
/** Common AST node implementation: a BaseAST carrying a token type and
 *  its text.  A freshly constructed node has type Token.INVALID_TYPE and
 *  null text until one of the initialize() overloads is called. */
public class CommonAST extends BaseAST {
    int ttype = Token.INVALID_TYPE;
    String text;

    public CommonAST() {
    }

    /** Build a node initialized from the given token's type and text. */
    public CommonAST(Token tok) {
        initialize(tok);
    }

    /** Get the token text for this node */
    public String getText() {
        return text;
    }

    /** Get the token type for this node */
    public int getType() {
        return ttype;
    }

    /** Initialize from an explicit type and text. */
    public void initialize(int t, String txt) {
        setType(t);
        setText(txt);
    }

    /** Initialize by copying another AST node's text and type. */
    public void initialize(AST t) {
        setText(t.getText());
        setType(t.getType());
    }

    /** Initialize by copying a token's text and type. */
    public void initialize(Token tok) {
        setText(tok.getText());
        setType(tok.getType());
    }

    /** Set the token text for this node */
    public void setText(String newText) {
        text = newText;
    }

    /** Set the token type for this node */
    public void setType(int newType) {
        ttype = newType;
    }
}

47
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonASTWithHiddenTokens.java

@ -1,47 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonASTWithHiddenTokens.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** A CommonAST whose initialization copies hidden token
* information from the Token used to create a node.
*/
/** A CommonAST whose initialization additionally copies hidden-token
 *  links from the Token (or AST) used to create the node. */
public class CommonASTWithHiddenTokens extends CommonAST {
    protected CommonHiddenStreamToken hiddenBefore, hiddenAfter; // references to hidden tokens

    public CommonASTWithHiddenTokens() {
        super();
    }

    public CommonASTWithHiddenTokens(Token tok) {
        super(tok);
    }

    public CommonHiddenStreamToken getHiddenAfter() {
        return hiddenAfter;
    }

    public CommonHiddenStreamToken getHiddenBefore() {
        return hiddenBefore;
    }

    /** Copy hidden-token links from the source node, then type/text.
     *  The source must itself be a CommonASTWithHiddenTokens. */
    public void initialize(AST t) {
        CommonASTWithHiddenTokens source = (CommonASTWithHiddenTokens)t;
        hiddenBefore = source.getHiddenBefore();
        hiddenAfter = source.getHiddenAfter();
        super.initialize(t);
    }

    /** Copy type/text from the token, then its hidden-token links.
     *  The token must be a CommonHiddenStreamToken. */
    public void initialize(Token tok) {
        CommonHiddenStreamToken hst = (CommonHiddenStreamToken)tok;
        super.initialize(hst);
        hiddenBefore = hst.getHiddenBefore();
        hiddenAfter = hst.getHiddenAfter();
    }
}

41
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonHiddenStreamToken.java

@ -1,41 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonHiddenStreamToken.java#2 $
*/
/** A CommonToken that participates in a doubly-linked chain of hidden
 *  tokens (whitespace/comments) maintained by the token stream filter. */
public class CommonHiddenStreamToken extends CommonToken {
    protected CommonHiddenStreamToken hiddenBefore;
    protected CommonHiddenStreamToken hiddenAfter;

    public CommonHiddenStreamToken() {
        super();
    }

    public CommonHiddenStreamToken(int t, String txt) {
        super(t, txt);
    }

    public CommonHiddenStreamToken(String s) {
        super(s);
    }

    /** Hidden token that follows this one, or null. */
    public CommonHiddenStreamToken getHiddenAfter() {
        return hiddenAfter;
    }

    /** Hidden token that precedes this one, or null. */
    public CommonHiddenStreamToken getHiddenBefore() {
        return hiddenBefore;
    }

    protected void setHiddenAfter(CommonHiddenStreamToken tok) {
        hiddenAfter = tok;
    }

    protected void setHiddenBefore(CommonHiddenStreamToken tok) {
        hiddenBefore = tok;
    }
}

56
fine-antlr-old/src/main/java/com/fr/third/antlr/CommonToken.java

@ -1,56 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/CommonToken.java#2 $
*/
/** Standard token implementation: adds line, column and text to the
 *  bare Token type. */
public class CommonToken extends Token {
    // most tokens will want line and text information
    protected int line;
    protected String text = null;
    protected int col;

    public CommonToken() {
    }

    public CommonToken(int t, String txt) {
        type = t;
        setText(txt);
    }

    public CommonToken(String s) {
        text = s;
    }

    public int getLine() {
        return line;
    }

    public String getText() {
        return text;
    }

    /** Return token's start column */
    public int getColumn() {
        return col;
    }

    public void setLine(int l) {
        line = l;
    }

    public void setText(String s) {
        text = s;
    }

    public void setColumn(int c) {
        col = c;
    }

    /** Debug rendering: text, type, line and column. */
    public String toString() {
        return "[\"" + getText() + "\",<" + type + ">,line=" + line + ",col=" + col + "]";
    }
}

33
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultFileLineFormatter.java

@ -1,33 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefaultFileLineFormatter.java#2 $
*/
/** Default formatter for the "file:line:column: " prefix attached to
 *  tool diagnostics. */
public class DefaultFileLineFormatter extends FileLineFormatter {
    /**
     * Build the diagnostic prefix.  Each part is optional: fileName may be
     * null, and line/column may be -1, in which case they are omitted.
     * When there is no file name but a line is known, "line " is prepended.
     * The result always ends with a single space.
     *
     * @param fileName source file name, or null
     * @param line     1-based line, or -1 if unknown
     * @param column   1-based column, or -1 if unknown
     */
    public String getFormatString(String fileName, int line, int column) {
        StringBuffer buf = new StringBuffer();
        if (fileName != null) {
            // append(char) avoids the intermediate string the old
            // fileName + ":" concatenation created
            buf.append(fileName).append(':');
        }
        if (line != -1) {
            if (fileName == null) {
                buf.append("line ");
            }
            buf.append(line);
            if (column != -1) {
                buf.append(':').append(column);
            }
            buf.append(':');
        }
        buf.append(' ');
        return buf.toString();
    }
}

73
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultJavaCodeGeneratorPrintWriterManager.java

@ -1,73 +0,0 @@
package com.fr.third.antlr;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
/** Manages the PrintWriter used while generating Java code and records a
 *  source map (SMAP) relating generated lines back to grammar lines.
 *  One instance handles one output file at a time. */
public class DefaultJavaCodeGeneratorPrintWriterManager implements JavaCodeGeneratorPrintWriterManager {
    private Grammar grammar;
    // writer wrapper that records line mappings as output is produced
    private PrintWriterWithSMAP smapOutput;
    private PrintWriter currentOutput;
    private Tool tool;
    // output file name -> its source map; filled by finishOutput()
    private Map sourceMaps = new HashMap();
    private String currentFileName;

    /** Open output named after the grammar's class. */
    public PrintWriter setupOutput(Tool tool, Grammar grammar) throws IOException {
        return setupOutput(tool, grammar, null);
    }

    /** Open output with an explicit file name (no grammar).
     *  NOTE(review): passing a null fileName here would NPE below since
     *  grammar is also null — callers must supply one. */
    public PrintWriter setupOutput(Tool tool, String fileName) throws IOException {
        return setupOutput(tool, null, fileName);
    }

    /** Open "<fileName>.java" (defaulting fileName to the grammar class
     *  name) and make it the current output. */
    public PrintWriter setupOutput(Tool tool, Grammar grammar, String fileName) throws IOException {
        this.tool = tool;
        this.grammar = grammar;
        if (fileName == null)
            fileName = grammar.getClassName();
        smapOutput = new PrintWriterWithSMAP(tool.openOutputFile(fileName + ".java"));
        currentFileName = fileName + ".java";
        currentOutput = smapOutput;
        return currentOutput;
    }

    /** Begin mapping generated output lines to the given grammar line. */
    public void startMapping(int sourceLine) {
        smapOutput.startMapping(sourceLine);
    }

    /** Map only the next generated line to the given grammar line. */
    public void startSingleSourceLineMapping(int sourceLine) {
        smapOutput.startSingleSourceLineMapping(sourceLine);
    }

    /** Stop mapping generated lines. */
    public void endMapping() {
        smapOutput.endMapping();
    }

    /** Close the current output; when a grammar is attached, also write
     *  its ".smap" file and remember the source map. */
    public void finishOutput() throws IOException {
        currentOutput.close();
        if (grammar != null) {
            PrintWriter smapWriter;
            smapWriter = tool.openOutputFile(grammar.getClassName() + ".smap");
            // the SMAP format wants the bare grammar file name, no path
            String grammarFile = grammar.getFilename();
            grammarFile = grammarFile.replace('\\', '/');
            int lastSlash = grammarFile.lastIndexOf('/');
            if (lastSlash != -1)
                grammarFile = grammarFile.substring(lastSlash+1);
            smapOutput.dump(smapWriter, grammar.getClassName(), grammarFile);
            sourceMaps.put(currentFileName, smapOutput.getSourceMap());
        }
        currentOutput = null;
    }

    /** All source maps collected so far, keyed by output file name. */
    public Map getSourceMaps() {
        return sourceMaps;
    }

    /** Current line number in the generated output. */
    public int getCurrentOutputLine()
    {
        return smapOutput.getCurrentOutputLine();
    }
}

118
fine-antlr-old/src/main/java/com/fr/third/antlr/DefaultToolErrorHandler.java

@ -1,118 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefaultToolErrorHandler.java#2 $
*/
/** Default sink for grammar-analysis warnings: formats nondeterminism
 *  (ambiguity) reports and forwards them to the Tool. */
class DefaultToolErrorHandler implements ToolErrorHandler {
    DefaultToolErrorHandler(com.fr.third.antlr.Tool tool) {
        antlrTool = tool;
    }
    private final com.fr.third.antlr.Tool antlrTool;
    CharFormatter javaCharFormatter = new JavaCharFormatter();

    /** Dump token/character sets to a string array suitable for
     * {@link com.fr.third.antlr.Tool#warning(String[], String, int, int)}.
     * @param output The array that will contain the token/character set dump,
     *        one element per k (lookahead) value
     * @param outputStartIndex The index into <code>output</code> that the
     *        dump should start at.
     * @param lexicalAnalysis true for lexical rule
     * @param depth The depth of the ambiguity
     * @param sets An array of bitsets containing the ambiguities
     */
    private void dumpSets(String[] output,
                          int outputStartIndex,
                          Grammar grammar,
                          boolean lexicalAnalysis,
                          int depth,
                          Lookahead[] sets) {
        StringBuffer line = new StringBuffer(100);
        for (int i = 1; i <= depth; i++) {
            line.append("k==").append(i).append(':');
            if (lexicalAnalysis) {
                // lexer sets are character ranges; render via the Java formatter
                String bits = sets[i].fset.toStringWithRanges(",", javaCharFormatter);
                if (sets[i].containsEpsilon()) {
                    line.append("<end-of-token>");
                    if (bits.length() > 0) {
                        line.append(',');
                    }
                }
                line.append(bits);
            } else {
                // parser sets are token types; render with the vocabulary names
                line.append(sets[i].fset.toString(",", grammar.tokenManager.getVocabulary()));
            }
            output[outputStartIndex++] = line.toString();
            line.setLength(0); // reuse the buffer for the next k
        }
    }

    /** Issue a warning about ambiguity between alternates.
     * @param blk The block being analyzed
     * @param lexicalAnalysis true for lexical rule
     * @param depth The depth of the ambiguity
     * @param sets An array of bitsets containing the ambiguities
     * @param altIdx1 The zero-based index of the first ambiguous alternative
     * @param altIdx2 The zero-based index of the second ambiguous alternative
     */
    public void warnAltAmbiguity(Grammar grammar,
                                 AlternativeBlock blk,
                                 boolean lexicalAnalysis,
                                 int depth,
                                 Lookahead[] sets,
                                 int altIdx1,
                                 int altIdx2)
    {
        final StringBuffer line = new StringBuffer(100);
        if (blk instanceof RuleBlock && ((RuleBlock)blk).isLexerAutoGenRule()) {
            // auto-generated nextToken rule: report the two colliding lexer
            // rules by their original (unmangled) names
            Alternative ai = blk.getAlternativeAt(altIdx1);
            Alternative aj = blk.getAlternativeAt(altIdx2);
            RuleRefElement rri = (RuleRefElement)ai.head;
            RuleRefElement rrj = (RuleRefElement)aj.head;
            String ri = CodeGenerator.reverseLexerRuleName(rri.targetRule);
            String rj = CodeGenerator.reverseLexerRuleName(rrj.targetRule);
            line.append("lexical nondeterminism between rules ");
            line.append(ri).append(" and ").append(rj).append(" upon");
        }
        else {
            if (lexicalAnalysis) {
                line.append("lexical ");
            }
            line.append("nondeterminism between alts ");
            line.append(altIdx1 + 1).append(" and ");
            line.append(altIdx2 + 1).append(" of block upon");
        }
        final String [] output = new String [depth + 1];
        output[0] = line.toString();
        dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
        antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
    }

    /** Issue a warning about ambiguity between an alternate and exit path.
     * @param blk The block being analyzed
     * @param lexicalAnalysis true for lexical rule
     * @param depth The depth of the ambiguity
     * @param sets An array of bitsets containing the ambiguities
     * @param altIdx The zero-based index of the ambiguous alternative
     */
    public void warnAltExitAmbiguity(Grammar grammar,
                                     BlockWithImpliedExitPath blk,
                                     boolean lexicalAnalysis,
                                     int depth,
                                     Lookahead[] sets,
                                     int altIdx
                                     )
    {
        String [] output = new String[depth + 2];
        output[0] = (lexicalAnalysis ? "lexical " : "") + "nondeterminism upon";
        dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
        output[depth + 1] = "between alt " + (altIdx + 1) + " and exit branch of block";
        antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
    }
}

811
fine-antlr-old/src/main/java/com/fr/third/antlr/DefineGrammarSymbols.java

@ -1,811 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DefineGrammarSymbols.java#2 $
*/
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.BitSet;
/**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
* the token and rule symbols to the grammar symbol table.
*
* Token types are assigned to token symbols in this class also.
* The token type for a token is done in the order seen (lexically).
*/
public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
// Contains all of the defined parser and lexer Grammars, indexed by name
protected Hashtable grammars = new Hashtable();
// Contains all the TokenManagers, indexed by name
protected Hashtable tokenManagers = new Hashtable();
// Current grammar (parser or lexer) being populated
protected Grammar grammar;
// The tool under which this is invoked
protected Tool tool;
// The grammar analyzer object
LLkAnalyzer analyzer;
// The command-line arguments passed to the tool.
// This allows each grammar to parse the arguments as it is created
String[] args;
// Name for the default token manager; '*' ensures it can never collide
// with a user-chosen vocabulary name
static final String DEFAULT_TOKENMANAGER_NAME = "*default";
// Header actions apply to all parsers unless redefined
// Contains all of the header actions, indexed by name ("" = unnamed header)
protected Hashtable headerActions = new Hashtable();
// Place where the preamble is stored until a grammar is defined to own it
Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
// The target language
String language = "Java";
// Counts of each grammar kind seen so far in this file
protected int numLexers = 0;
protected int numParsers = 0;
protected int numTreeParsers = 0;
/** Build the symbol-collecting behavior for one invocation of the tool. */
public DefineGrammarSymbols(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
    this.tool = tool_;
    this.args = args_;
    this.analyzer = analyzer_;
}
/** Record a string literal reference.  Outside a lexer grammar a literal
 *  behaves like a token: define it (once) with a fresh token type.
 *  Lexer grammars handle their own literals, so nothing happens there. */
public void _refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
    if (grammar instanceof LexerGrammar) {
        return;
    }
    String str = lit.getText();
    if (grammar.tokenManager.getTokenSymbol(str) == null) {
        // first sighting of this literal: register it as a token symbol
        StringLiteralSymbol sl = new StringLiteralSymbol(str);
        int tt = grammar.tokenManager.nextTokenType();
        sl.setTokenType(tt);
        grammar.tokenManager.define(sl);
    }
}
/** Record a token reference; a token not seen before is defined on the
 *  spot with the next available token type.  (An earlier revision
 *  considered warning about tokens defined outside the tokens section;
 *  the warning is deliberately not issued.) */
public void _refToken(Token assignId,
                      Token t,
                      Token label,
                      Token args,
                      boolean inverted,
                      int autoGenType,
                      boolean lastInRule) {
    String id = t.getText();
    if (grammar.tokenManager.tokenDefined(id)) {
        return;
    }
    int tt = grammar.tokenManager.nextTokenType();
    TokenSymbol ts = new TokenSymbol(id);
    ts.setTokenType(tt);
    grammar.tokenManager.define(ts);
}
/** Abort the processing of a grammar due to syntax errors: drop it from
 *  the registry (when it got far enough to have a name) and forget it. */
public void abortGrammar() {
    if (grammar != null) {
        String name = grammar.getClassName();
        if (name != null) {
            grammars.remove(name);
        }
    }
    grammar = null;
}
// --- No-op parse-event callbacks ------------------------------------
// This behavior only collects symbols; the structural events below carry
// no symbol information, so they are intentionally empty.  Subclasses
// that build the grammar structure override them.
public void beginAlt(boolean doAST_) {
}
public void beginChildList() {
}
// Exception handling
public void beginExceptionGroup() {
}
public void beginExceptionSpec(Token label) {
}
public void beginSubRule(Token label, Token start, boolean not) {
}
public void beginTree(Token tok) throws SemanticException {
}
/** Define a lexer or parser rule.  Lexer rules (TOKEN_REF) are stored
 *  under their mangled 'm'-prefixed name and also registered as token
 *  identifiers.  Redefining an already-defined rule is reported as an
 *  error; a prior forward reference is simply completed. */
public void defineRuleName(Token r,
                           String access,
                           boolean ruleAutoGen,
                           String docComment)
    throws SemanticException {
    String id = r.getText();
    if (r.type == ANTLRTokenTypes.TOKEN_REF) {
        // lexer rule: mangle the name used in the symbol table
        id = CodeGenerator.encodeLexerRuleName(id);
        // make sure the rule is also known as a token identifier
        if (!grammar.tokenManager.tokenDefined(r.getText())) {
            int tt = grammar.tokenManager.nextTokenType();
            TokenSymbol ts = new TokenSymbol(r.getText());
            ts.setTokenType(tt);
            grammar.tokenManager.define(ts);
        }
    }
    RuleSymbol rs;
    if (!grammar.isDefined(id)) {
        rs = new RuleSymbol(id);
        grammar.define(rs);
    }
    else {
        // seen before: a forward reference is fine, a second definition is not
        rs = (RuleSymbol)grammar.getSymbol(id);
        if (rs.isDefined()) {
            tool.error("redefinition of rule " + id, grammar.getFilename(), r.getLine(), r.getColumn());
        }
    }
    rs.setDefined();
    rs.access = access;
    rs.comment = docComment;
}
/** Define a token from tokens {...}.
 * Must be label and literal or just label or just a literal.
 * Handles every combination of "literal already known", "label already
 * known" and "both new", converting a plain TokenSymbol into a
 * StringLiteralSymbol when a label gains a literal.
 */
public void defineToken(Token tokname, Token tokliteral) {
    String name = null;
    String literal = null;
    if (tokname != null) {
        name = tokname.getText();
    }
    if (tokliteral != null) {
        literal = tokliteral.getText();
    }
    if (literal != null) {
        StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(literal);
        if (sl != null) {
            // This literal is known already.
            // If the literal has no label already, but we can provide
            // one here, then no problem, just map the label to the literal
            // and don't change anything else.
            // Otherwise, labels conflict: error.
            if (name == null || sl.getLabel() != null) {
                tool.warning("Redefinition of literal in tokens {...}: " + literal, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
                return;
            }
            else if (name != null) {
                // NOTE(review): name is provably non-null here (the branch
                // above returned otherwise), so this condition is redundant
                // but harmless.
                // The literal had no label, but new def does. Set it.
                sl.setLabel(name);
                // Also, map the label to the literal.
                grammar.tokenManager.mapToTokenSymbol(name, sl);
            }
        }
        // if they provide a name/label and that name/label already
        // exists, just hook this literal onto old token.
        if (name != null) {
            TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(name);
            if (ts != null) {
                // watch out that the label is not more than just a token.
                // If it already has a literal attached, then: conflict.
                if (ts instanceof StringLiteralSymbol) {
                    tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
                    return;
                }
                // a simple token symbol such as DECL is defined
                // must convert it to a StringLiteralSymbol with a
                // label by co-opting token type and killing old
                // TokenSymbol. Kill mapping and entry in vector
                // of token manager.
                // First, claim token type.
                int ttype = ts.getTokenType();
                // now, create string literal with label
                sl = new StringLiteralSymbol(literal);
                sl.setTokenType(ttype);
                sl.setLabel(name);
                // redefine this critter as a string literal
                grammar.tokenManager.define(sl);
                // make sure the label can be used also.
                grammar.tokenManager.mapToTokenSymbol(name, sl);
                return;
            }
            // here, literal was labeled but not by a known token symbol.
        }
        // brand-new literal (possibly labeled): allocate a token type
        sl = new StringLiteralSymbol(literal);
        int tt = grammar.tokenManager.nextTokenType();
        sl.setTokenType(tt);
        sl.setLabel(name);
        grammar.tokenManager.define(sl);
        if (name != null) {
            // make the label point at token symbol too
            grammar.tokenManager.mapToTokenSymbol(name, sl);
        }
    }
    // create a token in the token manager not a literal
    else {
        if (grammar.tokenManager.tokenDefined(name)) {
            tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokname.getLine(), tokname.getColumn());
            return;
        }
        int tt = grammar.tokenManager.nextTokenType();
        TokenSymbol ts = new TokenSymbol(name);
        ts.setTokenType(tt);
        grammar.tokenManager.define(ts);
    }
}
// More no-op structural callbacks; nothing to record at the symbol level.
public void endAlt() {
}
public void endChildList() {
}
public void endExceptionGroup() {
}
public void endExceptionSpec() {
}
public void endGrammar() {
}
/** Called after the optional options section, to compensate for
 * options that may not have been set.
 * Wires up the grammar's token manager for each of the four
 * importVocab/exportVocab combinations.
 * This method is bigger than it needs to be, but is much more
 * clear if I delineate all the cases.
 */
public void endOptions() {
    // NO VOCAB OPTIONS: share the file-wide default vocab, creating it
    // (named after this grammar) if this is the first grammar in the file.
    if (grammar.exportVocab == null && grammar.importVocab == null) {
        grammar.exportVocab = grammar.getClassName();
        // Can we get initial vocab from default shared vocab?
        if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
            // Use the already-defined token manager
            grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
            TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME);
            grammar.setTokenManager(tm);
            return;
        }
        // no shared vocab for file, make new one
        TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
        grammar.setTokenManager(tm);
        // Add the token manager to the list of token managers
        tokenManagers.put(grammar.exportVocab, tm);
        // no default vocab, so make this the default vocab
        tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
        return;
    }
    // NO OUTPUT, BUT HAS INPUT VOCAB: output vocab defaults to the class
    // name; clone an in-memory import or load it from its file.
    if (grammar.exportVocab == null && grammar.importVocab != null) {
        grammar.exportVocab = grammar.getClassName();
        // first make sure input!=output
        if (grammar.importVocab.equals(grammar.exportVocab)) {
            tool.warning("Grammar " + grammar.getClassName() +
                         " cannot have importVocab same as default output vocab (grammar name); ignored.");
            // kill importVocab option and try again: use default vocab
            grammar.importVocab = null;
            endOptions();
            return;
        }
        // check to see if the vocab is already in memory
        // (defined by another grammar in the file). Not normal situation.
        if (tokenManagers.containsKey(grammar.importVocab)) {
            // make a copy since we'll be generating a new output vocab
            // and we don't want to affect this one. Set the name to
            // the default output vocab==classname.
            TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
            TokenManager dup = (TokenManager)tm.clone();
            dup.setName(grammar.exportVocab);
            dup.setReadOnly(false);
            grammar.setTokenManager(dup);
            tokenManagers.put(grammar.exportVocab, dup);
            return;
        }
        // Must be a file, go get it.
        ImportVocabTokenManager tm =
            new ImportVocabTokenManager(grammar,
                                        grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
                                        grammar.exportVocab,
                                        tool);
        tm.setReadOnly(false); // since renamed, can write out
        // Add this token manager to the list so its tokens will be generated
        tokenManagers.put(grammar.exportVocab, tm);
        // Assign the token manager to this grammar.
        grammar.setTokenManager(tm);
        // set default vocab if none
        if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
        }
        return;
    }
    // OUTPUT VOCAB, BUT NO INPUT VOCAB: share an existing manager of the
    // same name or create a fresh one.
    if (grammar.exportVocab != null && grammar.importVocab == null) {
        // share with previous vocab if it exists
        if (tokenManagers.containsKey(grammar.exportVocab)) {
            // Use the already-defined token manager
            TokenManager tm = (TokenManager)tokenManagers.get(grammar.exportVocab);
            grammar.setTokenManager(tm);
            return;
        }
        // create new output vocab
        TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
        grammar.setTokenManager(tm);
        // Add the token manager to the list of token managers
        tokenManagers.put(grammar.exportVocab, tm);
        // set default vocab if none
        if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
        }
        return;
    }
    // BOTH INPUT AND OUTPUT VOCAB: import (clone or load) under the
    // explicit output name.
    if (grammar.exportVocab != null && grammar.importVocab != null) {
        // don't want input==output
        if (grammar.importVocab.equals(grammar.exportVocab)) {
            tool.error("exportVocab of " + grammar.exportVocab + " same as importVocab; probably not what you want");
        }
        // does the input vocab already exist in memory?
        if (tokenManagers.containsKey(grammar.importVocab)) {
            // make a copy since we'll be generating a new output vocab
            // and we don't want to affect this one.
            TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
            TokenManager dup = (TokenManager)tm.clone();
            dup.setName(grammar.exportVocab);
            dup.setReadOnly(false);
            grammar.setTokenManager(dup);
            tokenManagers.put(grammar.exportVocab, dup);
            return;
        }
        // Must be a file, go get it.
        ImportVocabTokenManager tm =
            new ImportVocabTokenManager(grammar,
                                        grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
                                        grammar.exportVocab,
                                        tool);
        tm.setReadOnly(false); // write it out as we've changed name
        // Add this token manager to the list so its tokens will be generated
        tokenManagers.put(grammar.exportVocab, tm);
        // Assign the token manager to this grammar.
        grammar.setTokenManager(tm);
        // set default vocab if none
        if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
        }
        return;
    }
}
// No-op callbacks: these grammar elements introduce no new symbols.
public void endRule(String r) {
}
public void endSubRule() {
}
public void endTree() {
}
public void hasError() {
}
public void noASTSubRule() {
}
public void oneOrMoreSubRule() {
}
public void optionalSubRule() {
}
public void setUserExceptions(String thr) {
}
public void refAction(Token action) {
}
public void refArgAction(Token action) {
}
public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
}
public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
}
public void refElementOption(Token option, Token value) {
}
public void refTokensSpecElementOption(Token tok, Token option, Token value) {
}
public void refExceptionHandler(Token exTypeAndName, Token action) {
}
/** Record a header action.  Header actions apply to all parsers and
 *  lexers; an unnamed header is stored under the empty key.  A second
 *  header with the same key is reported as an error (the later one
 *  still replaces the earlier in the table, as before). */
public void refHeaderAction(Token name, Token act) {
    final String key = (name == null)
        ? ""
        : StringUtils.stripFrontBack(name.getText(), "\"", "\"");
    // FIXME: depending on the mode the inserted header actions should
    // be checked for sanity.
    if (headerActions.containsKey(key)) {
        if (key.equals("")) {
            tool.error(act.getLine() + ": header action already defined");
        } else {
            tool.error(act.getLine() + ": header action '" + key + "' already defined");
        }
    }
    headerActions.put(key, act);
}
/** Text of the header action stored under {@code name}, or "" if none. */
public String getHeaderAction(String name) {
    Token t = (Token)headerActions.get(name);
    return (t == null) ? "" : t.getText();
}
/** Source line of the header action stored under {@code name}, or 0. */
public int getHeaderActionLine(String name) {
    Token t = (Token)headerActions.get(name);
    return (t == null) ? 0 : t.getLine();
}
// No-op: init actions introduce no symbols.
public void refInitAction(Token action) {
}
// No-op: member actions introduce no symbols.
public void refMemberAction(Token act) {
}
// Remember the preamble action until a grammar is defined to own it.
public void refPreambleAction(Token act) {
    thePreambleAction = act;
}
// No-op: return actions introduce no symbols.
public void refReturnAction(Token returnAction) {
}
/** Record a rule reference; an unknown rule is pre-defined as a forward
 *  reference.  TOKEN_REF means a lexer rule, which is stored under its
 *  mangled 'm'-prefixed name. */
public void refRule(Token idAssign,
                    Token r,
                    Token label,
                    Token args,
                    int autoGenType) {
    String id = r.getText();
    if (r.type == ANTLRTokenTypes.TOKEN_REF) {
        // lexer rule reference: use the mangled symbol-table name
        id = CodeGenerator.encodeLexerRuleName(id);
    }
    if (!grammar.isDefined(id)) {
        grammar.define(new RuleSymbol(id));
    }
}
// No-op: semantic predicates introduce no symbols.
public void refSemPred(Token pred) {
}
/** Reference a string literal; delegates to the helper that defines the
 *  literal as a token for non-lexer grammars. */
public void refStringLiteral(Token lit,
                             Token label,
                             int autoGenType,
                             boolean lastInRule) {
    _refStringLiteral(lit, label, autoGenType, lastInRule);
}
/** Reference a token; delegates to the shared implementation. */
public void refToken(Token assignId, Token t, Token label, Token args,
                     boolean inverted, int autoGenType, boolean lastInRule) {
    _refToken(assignId, t, label, args, inverted, autoGenType, lastInRule);
}
/** Reference a token range (t1..t2), registering both endpoints. */
public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
    // ensure that the DefineGrammarSymbols methods are called; otherwise a range adds more
    // token refs to the alternative by calling MakeGrammar.refToken etc...
    // NOTE(review): t1 goes through the overridable refStringLiteral() while
    // t2 calls _refStringLiteral() directly -- the asymmetry looks
    // accidental; confirm it is intentional before changing.
    if (t1.getText().charAt(0) == '"') {
        refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
    }
    else {
        _refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
    }
    if (t2.getText().charAt(0) == '"') {
        _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
    }
    else {
        _refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
    }
}
public void refTreeSpecifier(Token treeSpec) {
}
public void refWildcard(Token t, Token label, int autoGenType) {
}
/** Get ready to process a new grammar */
public void reset() {
    // Forget the current grammar; the next startLexer/startParser/
    // startTreeWalker call installs a fresh one.
    grammar = null;
}
public void setArgOfRuleRef(Token argaction) {
}
/** Set the character vocabulary for a lexer */
public void setCharVocabulary(BitSet b) {
    // grammar should enforce that this is only called for lexer
    // (the unchecked cast below throws ClassCastException otherwise)
    ((LexerGrammar)grammar).setCharVocabulary(b);
}
/** setFileOption: Associate an option value with a key.
 * This applies to options for an entire grammar file.
 * @param key The token containing the option name
 * @param value The token containing the option value.
 * @param filename the grammar file, used only for error reporting
 */
public void setFileOption(Token key, Token value, String filename) {
    if (key.getText().equals("language")) {
        // Accept either a quoted string or a bare identifier.
        if (value.getType() == ANTLRParser.STRING_LITERAL) {
            language = StringUtils.stripBack(StringUtils.stripFront(value.getText(), '"'), '"');
        }
        else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) {
            language = value.getText();
        }
        else {
            tool.error("language option must be string or identifier", filename, value.getLine(), value.getColumn());
        }
    }
    else if (key.getText().equals("mangleLiteralPrefix")) {
        if (value.getType() == ANTLRParser.STRING_LITERAL) {
            tool.literalsPrefix = StringUtils.stripFrontBack(value.getText(), "\"", "\"");
        }
        else {
            tool.error("mangleLiteralPrefix option must be string", filename, value.getLine(), value.getColumn());
        }
    }
    else if (key.getText().equals("upperCaseMangledLiterals")) {
        if (value.getText().equals("true")) {
            tool.upperCaseMangledLiterals = true;
        }
        else if (value.getText().equals("false")) {
            tool.upperCaseMangledLiterals = false;
        }
        else {
            grammar.antlrTool.error("Value for upperCaseMangledLiterals must be true or false", filename, key.getLine(), key.getColumn());
        }
    }
    // C++-only file-level options.
    // BUGFIX: "noConstructors" was missing from this condition, which made
    // the noConstructors branch below unreachable (the option fell through
    // to the "Invalid file-level option" error instead).
    else if ( key.getText().equals("namespaceStd") ||
              key.getText().equals("namespaceAntlr") ||
              key.getText().equals("genHashLines") ||
              key.getText().equals("noConstructors")
        ) {
        if (!language.equals("Cpp")) {
            tool.error(key.getText() + " option only valid for C++", filename, key.getLine(), key.getColumn());
        }
        else {
            if (key.getText().equals("noConstructors")) {
                if (!(value.getText().equals("true") || value.getText().equals("false")))
                    tool.error("noConstructors option must be true or false", filename, value.getLine(), value.getColumn());
                tool.noConstructors = value.getText().equals("true");
            } else if (key.getText().equals("genHashLines")) {
                if (!(value.getText().equals("true") || value.getText().equals("false")))
                    tool.error("genHashLines option must be true or false", filename, value.getLine(), value.getColumn());
                tool.genHashLines = value.getText().equals("true");
            }
            else {
                // namespaceStd / namespaceAntlr take a string value.
                if (value.getType() != ANTLRParser.STRING_LITERAL) {
                    tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
                }
                else {
                    if (key.getText().equals("namespaceStd"))
                        tool.namespaceStd = value.getText();
                    else if (key.getText().equals("namespaceAntlr"))
                        tool.namespaceAntlr = value.getText();
                }
            }
        }
    }
    else if ( key.getText().equals("namespace") ) {
        if ( !language.equals("Cpp") && !language.equals("CSharp") )
        {
            tool.error(key.getText() + " option only valid for C++ and C# (a.k.a CSharp)", filename, key.getLine(), key.getColumn());
        }
        else
        {
            if (value.getType() != ANTLRParser.STRING_LITERAL)
            {
                tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
            }
            else {
                if (key.getText().equals("namespace"))
                    tool.setNameSpace(value.getText());
            }
        }
    }
    else {
        // BUGFIX: report the key's own position (was value.getColumn(),
        // inconsistent with the key.getLine() passed beside it).
        tool.error("Invalid file-level option: " + key.getText(), filename, key.getLine(), key.getColumn());
    }
}
/** setGrammarOption: Associate an option value with a key.
 * This function forwards to Grammar.setOption for some options.
 * @param key The token containing the option name
 * @param value The token containing the option value.
 */
public void setGrammarOption(Token key, Token value) {
    // Options retired in ANTLR 2.6.0 fail with a pointer to the docs.
    if (key.getText().equals("tokdef") || key.getText().equals("tokenVocabulary")) {
        tool.error("tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n" +
            " Use importVocab/exportVocab instead. Please see the documentation.\n" +
            " The previous options were so heinous that Terence changed the whole\n" +
            " vocabulary mechanism; it was better to change the names rather than\n" +
            " subtly change the functionality of the known options. Sorry!", grammar.getFilename(), value.getLine(), value.getColumn());
    }
    else if (key.getText().equals("literal") &&
             grammar instanceof LexerGrammar) {
        tool.error("the literal option is invalid >= ANTLR 2.6.0.\n" +
            " Use the \"tokens {...}\" mechanism instead.",
            grammar.getFilename(), value.getLine(), value.getColumn());
    }
    else if (key.getText().equals("exportVocab")) {
        // Set the token manager associated with the parser
        if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
            grammar.exportVocab = value.getText();
        }
        else {
            tool.error("exportVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
        }
    }
    else if (key.getText().equals("importVocab")) {
        if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
            grammar.importVocab = value.getText();
        }
        else {
            tool.error("importVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
        }
    }
    else if ( key.getText().equals("k") ) {
        // Tree walkers only support a single token of lookahead.
        if( grammar instanceof TreeWalkerGrammar
            && ! value.getText().equals("1") ) {
            tool.error("Treewalkers only support k=1", grammar.getFilename(), value.getLine(), value.getColumn());
        }
        else {
            grammar.setOption(key.getText(), value);
        }
    }
    else {
        // Forward all unrecognized options to the grammar
        grammar.setOption(key.getText(), value);
    }
}
// Rule- and subrule-level options are not tracked by this pass.
public void setRuleOption(Token key, Token value) {
}
public void setSubruleOption(Token key, Token value) {
}
/** Start a new lexer */
public void startLexer(String file, Token name, String superClass, String doc) {
    // Only one lexer per grammar file is supported.
    if (numLexers > 0) {
        tool.panic("You may only have one lexer per grammar file: class " + name.getText());
    }
    numLexers++;
    reset();
    //System.out.println("Processing lexer '" + name.getText() + "'");
    // Does the lexer already exist?
    // NOTE(review): 'grammars' is populated below with String keys
    // (lg.getClassName()), but this lookup uses the Token itself as the
    // key, so the duplicate check appears to never fire -- confirm.
    Grammar g = (Grammar)grammars.get(name);
    if (g != null) {
        if (!(g instanceof LexerGrammar)) {
            tool.panic("'" + name.getText() + "' is already defined as a non-lexer");
        }
        else {
            tool.panic("Lexer '" + name.getText() + "' is already defined");
        }
    }
    else {
        // Create a new lexer grammar
        LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass);
        lg.comment = doc;
        lg.processArguments(args);
        lg.setFilename(file);
        grammars.put(lg.getClassName(), lg);
        // Use any preamble action
        lg.preambleAction = thePreambleAction;
        // Preamble has been consumed; reset to an empty token.
        thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
        // This is now the current grammar
        grammar = lg;
    }
}
/** Start a new parser */
public void startParser(String file, Token name, String superClass, String doc) {
    // Only one parser per grammar file is supported.
    if (numParsers > 0) {
        tool.panic("You may only have one parser per grammar file: class " + name.getText());
    }
    numParsers++;
    reset();
    //System.out.println("Processing parser '" + name.getText() + "'");
    // Is this grammar already defined?
    // NOTE(review): 'grammars' is keyed by class-name Strings below, yet
    // this lookup uses the Token object itself, so the duplicate check
    // appears to never fire -- confirm.
    Grammar g = (Grammar)grammars.get(name);
    if (g != null) {
        if (!(g instanceof ParserGrammar)) {
            tool.panic("'" + name.getText() + "' is already defined as a non-parser");
        }
        else {
            tool.panic("Parser '" + name.getText() + "' is already defined");
        }
    }
    else {
        // Create a new grammar
        grammar = new ParserGrammar(name.getText(), tool, superClass);
        grammar.comment = doc;
        grammar.processArguments(args);
        grammar.setFilename(file);
        grammars.put(grammar.getClassName(), grammar);
        // Use any preamble action
        grammar.preambleAction = thePreambleAction;
        // Preamble has been consumed; reset to an empty token.
        thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
    }
}
/** Start a new tree-walker */
public void startTreeWalker(String file, Token name, String superClass, String doc) {
    // Only one tree parser per grammar file is supported.
    if (numTreeParsers > 0) {
        tool.panic("You may only have one tree parser per grammar file: class " + name.getText());
    }
    numTreeParsers++;
    reset();
    //System.out.println("Processing tree-walker '" + name.getText() + "'");
    // Is this grammar already defined?
    // NOTE(review): 'grammars' is keyed by class-name Strings below, yet
    // this lookup uses the Token object itself, so the duplicate check
    // appears to never fire -- confirm.
    Grammar g = (Grammar)grammars.get(name);
    if (g != null) {
        if (!(g instanceof TreeWalkerGrammar)) {
            tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker");
        }
        else {
            tool.panic("Tree-walker '" + name.getText() + "' is already defined");
        }
    }
    else {
        // Create a new grammar
        grammar = new TreeWalkerGrammar(name.getText(), tool, superClass);
        grammar.comment = doc;
        grammar.processArguments(args);
        grammar.setFilename(file);
        grammars.put(grammar.getClassName(), grammar);
        // Use any preamble action
        grammar.preambleAction = thePreambleAction;
        // Preamble has been consumed; reset to an empty token.
        thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
    }
}
// No-op events for syntactic predicates and closure subrules.
public void synPred() {
}
public void zeroOrMoreSubRule() {
}
}

68
fine-antlr-old/src/main/java/com/fr/third/antlr/DumpASTVisitor.java

@ -1,68 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/DumpASTVisitor.java#2 $
*/
import com.fr.third.antlr.collections.AST;
/** Simple class to dump the contents of an AST to the output */
public class DumpASTVisitor implements ASTVisitor {
    // Current indentation depth for the dump.
    protected int level = 0;

    // Print the indentation prefix for the current level.
    private void tabs() {
        for (int i = 0; i < level; i++) {
            System.out.print(" ");
        }
    }

    /** Print {@code node} and its siblings/children to System.out,
     *  one node per line, indented by tree depth. */
    public void visit(AST node) {
        // Flatten this level of the tree if it has no children
        // NOTE(review): flatten starts out false (the /*true*/ shows the
        // flattening behavior was disabled), so the loop below can only
        // confirm false and the flatten branches are effectively dead.
        boolean flatten = /*true*/ false;
        AST node2;
        for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
            if (node2.getFirstChild() != null) {
                flatten = false;
                break;
            }
        }
        for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
            if (!flatten || node2 == node) {
                tabs();
            }
            if (node2.getText() == null) {
                System.out.print("nil");
            }
            else {
                System.out.print(node2.getText());
            }
            System.out.print(" [" + node2.getType() + "] ");
            if (flatten) {
                System.out.print(' ');
            }
            else {
                System.out.println("");
            }
            // Recurse into children with one more level of indentation.
            if (node2.getFirstChild() != null) {
                level++;
                visit(node2.getFirstChild());
                level--;
            }
        }
        if (flatten) {
            System.out.println("");
        }
    }
}

22
fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionHandler.java

@ -1,22 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ExceptionHandler.java#2 $
*/
/** One catch phrase of an exception spec: the exception type/variable
 *  declaration paired with the action to run when it is caught. */
class ExceptionHandler {
    // Type of the ANTLR exception class to catch and the variable decl
    protected Token exceptionTypeAndName;
    // The action to be executed when the exception is caught
    protected Token action;

    public ExceptionHandler(Token exceptionTypeAndName_,
                            Token action_) {
        exceptionTypeAndName = exceptionTypeAndName_;
        action = action_;
    }
}

29
fine-antlr-old/src/main/java/com/fr/third/antlr/ExceptionSpec.java

@ -1,29 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ExceptionSpec.java#2 $
*/
import com.fr.third.antlr.collections.impl.Vector;
/** An exception specification: an optional label plus the list of
 *  catch phrases (ExceptionHandler) attached to it. */
class ExceptionSpec {
    // Non-null if this refers to a labeled rule
    // Use a token instead of a string to get the line information
    protected Token label;
    // List of ExceptionHandler (catch phrases)
    protected Vector handlers;

    public ExceptionSpec(Token label_) {
        label = label_;
        handlers = new Vector();
    }

    /** Append another catch phrase to this spec. */
    public void addHandler(ExceptionHandler handler) {
        handlers.appendElement(handler);
    }
}

14
fine-antlr-old/src/main/java/com/fr/third/antlr/FileCopyException.java

@ -1,14 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/FileCopyException.java#2 $
*/
/** IOException subtype signalling a failed file copy; carries only a
 *  message, no extra state. */
class FileCopyException extends java.io.IOException {
    public FileCopyException(String msg) {
        super(msg);
    }
}

27
fine-antlr-old/src/main/java/com/fr/third/antlr/FileLineFormatter.java

@ -1,27 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/FileLineFormatter.java#2 $
*/
/** Strategy for formatting the "file:line:column" prefix of messages;
 *  a single process-wide instance is installed via setFormatter(). */
public abstract class FileLineFormatter {
    // Process-wide formatter; defaults to DefaultFileLineFormatter.
    private static FileLineFormatter formatter = new DefaultFileLineFormatter();

    /** @return the formatter currently installed for the whole tool */
    public static FileLineFormatter getFormatter() {
        return formatter;
    }

    /** Install a different formatter. */
    public static void setFormatter(FileLineFormatter f) {
        formatter = f;
    }

    /** @param fileName the file that should appear in the prefix. (or null)
     * @param line the line (or -1)
     * @param column the column (or -1)
     */
    public abstract String getFormatString(String fileName, int line, int column);
}

288
fine-antlr-old/src/main/java/com/fr/third/antlr/Grammar.java

@ -1,288 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Grammar.java#2 $
*/
import java.io.IOException;
import java.util.Enumeration;
import java.util.Hashtable;
import com.fr.third.antlr.collections.impl.Vector;
/**A Grammar holds a set of rules (which are stored
* in a symbol table). Most of the time a grammar
* needs a code generator and an LLkAnalyzer too.
*/
public abstract class Grammar {
    protected Tool antlrTool;
    protected CodeGenerator generator;
    protected LLkGrammarAnalyzer theLLkAnalyzer;
    // Symbol table: rule name -> GrammarSymbol
    protected Hashtable symbols;
    protected boolean buildAST = false;
    protected boolean analyzerDebug = false;
    protected boolean interactive = false;
    protected String superClass = null;
    /** The token manager associated with the grammar, if any.
    // The token manager is responsible for maintaining the set of valid tokens, and
    // is conceptually shared between the lexer and parser. This may be either a
    // LexerGrammar or a ImportVocabTokenManager.
    */
    protected TokenManager tokenManager;
    /** The name of the export vocabulary...used to generate the output
     * token types interchange file.
     */
    protected String exportVocab = null;
    /** The name of the import vocabulary. "Initial conditions"
     */
    protected String importVocab = null;
    // Mapping from String keys to Token option values
    protected Hashtable options;
    // Vector of RuleSymbol entries
    protected Vector rules;
    protected Token preambleAction = new CommonToken(Token.INVALID_TYPE, "");
    protected String className = null;
    protected String fileName = null;
    protected Token classMemberAction = new CommonToken(Token.INVALID_TYPE, "");
    protected boolean hasSyntacticPredicate = false;
    protected boolean hasUserErrorHandling = false;
    // max lookahead that can be attempted for this parser.
    protected int maxk = 1;
    // options
    protected boolean traceRules = false;
    protected boolean debuggingOutput = false;
    protected boolean defaultErrorHandler = true;
    protected String comment = null; // javadoc comment

    public Grammar(String className_, Tool tool_, String superClass) {
        className = className_;
        antlrTool = tool_;
        symbols = new Hashtable();
        options = new Hashtable();
        rules = new Vector(100);
        this.superClass = superClass;
    }

    /** Define a rule */
    public void define(RuleSymbol rs) {
        rules.appendElement(rs);
        // add the symbol to the rules hash table
        symbols.put(rs.getId(), rs);
    }

    /** Top-level call to generate the code for this grammar */
    public abstract void generate() throws IOException;

    protected String getClassName() {
        return className;
    }

    /* Does this grammar have a default error handler? */
    public boolean getDefaultErrorHandler() {
        return defaultErrorHandler;
    }

    public String getFilename() {
        return fileName;
    }

    /** Get an integer option. Given the name of the option find its
     * associated integer value. If the associated value is not an integer or
     * is not in the table, then throw an exception of type NumberFormatException.
     * @param key The name of the option
     * @return The value associated with the key.
     */
    public int getIntegerOption(String key) throws NumberFormatException {
        Token t = (Token)options.get(key);
        if (t == null || t.getType() != ANTLRTokenTypes.INT) {
            throw new NumberFormatException();
        }
        else {
            return Integer.parseInt(t.getText());
        }
    }

    /** Get an option. Given the name of the option find its associated value.
     * @param key The name of the option
     * @return The value associated with the key, or null if the key has not been set.
     */
    public Token getOption(String key) {
        return (Token)options.get(key);
    }

    // Get name of class from which generated parser/lexer inherits
    protected abstract String getSuperClass();

    public GrammarSymbol getSymbol(String s) {
        return (GrammarSymbol)symbols.get(s);
    }

    public Enumeration getSymbols() {
        return symbols.elements();
    }

    /** Check the existence of an option in the table
     * @param key The name of the option
     * @return true if the option is in the table
     */
    public boolean hasOption(String key) {
        return options.containsKey(key);
    }

    /** Is a rule symbol defined? (not used for tokens) */
    public boolean isDefined(String s) {
        return symbols.containsKey(s);
    }

    /**Process command line arguments. Implemented in subclasses */
    public abstract void processArguments(String[] args);

    public void setCodeGenerator(CodeGenerator gen) {
        generator = gen;
    }

    public void setFilename(String s) {
        fileName = s;
    }

    public void setGrammarAnalyzer(LLkGrammarAnalyzer a) {
        theLLkAnalyzer = a;
    }

    /** Set a generic option.
     * This associates a generic option key with a Token value.
     * No validation is performed by this method, although users of the value
     * (code generation and/or analysis) may require certain formats.
     * The value is stored as a token so that the location of an error
     * can be reported.
     * @param key The name of the option.
     * @param value The value to associate with the key.
     * @return true if the option was a valid generic grammar option, false o/w
     */
    public boolean setOption(String key, Token value) {
        options.put(key, value);
        String s = value.getText();
        // scratch for validated integer options; the value is re-read from
        // the table later, so 'i' itself is intentionally unused.
        int i;
        if (key.equals("k")) {
            try {
                maxk = getIntegerOption("k");
                if ( maxk<=0 ) {
                    antlrTool.error("option 'k' must be greater than 0 (was " +
                        value.getText() + ")",
                        getFilename(),
                        value.getLine(),
                        value.getColumn());
                    maxk = 1;
                }
            }
            catch (NumberFormatException e) {
                antlrTool.error("option 'k' must be an integer (was " + value.getText() + ")", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        if (key.equals("codeGenMakeSwitchThreshold")) {
            try {
                // validation only
                i = getIntegerOption("codeGenMakeSwitchThreshold");
            }
            catch (NumberFormatException e) {
                antlrTool.error("option 'codeGenMakeSwitchThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        if (key.equals("codeGenBitsetTestThreshold")) {
            try {
                // validation only
                i = getIntegerOption("codeGenBitsetTestThreshold");
            }
            catch (NumberFormatException e) {
                antlrTool.error("option 'codeGenBitsetTestThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        if (key.equals("defaultErrorHandler")) {
            if (s.equals("true")) {
                defaultErrorHandler = true;
            }
            else if (s.equals("false")) {
                defaultErrorHandler = false;
            }
            else {
                antlrTool.error("Value for defaultErrorHandler must be true or false", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        if (key.equals("analyzerDebug")) {
            if (s.equals("true")) {
                analyzerDebug = true;
            }
            else if (s.equals("false")) {
                analyzerDebug = false;
            }
            else {
                antlrTool.error("option 'analyzerDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        if (key.equals("codeGenDebug")) {
            // NOTE(review): this branch toggles analyzerDebug, not a
            // separate codeGenDebug/debuggingOutput flag -- looks like a
            // copy-paste slip; confirm against the code generator before
            // changing.
            if (s.equals("true")) {
                analyzerDebug = true;
            }
            else if (s.equals("false")) {
                analyzerDebug = false;
            }
            else {
                antlrTool.error("option 'codeGenDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
            }
            return true;
        }
        // The following options are recognized (stored above) but need no
        // further processing here.
        if (key.equals("classHeaderSuffix")) {
            return true;
        }
        if (key.equals("classHeaderPrefix")) {
            return true;
        }
        if (key.equals("namespaceAntlr")) {
            return true;
        }
        if (key.equals("namespaceStd")) {
            return true;
        }
        if (key.equals("genHashLines")) {
            return true;
        }
        if (key.equals("noConstructors")) {
            return true;
        }
        return false;
    }

    public void setTokenManager(TokenManager tokenManager_) {
        tokenManager = tokenManager_;
    }

    /** Print out the grammar without actions */
    public String toString() {
        StringBuffer buf = new StringBuffer(20000);
        Enumeration ids = rules.elements();
        while (ids.hasMoreElements()) {
            RuleSymbol rs = (RuleSymbol)ids.nextElement();
            // "mnextToken" is skipped from the dump.
            if (!rs.id.equals("mnextToken")) {
                buf.append(rs.getBlock().toString());
                buf.append("\n\n");
            }
        }
        return buf.toString();
    }
}

36
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAnalyzer.java

@ -1,36 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarAnalyzer.java#2 $
*/
/**A GrammarAnalyzer computes lookahead from Grammar (which contains
* a grammar symbol table) and can then answer questions about the
* grammar.
*
* To access the RuleBlock for a rule name, the grammar symbol table
* is consulted.
*
* There should be no distinction between static & dynamic analysis.
* In other words, some of the easy analysis can be done statically
* and then the part that is hard statically can be deferred to
* parse-time. Interestingly, computing LL(k) for k>1 lookahead
* statically is O(|T|^k) where T is the grammar vocabulary, but,
* is O(k) at run-time (ignoring the large constant associated with
* the size of the grammar). In English, the difference can be
* described as "find the set of all possible k-sequences of input"
* versus "does this specific k-sequence match?".
*/
public interface GrammarAnalyzer {
    /**The epsilon token type is an imaginary type used
     * during analysis. It indicates an incomplete look() computation.
     * Must be kept consistent with Token constants to be between
     * MIN_USER_TYPE and INVALID_TYPE.
     */
    // public static final int EPSILON_TYPE = 2;
    public static final int NONDETERMINISTIC = Integer.MAX_VALUE; // lookahead depth
    public static final int LOOKAHEAD_DEPTH_INIT = -1; // sentinel: depth not yet computed
}

68
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarAtom.java

@ -1,68 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarAtom.java#2 $
*/
/** A GrammarAtom is either a token ref, a character ref, or string.
* The analysis doesn't care.
*/
abstract class GrammarAtom extends AlternativeElement {
    // Optional label from the grammar (label:atom).
    protected String label;
    // Source text of the atom, taken from its token.
    protected String atomText;
    protected int tokenType = Token.INVALID_TYPE;
    protected boolean not = false; // ~T or ~'c' or ~"foo"
    /** Set to type of AST node to create during parse. Defaults to what is
     * set in the TokenSymbol.
     */
    protected String ASTNodeType = null;

    public GrammarAtom(Grammar g, Token t, int autoGenType) {
        super(g, t, autoGenType);
        atomText = t.getText();
    }
    public String getLabel() {
        return label;
    }
    public String getText() {
        return atomText;
    }
    public int getType() {
        return tokenType;
    }
    public void setLabel(String label_) {
        label = label_;
    }
    public String getASTNodeType() {
        return ASTNodeType;
    }
    public void setASTNodeType(String type) {
        ASTNodeType = type;
    }
    /** Handle an element option; only "AST" is accepted here. */
    public void setOption(Token option, Token value) {
        if (option.getText().equals("AST")) {
            setASTNodeType(value.getText());
        }
        else {
            grammar.antlrTool.error("Invalid element option:" + option.getText(),
                grammar.getFilename(), option.getLine(), option.getColumn());
        }
    }
    /** Render as " [label:][~]text" for grammar dumps. */
    public String toString() {
        String s = " ";
        if (label != null) s += label + ":";
        if (not) s += "~";
        return s + atomText;
    }
}

62
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarElement.java

@ -1,62 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarElement.java#2 $
*/
/**A GrammarElement is a generic node in our
* data structure that holds a grammar in memory.
* This data structure can be used for static
* analysis or for dynamic analysis (during parsing).
* Every node must know which grammar owns it, how
* to generate code, and how to do analysis.
*/
abstract class GrammarElement {
    // Automatic AST-generation codes attached to an element.
    public static final int AUTO_GEN_NONE = 1;
    public static final int AUTO_GEN_CARET = 2; // '^' suffix
    public static final int AUTO_GEN_BANG = 3;  // '!' suffix
    /*
     * Note that Java does static argument type matching to
     * determine which function to execute on the receiver.
     * Here, that implies that we cannot simply say
     * grammar.generator.gen(this) in GrammarElement or
     * only CodeGenerator.gen(GrammarElement ge) would
     * ever be called.
     */
    protected Grammar grammar;
    // Source position of the element; -1/-1 when no start token is known.
    protected int line;
    protected int column;

    public GrammarElement(Grammar g) {
        grammar = g;
        line = -1;
        column = -1;
    }

    public GrammarElement(Grammar g, Token start) {
        grammar = g;
        line = start.getLine();
        column = start.getColumn();
    }

    /** Code-generation hook; no-op by default. */
    public void generate() {
    }

    public int getLine() {
        return line;
    }

    public int getColumn() {
        return column;
    }

    /** Lookahead-computation hook; returns null unless overridden. */
    public Lookahead look(int k) {
        return null;
    }

    public abstract String toString();
}

30
fine-antlr-old/src/main/java/com/fr/third/antlr/GrammarSymbol.java

@ -1,30 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/GrammarSymbol.java#2 $
*/
/**A GrammarSymbol is a generic symbol that can be
* added to the symbol table for a grammar.
*/
abstract class GrammarSymbol {
    /** Symbol-table key for this symbol; may be null until setId() is called. */
    protected String id;

    public GrammarSymbol() {
    }

    public GrammarSymbol(String identifier) {
        id = identifier;
    }

    /** @return the identifier under which this symbol is stored */
    public String getId() {
        return id;
    }

    /** Replace the identifier of this symbol. */
    public void setId(String identifier) {
        id = identifier;
    }
}

102
fine-antlr-old/src/main/java/com/fr/third/antlr/ImportVocabTokenManager.java

@ -1,102 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/ImportVocabTokenManager.java#2 $
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.Reader;
/** Static implementation of the TokenManager, used for importVocab option */
class ImportVocabTokenManager extends SimpleTokenManager implements Cloneable {
    // Name of the token-vocabulary file this manager was loaded from.
    private String filename;
    protected Grammar grammar;

    // FIXME: it would be nice if the path to the original grammar file was
    // also searched.
    ImportVocabTokenManager(Grammar grammar, String filename_, String name_, Tool tool_) {
        // initialize
        super(name_, tool_);
        this.grammar = grammar;
        this.filename = filename_;
        // Figure out exactly where the file lives. Check $PWD first,
        // and then search in -o <output_dir>.
        //
        File grammarFile = new File(filename);
        if (!grammarFile.exists()) {
            grammarFile = new File(antlrTool.getOutputDirectory(), filename);
            if (!grammarFile.exists()) {
                antlrTool.panic("Cannot find importVocab file '" + filename + "'");
            }
        }
        // Imported vocabularies are never written back out.
        setReadOnly(true);
        // Read a file with lines of the form ID=number
        // NOTE(review): fileIn is never closed -- resource leak in the
        // original code; left as-is here.
        try {
            Reader fileIn = new BufferedReader(new FileReader(grammarFile));
            ANTLRTokdefLexer tokdefLexer = new ANTLRTokdefLexer(fileIn);
            ANTLRTokdefParser tokdefParser = new ANTLRTokdefParser(tokdefLexer);
            tokdefParser.setTool(antlrTool);
            tokdefParser.setFilename(filename);
            tokdefParser.file(this);
        }
        catch (FileNotFoundException fnf) {
            antlrTool.panic("Cannot find importVocab file '" + filename + "'");
        }
        catch (RecognitionException ex) {
            antlrTool.panic("Error parsing importVocab file '" + filename + "': " + ex.toString());
        }
        catch (TokenStreamException ex) {
            antlrTool.panic("Error reading importVocab file '" + filename + "'");
        }
    }

    public Object clone() {
        ImportVocabTokenManager tm;
        tm = (ImportVocabTokenManager)super.clone();
        tm.filename = this.filename;
        tm.grammar = this.grammar;
        return tm;
    }

    /** define a token. */
    public void define(TokenSymbol ts) {
        super.define(ts);
    }

    /** define a token. Intended for use only when reading the importVocab file. */
    public void define(String s, int ttype) {
        TokenSymbol ts = null;
        // Literals arrive quoted, e.g. "begin"; everything else is a token name.
        if (s.startsWith("\"")) {
            ts = new StringLiteralSymbol(s);
        }
        else {
            ts = new TokenSymbol(s);
        }
        ts.setTokenType(ttype);
        super.define(ts);
        maxToken = (ttype + 1) > maxToken ? (ttype + 1) : maxToken; // record maximum token type
    }

    /** importVocab token manager is read-only if output would be same as input */
    public boolean isReadOnly() {
        return readOnly;
    }

    /** Get the next unused token type. */
    public int nextTokenType() {
        return super.nextTokenType();
    }
}

131
fine-antlr-old/src/main/java/com/fr/third/antlr/InputBuffer.java

@ -1,131 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/InputBuffer.java#2 $
*/
// SAS: Added this class to genericise the input buffers for scanners
// This allows a scanner to use a binary (FileInputStream) or
// text (FileReader) stream of data; the generated scanner
// subclass will define the input stream
// There are two subclasses to this: CharBuffer and ByteBuffer
/**A Stream of characters fed to the lexer from a InputStream that can
* be rewound via mark()/rewind() methods.
* <p>
* A dynamic array is used to buffer up all the input characters. Normally,
* "k" characters are stored in the buffer. More characters may be stored during
* guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
* Consumption of characters is deferred. In other words, reading the next
* character is not done by consume(), but deferred until needed by LA or LT.
* <p>
*
* @see com.fr.third.antlr.CharQueue
*/
public abstract class InputBuffer {
    // Number of active markers
    protected int nMarkers = 0;
    // Additional offset used when markers are active
    protected int markerOffset = 0;
    // Number of calls to consume() since last LA() or LT() call
    protected int numToConsume = 0;
    // Circular queue
    protected CharQueue queue;

    /** Create an input buffer */
    public InputBuffer() {
        queue = new CharQueue(1);
    }

    /** This method updates the state of the input buffer so that
     * the text matched since the most recent mark() is no longer
     * held by the buffer. So, you either do a mark/rewind for
     * failed predicate or mark/commit to keep on parsing without
     * rewinding the input.
     */
    public void commit() {
        nMarkers--;
    }

    /** Mark another character for deferred consumption */
    public void consume() {
        numToConsume++;
    }

    /** Ensure that the input buffer is sufficiently full */
    public abstract void fill(int amount) throws CharStreamException;

    /** @return the queued characters past the marked prefix */
    public String getLAChars() {
        StringBuffer la = new StringBuffer();
        for (int i = markerOffset; i < queue.nbrEntries; i++)
            la.append(queue.elementAt(i));
        return la.toString();
    }

    /** @return the characters consumed while a marker was active */
    public String getMarkedChars() {
        StringBuffer marked = new StringBuffer();
        for (int i = 0; i < markerOffset; i++)
            marked.append(queue.elementAt(i));
        return marked.toString();
    }

    public boolean isMarked() {
        return (nMarkers != 0);
    }

    /** Get a lookahead character */
    public char LA(int i) throws CharStreamException {
        fill(i);
        return queue.elementAt(markerOffset + i - 1);
    }

    /**Return an integer marker that can be used to rewind the buffer to
     * its current state.
     */
    public int mark() {
        syncConsume();
        nMarkers++;
        return markerOffset;
    }

    /**Rewind the character buffer to a marker.
     * @param mark Marker returned previously from mark()
     */
    public void rewind(int mark) {
        syncConsume();
        markerOffset = mark;
        nMarkers--;
    }

    /** Reset the input buffer
     */
    public void reset() {
        nMarkers = 0;
        markerOffset = 0;
        numToConsume = 0;
        queue.reset();
    }

    /** Sync up deferred consumption */
    protected void syncConsume() {
        while (numToConsume > 0) {
            if (nMarkers > 0) {
                // guess mode -- leave leading characters and bump offset.
                markerOffset++;
            }
            else {
                // normal mode -- remove first character
                queue.removeFirst();
            }
            numToConsume--;
        }
    }
}

34
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaBlockFinishingInfo.java

@@ -1,34 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/JavaBlockFinishingInfo.java#2 $
*/
class JavaBlockFinishingInfo {
String postscript; // what to generate to terminate block
boolean generatedSwitch;// did block finish with "default:" of switch?
boolean generatedAnIf;
/** When generating an if or switch, end-of-token lookahead sets
* will become the else or default clause, don't generate an
* error clause in this case.
*/
boolean needAnErrorClause;
public JavaBlockFinishingInfo() {
postscript = null;
generatedSwitch = false;
needAnErrorClause = true;
}
public JavaBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) {
postscript = ps;
generatedSwitch = genS;
this.generatedAnIf = generatedAnIf;
needAnErrorClause = n;
}
}

87
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCharFormatter.java

@@ -1,87 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/JavaCharFormatter.java#2 $
*/
class JavaCharFormatter implements CharFormatter {

    /** Given a character value, return a string representing the character
     * that can be embedded inside a string literal or character literal
     * This works for Java/C/C++ code-generation and languages with compatible
     * special-character-escapment.
     * Code-generators for languages should override this method.
     * @param c The character of interest.
     * @param forCharLiteral true to escape for char literal, false for string literal
     */
    public String escapeChar(int c, boolean forCharLiteral) {
        switch (c) {
            // case GrammarAnalyzer.EPSILON_TYPE : return "<end-of-token>";
            case '\n':
                return "\\n";
            case '\t':
                return "\\t";
            case '\r':
                return "\\r";
            case '\\':
                return "\\\\";
            case '\'':
                return forCharLiteral ? "\\'" : "'";
            case '"':
                return forCharLiteral ? "\"" : "\\\"";
            default :
                if (c < ' ' || c > 126) {
                    // pad the \\uXXXX escape out to exactly four hex digits
                    if ((0x0000 <= c) && (c <= 0x000F)) {
                        return "\\u000" + Integer.toString(c, 16);
                    }
                    else if ((0x0010 <= c) && (c <= 0x00FF)) {
                        return "\\u00" + Integer.toString(c, 16);
                    }
                    else if ((0x0100 <= c) && (c <= 0x0FFF)) {
                        return "\\u0" + Integer.toString(c, 16);
                    }
                    else {
                        return "\\u" + Integer.toString(c, 16);
                    }
                }
                else {
                    return String.valueOf((char)c);
                }
        }
    }

    /** Converts a String into a representation that can be use as a literal
     * when surrounded by double-quotes.
     * Fix: accumulate into a StringBuilder instead of the old
     * `new String()` + `+=` loop, which was accidentally O(n^2).
     * @param s The String to be changed into a literal
     */
    public String escapeString(String s) {
        StringBuilder retval = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            retval.append(escapeChar(s.charAt(i), false));
        }
        return retval.toString();
    }

    /** Given a character value, return a string representing the character
     * literal that can be recognized by the target language compiler.
     * This works for languages that use single-quotes for character literals.
     * Code-generators for languages should override this method.
     * @param c The character of interest.
     */
    public String literalChar(int c) {
        return "'" + escapeChar(c, true) + "'";
    }

    /** Converts a String into a string literal
     * This works for languages that use double-quotes for string literals.
     * Code-generators for languages should override this method.
     * @param s The String to be changed into a literal
     */
    public String literalString(String s) {
        return "\"" + escapeString(s) + "\"";
    }
}

3746
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGenerator.java

File diff suppressed because it is too large Load Diff

21
fine-antlr-old/src/main/java/com/fr/third/antlr/JavaCodeGeneratorPrintWriterManager.java

@@ -1,21 +0,0 @@
package com.fr.third.antlr;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Map;
/**
 * Defines a strategy that can be used to manage the printwriter
 * being used to write JavaCodeGenerator output
 *
 * TODO generalize so all code gens could use?
 */
public interface JavaCodeGeneratorPrintWriterManager {
/** Set up and return the writer for the given grammar's generated output. */
public PrintWriter setupOutput(Tool tool, Grammar grammar) throws IOException;
/** Set up and return the writer for an explicitly named output file. */
public PrintWriter setupOutput(Tool tool, String fileName) throws IOException;
/** Begin mapping subsequently generated lines back to the given source line. */
public void startMapping(int sourceLine);
/** Like startMapping, but the mapping covers only a single source line. */
public void startSingleSourceLineMapping(int sourceLine);
/** End the current source-mapping region. */
public void endMapping();
/** Finish and flush the generated output. */
public void finishOutput() throws IOException;
/** @return the collected source maps (raw Map; key/value shape depends on the implementation -- TODO confirm) */
public Map getSourceMaps();
}

1095
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkAnalyzer.java

File diff suppressed because it is too large Load Diff

58
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkGrammarAnalyzer.java

@@ -1,58 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LLkGrammarAnalyzer.java#2 $
*/
/** Analyzer interface for LL(k) grammars: determinism checks plus
 * per-element lookahead (FIRST/FOLLOW) computation at depth k.
 */
public interface LLkGrammarAnalyzer extends GrammarAnalyzer {
/** Determinism checks for the three kinds of looping/alternative blocks. */
public boolean deterministic(AlternativeBlock blk);
public boolean deterministic(OneOrMoreBlock blk);
public boolean deterministic(ZeroOrMoreBlock blk);
/** FOLLOW set at depth k for the end of a rule. */
public Lookahead FOLLOW(int k, RuleEndElement end);
/** Lookahead computation at depth k, one overload per grammar element type. */
public Lookahead look(int k, ActionElement action);
public Lookahead look(int k, AlternativeBlock blk);
public Lookahead look(int k, BlockEndElement end);
public Lookahead look(int k, CharLiteralElement atom);
public Lookahead look(int k, CharRangeElement end);
public Lookahead look(int k, GrammarAtom atom);
public Lookahead look(int k, OneOrMoreBlock blk);
public Lookahead look(int k, RuleBlock blk);
public Lookahead look(int k, RuleEndElement end);
public Lookahead look(int k, RuleRefElement rr);
public Lookahead look(int k, StringLiteralElement atom);
public Lookahead look(int k, SynPredBlock blk);
public Lookahead look(int k, TokenRangeElement end);
public Lookahead look(int k, TreeElement end);
public Lookahead look(int k, WildcardElement wc);
public Lookahead look(int k, ZeroOrMoreBlock blk);
/** Lookahead at depth k for a rule referenced by name. */
public Lookahead look(int k, String rule);
/** Set the grammar this analyzer operates on. */
public void setGrammar(Grammar g);
/** @return true if the (possibly ~-inverted) subrule can be inverted for analysis */
public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer);
}

85
fine-antlr-old/src/main/java/com/fr/third/antlr/LLkParser.java

@@ -1,85 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LLkParser.java#2 $
*/
/** An LL(k) parser.
 *
 * @see com.fr.third.antlr.Token
 * @see com.fr.third.antlr.TokenBuffer
 */
public class LLkParser extends Parser {

    /** Fixed lookahead depth. */
    int k;

    public LLkParser(int k_) {
        k = k_;
    }

    public LLkParser(ParserSharedInputState state, int k_) {
        super(state);
        k = k_;
    }

    public LLkParser(TokenBuffer tokenBuf, int k_) {
        k = k_;
        setTokenBuffer(tokenBuf);
    }

    public LLkParser(TokenStream lexer, int k_) {
        k = k_;
        setTokenBuffer(new TokenBuffer(lexer));
    }

    /** Consume another token from the input stream. Can only write sequentially!
     * If you need 3 tokens ahead, you must consume() 3 times.
     * <p>
     * Note that it is possible to overwrite tokens that have not been matched.
     * For example, calling consume() 3 times when k=2, means that the first token
     * consumed will be overwritten with the 3rd.
     */
    public void consume() throws TokenStreamException {
        inputState.input.consume();
    }

    public int LA(int i) throws TokenStreamException {
        return inputState.input.LA(i);
    }

    public Token LT(int i) throws TokenStreamException {
        return inputState.input.LT(i);
    }

    /** Print one trace line: rule name plus the current k tokens of lookahead. */
    private void trace(String ee, String rname) throws TokenStreamException {
        traceIndent();
        System.out.print(ee + rname + (inputState.guessing > 0 ? "; [guessing]" : "; "));
        for (int i = 1; i <= k; i++) {
            if (i != 1) {
                System.out.print(", ");
            }
            Token tok = LT(i);
            System.out.print("LA(" + i + ")==" + (tok != null ? tok.getText() : "null"));
        }
        System.out.println();
    }

    public void traceIn(String rname) throws TokenStreamException {
        traceDepth += 1;
        trace("> ", rname);
    }

    public void traceOut(String rname) throws TokenStreamException {
        trace("< ", rname);
        traceDepth -= 1;
    }
}

179
fine-antlr-old/src/main/java/com/fr/third/antlr/LexerGrammar.java

@@ -1,179 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LexerGrammar.java#2 $
*/
import java.io.IOException;
import com.fr.third.antlr.collections.impl.BitSet;
/** Lexer-specific grammar subclass */
class LexerGrammar extends Grammar {
// character set used by lexer
protected BitSet charVocabulary;
// true if the lexer generates literal testing code for nextToken
protected boolean testLiterals = true;
// true if the lexer generates case-sensitive LA(k) testing
protected boolean caseSensitiveLiterals = true;
/** true if the lexer generates case-sensitive literals testing */
protected boolean caseSensitive = true;
/** true if lexer is to ignore all unrecognized tokens */
protected boolean filterMode = false;
/** if filterMode is true, then filterRule can indicate an optional
 * rule to use as the scarf language. If null, programmer used
 * plain "filter=true" not "filter=rule".
 */
protected String filterRule = null;
/** Construct a lexer grammar; seeds the char vocabulary with ASCII 0..127
 * and disables default error handling (lexers usually have none).
 */
LexerGrammar(String className_, Tool tool_, String superClass) {
super(className_, tool_, superClass);
// by default, use 0..127 for ASCII char vocabulary
BitSet cv = new BitSet();
for (int i = 0; i <= 127; i++) {
cv.add(i);
}
setCharVocabulary(cv);
// Lexer usually has no default error handling
defaultErrorHandler = false;
}
/** Top-level call to generate the code */
public void generate() throws IOException {
generator.gen(this);
}
/** @return the generated lexer's superclass name; debug variant when -debug is on */
public String getSuperClass() {
// If debugging, use debugger version of scanner
if (debuggingOutput)
return "debug.DebuggingCharScanner";
return "CharScanner";
}
// Get the testLiterals option value
public boolean getTestLiterals() {
return testLiterals;
}
/**Process command line arguments.
 * -trace have all rules call traceIn/traceOut
 * -traceLexer have lexical rules call traceIn/traceOut
 * -debug generate debugging output for parser debugger
 */
public void processArguments(String[] args) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-trace")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-traceLexer")) {
traceRules = true;
antlrTool.setArgOK(i);
}
else if (args[i].equals("-debug")) {
debuggingOutput = true;
antlrTool.setArgOK(i);
}
}
}
/** Set the character vocabulary used by the lexer */
public void setCharVocabulary(BitSet b) {
charVocabulary = b;
}
/** Set lexer options.
 * Each recognized key returns true (even when the value was invalid and
 * only a warning was issued); unknown keys fall through to the superclass
 * and finally report an error and return false.
 */
public boolean setOption(String key, Token value) {
String s = value.getText();
if (key.equals("buildAST")) {
antlrTool.warning("buildAST option is not valid for lexer", getFilename(), value.getLine(), value.getColumn());
return true;
}
if (key.equals("testLiterals")) {
if (s.equals("true")) {
testLiterals = true;
}
else if (s.equals("false")) {
testLiterals = false;
}
else {
antlrTool.warning("testLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("interactive")) {
if (s.equals("true")) {
interactive = true;
}
else if (s.equals("false")) {
interactive = false;
}
else {
antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("caseSensitive")) {
if (s.equals("true")) {
caseSensitive = true;
}
else if (s.equals("false")) {
caseSensitive = false;
}
else {
antlrTool.warning("caseSensitive option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("caseSensitiveLiterals")) {
if (s.equals("true")) {
caseSensitiveLiterals = true;
}
else if (s.equals("false")) {
caseSensitiveLiterals = false;
}
else {
antlrTool.warning("caseSensitiveLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("filter")) {
// filter accepts true/false or a lexer rule name (TOKEN_REF),
// in which case filtering is enabled with that rule as the scarf language
if (s.equals("true")) {
filterMode = true;
}
else if (s.equals("false")) {
filterMode = false;
}
else if (value.getType() == ANTLRTokenTypes.TOKEN_REF) {
filterMode = true;
filterRule = s;
}
else {
antlrTool.warning("filter option must be true, false, or a lexer rule name", getFilename(), value.getLine(), value.getColumn());
}
return true;
}
if (key.equals("longestPossible")) {
antlrTool.warning("longestPossible option has been deprecated; ignoring it...", getFilename(), value.getLine(), value.getColumn());
return true;
}
if (key.equals("className")) {
super.setOption(key, value);
return true;
}
if (super.setOption(key, value)) {
return true;
}
antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
return false;
}
}

80
fine-antlr-old/src/main/java/com/fr/third/antlr/LexerSharedInputState.java

@@ -1,80 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.jGuru.com
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/LexerSharedInputState.java#2 $
*/
import java.io.Reader;
import java.io.InputStream;
/** Holds the data associated with an input stream of characters.
 * Multiple lexers lexing the same input stream share a single
 * LexerSharedInputState.
 */
public class LexerSharedInputState {
    protected int column = 1;
    protected int line = 1;
    protected int tokenStartColumn = 1;
    protected int tokenStartLine = 1;
    protected InputBuffer input;

    /** What file (if known) caused the problem? */
    protected String filename;

    public int guessing = 0;

    public LexerSharedInputState(InputBuffer buffer) {
        this.input = buffer;
    }

    public LexerSharedInputState(InputStream stream) {
        this(new ByteBuffer(stream));
    }

    public LexerSharedInputState(Reader reader) {
        this(new CharBuffer(reader));
    }

    public String getFilename() {
        return filename;
    }

    public InputBuffer getInput() {
        return input;
    }

    public int getLine() {
        return line;
    }

    public int getTokenStartColumn() {
        return tokenStartColumn;
    }

    public int getTokenStartLine() {
        return tokenStartLine;
    }

    public int getColumn() {
        return column;
    }

    /** Restore the initial state (positions to 1, no guessing, no filename)
     * and reset the underlying input buffer.
     */
    public void reset() {
        column = 1;
        line = 1;
        tokenStartColumn = 1;
        tokenStartLine = 1;
        guessing = 0;
        filename = null;
        input.reset();
    }
}

218
fine-antlr-old/src/main/java/com/fr/third/antlr/Lookahead.java

@@ -1,218 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/Lookahead.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
import com.fr.third.antlr.collections.impl.Vector;
/**This object holds all information needed to represent
* the lookahead for any particular lookahead computation
* for a <b>single</b> lookahead depth. Final lookahead
* information is a simple bit set, but intermediate
* stages need computation cycle and FOLLOW information.
*
* <p>
* Concerning the <tt>cycle</tt> variable.
* If lookahead is computed for a RuleEnd node, then
* computation is part of a FOLLOW cycle for this rule.
* If lookahead is computed for a RuleBlock node, the
* computation is part of a FIRST cycle to this rule.
*
* <p>
* Concerning the <tt>epsilonDepth</tt> variable.
* This is not the depth relative to the rule reference
* that epsilon was encountered. That value is
* <pre>
* initial_k - epsilonDepth + 1
* </pre>
* Also, lookahead depths past rule ref for local follow are:
* <pre>
* initial_k - (initial_k - epsilonDepth)
* </pre>
* Used for rule references. If we try
* to compute look(k, ruleref) and there are fewer
* than k lookahead terminals before the end of the
* the rule, epsilon will be returned (don't want to
* pass the end of the rule). We must track when the
* the lookahead got stuck. For example,
* <pre>
* a : b A B E F G;
* b : C ;
* </pre>
* LOOK(5, ref-to(b)) is {<EPSILON>} with depth = 4, which
* indicates that at 2 (5-4+1) tokens ahead, end of rule was reached.
* Therefore, the token at 4=5-(5-4) past rule ref b must be
* included in the set == F.
* The situation is complicated by the fact that a computation
* may hit the end of a rule at many different depths. For example,
* <pre>
* a : b A B C ;
* b : E F // epsilon depth of 1 relative to initial k=3
* | G // epsilon depth of 2
* ;
* </pre>
* Here, LOOK(3,ref-to(b)) returns epsilon, but the depths are
* {1, 2}; i.e., 3-(3-1) and 3-(3-2). Those are the lookahead depths
* past the rule ref needed for the local follow.
*
* <p>
* This is null unless an epsilon is created.
*
* @see com.fr.third.antlr.Lookahead#combineWith(Lookahead)
*/
public class Lookahead implements Cloneable {
    /** actual bitset of the lookahead */
    BitSet fset;
    /** is this computation part of a computation cycle? */
    String cycle;
    /** What k values were being computed when end of rule hit? */
    BitSet epsilonDepth;
    /** Does this lookahead depth include Epsilon token type? This
     * is used to avoid having a bit in the set for Epsilon as it
     * conflicts with parsing binary files.
     */
    boolean hasEpsilon = false;

    public Lookahead() {
        fset = new BitSet();
    }

    /** create a new lookahead set with the LL(1) set to the parameter */
    public Lookahead(BitSet p) {
        fset = p;
    }

    /** create an empty lookahead set, but with cycle */
    public Lookahead(String c) {
        this();
        cycle = c;
    }

    /** Make a deep copy of everything in this object */
    public Object clone() {
        Lookahead p = null;
        try {
            p = (Lookahead)super.clone();
            p.fset = (BitSet)fset.clone();
            p.cycle = cycle; // strings are immutable
            if (epsilonDepth != null) {
                p.epsilonDepth = (BitSet)epsilonDepth.clone();
            }
        }
        catch (CloneNotSupportedException e) {
            // Cannot happen (we implement Cloneable); preserve the cause
            // instead of the old bare `throw new InternalError()`.
            throw new InternalError("clone of Lookahead failed", e);
        }
        return p;
    }

    /** Merge q into this lookahead: union the bitsets, epsilon flag and
     * epsilon depths; adopt q's cycle if this one has none.
     */
    public void combineWith(Lookahead q) {
        if (cycle == null) { // track at least one cycle
            cycle = q.cycle;
        }
        if (q.containsEpsilon()) {
            hasEpsilon = true;
        }
        // combine epsilon depths
        if (epsilonDepth != null) {
            if (q.epsilonDepth != null) {
                epsilonDepth.orInPlace(q.epsilonDepth);
            }
        }
        else if (q.epsilonDepth != null) {
            epsilonDepth = (BitSet)q.epsilonDepth.clone();
        }
        fset.orInPlace(q.fset);
    }

    public boolean containsEpsilon() {
        return hasEpsilon;
    }

    /** What is the intersection of two lookahead depths?
     * Only the Epsilon "bit" and bitset are considered.
     */
    public Lookahead intersection(Lookahead q) {
        Lookahead p = new Lookahead(fset.and(q.fset));
        if (this.hasEpsilon && q.hasEpsilon) {
            p.setEpsilon();
        }
        return p;
    }

    /** @return true when the set is empty and epsilon is absent */
    public boolean nil() {
        return fset.nil() && !hasEpsilon;
    }

    /** Factory: a lookahead set containing exactly one element. */
    public static Lookahead of(int el) {
        Lookahead look = new Lookahead();
        look.fset.add(el);
        return look;
    }

    public void resetEpsilon() {
        hasEpsilon = false;
    }

    public void setEpsilon() {
        hasEpsilon = true;
    }

    /** Render as: set [+<epsilon>] [; FOLLOW(cycle)] [; depths=...] */
    public String toString() {
        StringBuilder buf = new StringBuilder(fset.toString(","));
        if (containsEpsilon()) {
            buf.append("+<epsilon>");
        }
        if (cycle != null) {
            buf.append("; FOLLOW(").append(cycle).append(")");
        }
        if (epsilonDepth != null) {
            buf.append("; depths=").append(epsilonDepth.toString(","));
        }
        return buf.toString();
    }

    /** Same rendering as toString(), but with a custom separator/formatter. */
    public String toString(String separator, CharFormatter formatter) {
        StringBuilder buf = new StringBuilder(fset.toString(separator, formatter));
        if (containsEpsilon()) {
            buf.append("+<epsilon>");
        }
        if (cycle != null) {
            buf.append("; FOLLOW(").append(cycle).append(")");
        }
        if (epsilonDepth != null) {
            buf.append("; depths=").append(epsilonDepth.toString(","));
        }
        return buf.toString();
    }

    /** Lexer grammars render raw characters; others render via the token vocabulary. */
    public String toString(String separator, CharFormatter formatter, Grammar g) {
        if (g instanceof LexerGrammar) {
            return toString(separator, formatter);
        }
        else {
            return toString(separator, g.tokenManager.getVocabulary());
        }
    }

    /** Render using a token vocabulary; note epsilon is NOT shown here (as before). */
    public String toString(String separator, Vector vocab) {
        StringBuilder buf = new StringBuilder(fset.toString(separator, vocab));
        if (cycle != null) {
            buf.append("; FOLLOW(").append(cycle).append(")");
        }
        if (epsilonDepth != null) {
            buf.append("; depths=").append(epsilonDepth.toString(","));
        }
        return buf.toString();
    }
}

792
fine-antlr-old/src/main/java/com/fr/third/antlr/MakeGrammar.java

@@ -1,792 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/MakeGrammar.java#2 $
*/
import com.fr.third.antlr.collections.Stack;
import com.fr.third.antlr.collections.impl.LList;
import com.fr.third.antlr.collections.impl.Vector;
public class MakeGrammar extends DefineGrammarSymbols {
protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
protected RuleRefElement lastRuleRef;
protected RuleEndElement ruleEnd; // used if not nested
protected RuleBlock ruleBlock; // points to block of current rule.
protected int nested = 0; // nesting inside a subrule
protected boolean grammarError = false;
ExceptionSpec currentExceptionSpec = null;
/** Build grammar data structures from the parse; symbol bookkeeping is
 * inherited from DefineGrammarSymbols, analysis is delegated to analyzer_. */
public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
super(tool_, args_, analyzer_);
}
/** Abort the processing of a grammar (due to syntax errors) */
public void abortGrammar() {
    String name = (grammar != null) ? grammar.getClassName() : "unknown grammar";
    tool.error("aborting grammar '" + name + "' due to errors");
    super.abortGrammar();
}
/** Append e to the current alternative, tagging it with the enclosing rule's name. */
protected void addElementToCurrentAlt(AlternativeElement e) {
e.enclosingRuleName = ruleBlock.ruleName;
context().addAlternativeElement(e);
}
/** Start a new alternative in the current block.
 * @param doAutoGen_ whether AST auto-generation is enabled for this alt
 */
public void beginAlt(boolean doAutoGen_) {
super.beginAlt(doAutoGen_);
Alternative alt = new Alternative();
alt.setAutoGen(doAutoGen_);
context().block.addAlternative(alt);
}
/** Start a tree element's child list; modeled as one fresh alternative in the current block. */
public void beginChildList() {
super.beginChildList();
context().block.addAlternative(new Alternative());
}
/** Add an exception group to a rule (currently a no-op) */
public void beginExceptionGroup() {
super.beginExceptionGroup();
// exception groups are only legal directly on a rule, not inside a subrule
if (!(context().block instanceof RuleBlock)) {
tool.panic("beginExceptionGroup called outside of rule block");
}
}
/** Add an exception spec to an exception group or rule block */
public void beginExceptionSpec(Token label) {
// Hack the label string a bit to remove leading/trailing space.
if (label != null) {
label.setText(StringUtils.stripFront(StringUtils.stripBack(label.getText(), " \n\r\t"), " \n\r\t"));
}
super.beginExceptionSpec(label);
// Don't check for currentExceptionSpec!=null because syntax errors
// may leave it set to something.
currentExceptionSpec = new ExceptionSpec(label);
}
/** Enter a subrule: push a placeholder BlockContext holding a generic
 * AlternativeBlock (the concrete block kind is decided later) and a
 * BlockEndElement wired back to it.
 * @param label optional label for the subrule
 * @param start token marking the start of the subrule
 * @param not   true if the subrule is ~-inverted
 */
public void beginSubRule(Token label, Token start, boolean not) {
super.beginSubRule(label, start, not);
// we don't know what kind of subrule it is yet.
// push a dummy one that will allow us to collect the
// alternatives. Later, we'll switch to real object.
blocks.push(new BlockContext());
context().block = new AlternativeBlock(grammar, start, not);
context().altNum = 0; // reset alternative number
nested++;
// create a final node to which the last elememt of each
// alternative will point.
context().blockEnd = new BlockEndElement(grammar);
// make sure end node points to start of block
context().blockEnd.block = context().block;
labelElement(context().block, label);
}
/** Enter a tree element; only legal in a TreeWalkerGrammar.
 * @throws SemanticException when used outside a tree parser
 */
public void beginTree(Token tok) throws SemanticException {
if (!(grammar instanceof TreeWalkerGrammar)) {
tool.error("Trees only allowed in TreeParser", grammar.getFilename(), tok.getLine(), tok.getColumn());
throw new SemanticException("Trees only allowed in TreeParser");
}
super.beginTree(tok);
blocks.push(new TreeBlockContext());
context().block = new TreeElement(grammar, tok);
context().altNum = 0; // reset alternative number
}
/** Current subrule context: top of the block stack, or null at rule level
 * when the stack is empty. */
public BlockContext context() {
    return (blocks.height() == 0) ? null : (BlockContext)blocks.top();
}
/**Used to build nextToken() for the lexer.
 * This builds a rule which has every "public" rule in the given Vector of
 * rules as it's alternate. Each rule ref generates a Token object.
 * @param g The Grammar that is being processed
 * @param lexRules A vector of lexer rules that will be used to create an alternate block.
 * @param rname The name of the resulting rule.
 */
public static RuleBlock createNextTokenRule(Grammar g, Vector lexRules, String rname) {
// create actual rule data structure
RuleBlock rb = new RuleBlock(g, rname);
rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
RuleEndElement ruleEnd = new RuleEndElement(g);
rb.setEndElement(ruleEnd);
ruleEnd.block = rb;
// Add an alternative for each element of the rules vector.
for (int i = 0; i < lexRules.size(); i++) {
RuleSymbol r = (RuleSymbol)lexRules.elementAt(i);
if (!r.isDefined()) {
// substring(1) strips the encoded lexer-rule prefix from r.id
// (see CodeGenerator.encodeLexerRuleName) -- TODO confirm
g.antlrTool.error("Lexer rule " + r.id.substring(1) + " is not defined");
}
else {
if (r.access.equals("public")) {
Alternative alt = new Alternative(); // create alt we'll add to ref rule
RuleBlock targetRuleBlock = r.getBlock();
Vector targetRuleAlts = targetRuleBlock.getAlternatives();
// collect a sem pred if only one alt and it's at the start;
// simple, but faster to implement until real hoisting
if ( targetRuleAlts!=null && targetRuleAlts.size()==1 ) {
Alternative onlyAlt = (Alternative)targetRuleAlts.elementAt(0);
if ( onlyAlt.semPred!=null ) {
// ok, has sem pred, make this rule ref alt have a pred
alt.semPred = onlyAlt.semPred;
// REMOVE predicate from target rule??? NOPE, another
// rule other than nextToken() might invoke it.
}
}
// create a rule ref to lexer rule
// the Token is a RULE_REF not a TOKEN_REF since the
// conversion to mRulename has already taken place
RuleRefElement rr =
new RuleRefElement(g,
new CommonToken(ANTLRTokenTypes.RULE_REF, r.getId()),
GrammarElement.AUTO_GEN_NONE);
rr.setLabel("theRetToken");
rr.enclosingRuleName = "nextToken";
rr.next = ruleEnd;
alt.addElement(rr); // add rule ref to alt
alt.setAutoGen(true); // keep text of elements
rb.addAlternative(alt); // add alt to rule block
r.addReference(rr); // track ref to this rule in rule blk
}
}
}
rb.setAutoGen(true); // keep text of elements
rb.prepareForAnalysis();
//System.out.println(rb);
return rb;
}
/** Return block as if they had typed: "( rule )?" */
private AlternativeBlock createOptionalRuleRef(String rule, Token start) {
    // Make the subrule
    AlternativeBlock blk = new AlternativeBlock(grammar, start, false);

    // Make sure rule is defined
    String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
    if (!grammar.isDefined(mrule)) {
        grammar.define(new RuleSymbol(mrule));
    }

    // Make the rule ref element
    // RK: fixme probably easier to abuse start token..
    Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
    t.setLine(start.getLine());
    // BUG FIX: was `t.setLine(start.getColumn())`, which clobbered the line
    // with the column and left the column unset, producing wrong error
    // locations for this synthesized token.
    t.setColumn(start.getColumn());
    RuleRefElement rref =
        new RuleRefElement(grammar, t, GrammarElement.AUTO_GEN_NONE);
    rref.enclosingRuleName = ruleBlock.ruleName;

    // Make the end of block element; end block points back to start of blk
    BlockEndElement end = new BlockEndElement(grammar);
    end.block = blk;

    // Make an alternative, putting the rule ref into it;
    // last element in alt points to end of block
    Alternative alt = new Alternative(rref);
    alt.addElement(end);
    blk.addAlternative(alt);

    // create an empty (optional) alt and add to blk
    Alternative optAlt = new Alternative();
    optAlt.addElement(end); // points immediately to end of block
    blk.addAlternative(optAlt);

    blk.prepareForAnalysis();
    return blk;
}
/** Define a rule and set up its RuleBlock as the current rule context.
 * Lexer rules must appear as TOKEN_REFs (upper case) and parser rules must
 * not; mis-cased names are reported via tool.error and the token text is
 * patched so processing can continue.
 * @throws SemanticException propagated from the superclass definition step
 */
public void defineRuleName(Token r,
String access,
boolean ruleAutoGen,
String docComment)
throws SemanticException {
// if ( Character.isUpperCase(r.getText().charAt(0)) ) {
if (r.type == ANTLRTokenTypes.TOKEN_REF) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Lexical rule " + r.getText() +
" defined outside of lexer",
grammar.getFilename(), r.getLine(), r.getColumn());
r.setText(r.getText().toLowerCase());
}
}
else {
if (grammar instanceof LexerGrammar) {
tool.error("Lexical rule names must be upper case, '" + r.getText() +
"' is not",
grammar.getFilename(), r.getLine(), r.getColumn());
r.setText(r.getText().toUpperCase());
}
}
super.defineRuleName(r, access, ruleAutoGen, docComment);
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(), ruleAutoGen);
// Lexer rules do not generate default error handling
rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());
ruleBlock = rb;
blocks.push(new BlockContext()); // enter new context
context().block = rb;
rs.setBlock(rb);
ruleEnd = new RuleEndElement(grammar);
rb.setEndElement(ruleEnd);
nested = 0;
}
/** Finish the current alternative: link its last element to the rule end
 * (at rule level) or to the enclosing block's end (when nested). */
public void endAlt() {
super.endAlt();
if (nested == 0) { // all rule-level alts link to ruleEnd node
addElementToCurrentAlt(ruleEnd);
}
else {
addElementToCurrentAlt(context().blockEnd);
}
context().altNum++;
}
/** Finish a tree element's child list. */
public void endChildList() {
super.endChildList();
// create a final node to which the last elememt of the single
// alternative will point. Done for compatibility with analyzer.
// Does NOT point to any block like alternative blocks because the
// TreeElement is not a block. This is used only as a placeholder.
BlockEndElement be = new BlockEndElement(grammar);
be.block = context().block;
addElementToCurrentAlt(be);
}
/** End an exception group; no bookkeeping beyond the superclass hook. */
public void endExceptionGroup() {
super.endExceptionGroup();
}
/** Finish the current exception spec, attaching it to the rule block or,
 * for a plain alternative block, to the current alternative (erroring if
 * the alternative already has one). */
public void endExceptionSpec() {
super.endExceptionSpec();
if (currentExceptionSpec == null) {
tool.panic("exception processing internal error -- no active exception spec");
}
if (context().block instanceof RuleBlock) {
// Named rule
((RuleBlock)context().block).addExceptionSpec(currentExceptionSpec);
}
else {
// It must be a plain-old alternative block
if (context().currentAlt().exceptionSpec != null) {
tool.error("Alternative already has an exception specification", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
}
else {
context().currentAlt().exceptionSpec = currentExceptionSpec;
}
}
currentExceptionSpec = null;
}
/** Called at the end of processing a grammar */
public void endGrammar() {
    if (grammarError) {
        abortGrammar();
        return;
    }
    super.endGrammar();
}
/** Finish the current rule: pop its context and wire the rule-end node
 * back to the rule's block before analysis. */
public void endRule(String rule) {
super.endRule(rule);
BlockContext ctx = (BlockContext)blocks.pop(); // remove scope
// record the start of this block in the ending node
ruleEnd.block = ctx.block;
ruleEnd.block.prepareForAnalysis();
//System.out.println(ctx.block);
}
/** Finish the current subrule: pop its context, validate ~-inversion,
 * and either record it as a syntactic predicate on the enclosing alt or
 * append it as a regular element. */
public void endSubRule() {
super.endSubRule();
nested--;
// remove subrule context from scope stack
BlockContext ctx = (BlockContext)blocks.pop();
AlternativeBlock block = ctx.block;
// If the subrule is marked with ~, check that it is
// a valid candidate for analysis
if (
block.not &&
!(block instanceof SynPredBlock) &&
!(block instanceof ZeroOrMoreBlock) &&
!(block instanceof OneOrMoreBlock)
) {
if (!analyzer.subruleCanBeInverted(block, grammar instanceof LexerGrammar)) {
String newline = System.getProperty("line.separator");
tool.error(
"This subrule cannot be inverted. Only subrules of the form:" + newline +
" (T1|T2|T3...) or" + newline +
" ('c1'|'c2'|'c3'...)" + newline +
"may be inverted (ranges are also allowed).",
grammar.getFilename(),
block.getLine(), block.getColumn()
);
}
}
// add the subrule as element if not a syn pred
if (block instanceof SynPredBlock) {
// record a reference to the recently-recognized syn pred in the
// enclosing block.
SynPredBlock synpred = (SynPredBlock)block;
context().block.hasASynPred = true;
context().currentAlt().synPred = synpred;
grammar.hasSyntacticPredicate = true;
synpred.removeTrackingOfRuleRefs(grammar);
}
else {
addElementToCurrentAlt(block);
}
ctx.blockEnd.block.prepareForAnalysis();
}
/** Finish a tree construct: pop its block context and append the
 *  completed TreeElement to the enclosing alternative. */
public void endTree() {
    super.endTree();
    BlockContext finished = (BlockContext)blocks.pop();
    // the popped context holds the completed TreeElement block
    addElementToCurrentAlt(finished.block);
}
/** Remember that a major error occurred in the grammar;
 *  endGrammar() consults this flag to abort instead of finishing. */
public void hasError() {
    grammarError = true;
}
/** Attach a label to an element, rejecting duplicate label names
 *  within the current rule. A null label is a no-op. */
private void labelElement(AlternativeElement el, Token label) {
    if (label == null) {
        return;
    }
    String text = label.getText();
    // Reject the label if this rule already defines one with the same name.
    for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
        AlternativeElement prior = (AlternativeElement)ruleBlock.labeledElements.elementAt(i);
        if (prior.getLabel() != null && prior.getLabel().equals(text)) {
            tool.error("Label '" + text + "' has already been defined", grammar.getFilename(), label.getLine(), label.getColumn());
            return;
        }
    }
    // record the newly labeled element in the rule's list
    el.setLabel(text);
    ruleBlock.labeledElements.appendElement(el);
}
/** Disable automatic AST construction for the current subrule block. */
public void noAutoGenSubRule() {
    context().block.setAutoGen(false);
}
/** Convert the current subrule into a (...)+ closure.
 *  Copies the already-parsed alternatives into a OneOrMoreBlock and
 *  swaps it in at the top of the block stack, keeping the old end node.
 *  Fix: the '~' error message previously said "(...)*" — this method
 *  handles the one-or-more (...)+ construct. */
public void oneOrMoreSubRule() {
    if (context().block.not) {
        // '~' inversion is only meaningful on simple atom subrules
        tool.error("'~' cannot be applied to (...)+ subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
    }
    // create the right kind of object now that we know what that is
    // and switch the list of alternatives. Adjust the stack of blocks.
    // copy any init action also.
    OneOrMoreBlock b = new OneOrMoreBlock(grammar);
    setBlock(b, context().block);
    BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
    blocks.push(new BlockContext());
    context().block = b;
    context().blockEnd = old.blockEnd;
    context().blockEnd.block = b;
}
/** Handle an optional (...)? subrule by appending an empty alternative,
 *  i.e. rewriting (X)? as (X|) so no special optional handling is needed. */
public void optionalSubRule() {
    if (context().block.not) {
        tool.error("'~' cannot be applied to (...)? subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
    }
    // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
    // It already thinks that we have a simple subrule, just add option block.
    beginAlt(false);
    endAlt();
}
/** Embed an inline action in the current alternative and mark the
 *  enclosing block as containing an action. */
public void refAction(Token action) {
    super.refAction(action);
    ActionElement elem = new ActionElement(grammar, action);
    context().block.hasAnAction = true;
    addElementToCurrentAlt(elem);
}
/** Record the user-declared throws clause on the current rule block. */
public void setUserExceptions(String thr) {
    ((RuleBlock)context().block).throwsSpec = thr;
}
// Only called for rule blocks
/** Record the argument action (formal parameter text) of the current rule. */
public void refArgAction(Token action) {
    ((RuleBlock)context().block).argAction = action.getText();
}
/** Reference a character literal; only valid in a lexer grammar.
 *  Adds the (optionally inverted, optionally labeled) literal to the
 *  current alternative and, if the rule has an ignore option, a trailing
 *  optional call to the ignore rule. */
public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
    if (!(grammar instanceof LexerGrammar)) {
        tool.error("Character literal only valid in lexer", grammar.getFilename(), lit.getLine(), lit.getColumn());
        return;
    }
    super.refCharLiteral(lit, label, inverted, autoGenType, lastInRule);
    CharLiteralElement cl = new CharLiteralElement((LexerGrammar)grammar, lit, inverted, autoGenType);
    // Generate a warning for non-lowercase ASCII when case-insensitive
    if (
        !((LexerGrammar)grammar).caseSensitive && cl.getType() < 128 &&
        Character.toLowerCase((char)cl.getType()) != (char)cl.getType()
    ) {
        tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
    }
    addElementToCurrentAlt(cl);
    labelElement(cl, label);
    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
    }
}
/** Reference a character range 'a'..'b'; only valid in a lexer grammar.
 *  Validates the range ordering, warns about non-lowercase bounds when
 *  caseSensitive=false, and appends the range to the current alternative. */
public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
    if (!(grammar instanceof LexerGrammar)) {
        tool.error("Character range only valid in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
        return;
    }
    int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
    int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
    if (rangeMax < rangeMin) {
        tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
        return;
    }
    // Generate a warning for non-lowercase ASCII when case-insensitive
    if (!((LexerGrammar)grammar).caseSensitive) {
        if (rangeMin < 128 && Character.toLowerCase((char)rangeMin) != (char)rangeMin) {
            tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t1.getLine(), t1.getColumn());
        }
        if (rangeMax < 128 && Character.toLowerCase((char)rangeMax) != (char)rangeMax) {
            tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t2.getLine(), t2.getColumn());
        }
    }
    super.refCharRange(t1, t2, label, autoGenType, lastInRule);
    CharRangeElement cr = new CharRangeElement((LexerGrammar)grammar, t1, t2, autoGenType);
    addElementToCurrentAlt(cr);
    labelElement(cr, label);
    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
    }
}
/** Apply an element option to a symbol declared in the tokens {...}
 *  section. Only the "AST" option (AST node type) is recognized.
 *  Fix: the panic message was missing a space, producing e.g.
 *  "cannot find FOOin tokens {...}".
 *  @param tok    token whose symbol is being configured
 *  @param option option name (only "AST" is recognized)
 *  @param value  option value (the AST node type name)
 */
public void refTokensSpecElementOption(Token tok,
                                       Token option,
                                       Token value) {
    TokenSymbol ts = (TokenSymbol)
        grammar.tokenManager.getTokenSymbol(tok.getText());
    if (ts == null) {
        // internal error: tokens{} parsing should already have defined the symbol
        tool.panic("cannot find " + tok.getText() + " in tokens {...}");
    }
    if (option.getText().equals("AST")) {
        ts.setASTNodeType(value.getText());
    }
    else {
        grammar.antlrTool.error("invalid tokens {...} element option:" +
                                option.getText(),
                                grammar.getFilename(),
                                option.getLine(), option.getColumn());
    }
}
/** Set an element option on the most recently added element of the
 *  current alternative; only string literals, token refs, and wildcards
 *  (GrammarAtom subtypes) accept options. */
public void refElementOption(Token option, Token value) {
    AlternativeElement e = context().currentElement();
    if (e instanceof StringLiteralElement ||
        e instanceof TokenRefElement ||
        e instanceof WildcardElement) {
        ((GrammarAtom)e).setOption(option, value);
    }
    else {
        tool.error("cannot use element option (" + option.getText() +
                   ") for this kind of element",
                   grammar.getFilename(), option.getLine(), option.getColumn());
    }
}
/** Add an exception handler to the currently open exception spec.
 *  Panics if no spec is active (internal consistency check). */
public void refExceptionHandler(Token exTypeAndName, Token action) {
    super.refExceptionHandler(exTypeAndName, action);
    if (currentExceptionSpec == null) {
        tool.panic("exception handler processing internal error");
    }
    currentExceptionSpec.addHandler(new ExceptionHandler(exTypeAndName, action));
}
/** Attach an init action (the action before a block's first alt) to the
 *  current block.
 *  NOTE(review): delegates to super.refAction rather than a dedicated
 *  refInitAction hook — this matches upstream ANTLR; confirm intentional. */
public void refInitAction(Token action) {
    super.refAction(action);
    context().block.setInitAction(action.getText());
}
/** Record the class-member action block ({...} after the class header). */
public void refMemberAction(Token act) {
    grammar.classMemberAction = act;
}
/** Preamble action (before the class definition); superclass handles it. */
public void refPreambleAction(Token act) {
    super.refPreambleAction(act);
}
// Only called for rule blocks
/** Record a rule's return-value declaration. Public lexer rules may not
 *  declare a return type (they implicitly return the matched token), so
 *  a warning is issued and the declaration is dropped in that case. */
public void refReturnAction(Token returnAction) {
    if (grammar instanceof LexerGrammar) {
        String name = CodeGenerator.encodeLexerRuleName(((RuleBlock)context().block).getRuleName());
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(name);
        if (rs.access.equals("public")) {
            tool.warning("public Lexical rules cannot specify return type", grammar.getFilename(), returnAction.getLine(), returnAction.getColumn());
            return;
        }
    }
    ((RuleBlock)context().block).returnAction = returnAction.getText();
}
/** Reference a rule from within an alternative.
 *  In a lexer, only token (lexer-rule) references are allowed and AST
 *  operators are rejected. The reference element is added to the current
 *  alternative and registered with the rule's symbol-table entry.
 *  @param idAssign    optional id for "id=rule" assignment, or null
 *  @param r           the referenced rule's token
 *  @param label       optional element label, or null
 *  @param args        optional argument action, or null
 *  @param autoGenType AST auto-generation mode for this reference
 */
public void refRule(Token idAssign,
                    Token r,
                    Token label,
                    Token args,
                    int autoGenType) {
    // Disallow parser rule references in the lexer
    if (grammar instanceof LexerGrammar) {
        // token type (not capitalization) distinguishes lexer vs parser rules
        if (r.type != ANTLRTokenTypes.TOKEN_REF) {
            tool.error("Parser rule " + r.getText() + " referenced in lexer");
            return;
        }
        if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
            tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), r.getLine(), r.getColumn());
        }
    }
    super.refRule(idAssign, r, label, args, autoGenType);
    lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
    if (args != null) {
        lastRuleRef.setArgs(args.getText());
    }
    if (idAssign != null) {
        lastRuleRef.setIdAssign(idAssign.getText());
    }
    addElementToCurrentAlt(lastRuleRef);
    String id = r.getText();
    if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
        id = CodeGenerator.encodeLexerRuleName(id);
    }
    // update symbol table so it knows what nodes reference the rule.
    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
    rs.addReference(lastRuleRef);
    labelElement(lastRuleRef, label);
}
/** Reference a semantic predicate. A predicate at the very start of an
 *  alternative becomes that alt's disambiguating semPred; anywhere else
 *  it is embedded as a validating action element. */
public void refSemPred(Token pred) {
    super.refSemPred(pred);
    if (context().currentAlt().atStart()) {
        // leading predicate: used by the analyzer to disambiguate the alt
        context().currentAlt().semPred = pred.getText();
    }
    else {
        // mid-alternative predicate: executed/validated like an action
        ActionElement a = new ActionElement(grammar, pred);
        a.isSemPred = true;
        addElementToCurrentAlt(a);
    }
}
/** Reference a string literal. Rejects ^ in tree-walkers, warns about
 *  non-lowercase characters when a lexer is case-insensitive, and adds
 *  the literal (plus any ignore-rule call) to the current alternative. */
public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
    super.refStringLiteral(lit, label, autoGenType, lastInRule);
    if (grammar instanceof TreeWalkerGrammar && autoGenType == GrammarElement.AUTO_GEN_CARET) {
        tool.error("^ not allowed in here for tree-walker", grammar.getFilename(), lit.getLine(), lit.getColumn());
    }
    StringLiteralElement sl = new StringLiteralElement(grammar, lit, autoGenType);
    // If case-insensitive, then check each char of the string literal
    // (skip the surrounding quote characters at index 0 and length-1)
    if (grammar instanceof LexerGrammar && !((LexerGrammar)grammar).caseSensitive) {
        for (int i = 1; i < lit.getText().length() - 1; i++) {
            char c = lit.getText().charAt(i);
            if (c < 128 && Character.toLowerCase(c) != c) {
                tool.warning("Characters of string literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
                break;
            }
        }
    }
    addElementToCurrentAlt(sl);
    labelElement(sl, label);
    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
    }
}
/** Reference a token. In a lexer this is really a rule reference (after
 *  rejecting ^ and ~ forms); in a parser it becomes a TokenRefElement,
 *  where assignment and arguments are disallowed. */
public void refToken(Token idAssign, Token t, Token label, Token args,
                     boolean inverted, int autoGenType, boolean lastInRule) {
    if (grammar instanceof LexerGrammar) {
        // In lexer, token references are really rule references
        if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
            tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
        }
        if (inverted) {
            tool.error("~TOKEN is not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
        }
        refRule(idAssign, t, label, args, autoGenType);
        // if ignore option is set, must add an optional call to the specified token rule.
        String ignore = ruleBlock.getIgnoreRule();
        if (!lastInRule && ignore != null) {
            addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
        }
    }
    else {
        // Cannot have token ref args or assignment outside of lexer
        if (idAssign != null) {
            tool.error("Assignment from token reference only allowed in lexer", grammar.getFilename(), idAssign.getLine(), idAssign.getColumn());
        }
        if (args != null) {
            tool.error("Token reference arguments only allowed in lexer", grammar.getFilename(), args.getLine(), args.getColumn());
        }
        super.refToken(idAssign, t, label, args, inverted, autoGenType, lastInRule);
        TokenRefElement te = new TokenRefElement(grammar, t, inverted, autoGenType);
        addElementToCurrentAlt(te);
        labelElement(te, label);
    }
}
/** Reference a token range T1..T2; not allowed in a lexer.
 *  Validates that the range is well-ordered before adding it. */
public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
    if (grammar instanceof LexerGrammar) {
        tool.error("Token range not allowed in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
        return;
    }
    super.refTokenRange(t1, t2, label, autoGenType, lastInRule);
    TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2, autoGenType);
    if (tr.end < tr.begin) {
        tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
        return;
    }
    addElementToCurrentAlt(tr);
    labelElement(tr, label);
}
/** Record a tree specifier (#(...) template) on the current alternative. */
public void refTreeSpecifier(Token treeSpec) {
    context().currentAlt().treeSpecifier = treeSpec;
}
/** Add a wildcard ('.') element to the current alternative,
 *  labeling it when a label token is supplied. */
public void refWildcard(Token t, Token label, int autoGenType) {
    super.refWildcard(t, label, autoGenType);
    WildcardElement wildcard = new WildcardElement(grammar, t, autoGenType);
    addElementToCurrentAlt(wildcard);
    labelElement(wildcard, label);
}
/** Get ready to process a new grammar: clear the block-context stack
 *  and all per-grammar state accumulated by the previous run. */
public void reset() {
    super.reset();
    blocks = new LList();
    lastRuleRef = null;
    ruleEnd = null;
    ruleBlock = null;
    nested = 0;
    currentExceptionSpec = null;
    grammarError = false;
}
/** Attach an argument action to the most recently referenced rule. */
public void setArgOfRuleRef(Token argAction) {
    super.setArgOfRuleRef(argAction);
    lastRuleRef.setArgs(argAction.getText());
}
/** Copy the alternative list and per-block bookkeeping from src into b.
 *  Used when a generic AlternativeBlock must be re-typed (e.g. into a
 *  closure or syn-pred block) after its contents have been parsed. */
public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
    b.setAlternatives(src.getAlternatives());
    b.initAction = src.initAction;
    b.label = src.label;
    b.hasASynPred = src.hasASynPred;
    b.hasAnAction = src.hasAnAction;
    b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
    b.generateAmbigWarnings = src.generateAmbigWarnings;
    b.line = src.line;
    b.greedy = src.greedy;
    b.greedySet = src.greedySet;
}
/** Set an options{...} entry on the current rule. */
public void setRuleOption(Token key, Token value) {
    ruleBlock.setOption(key, value);
}
/** Set an options{...} entry on the current subrule block. */
public void setSubruleOption(Token key, Token value) {
    ((AlternativeBlock)context().block).setOption(key, value);
}
/** Convert the current subrule into a syntactic predicate block.
 *  Copies the already-parsed alternatives into a SynPredBlock and swaps
 *  it in at the top of the block stack, keeping the old end node. */
public void synPred() {
    if (context().block.not) {
        tool.error("'~' cannot be applied to syntactic predicate", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
    }
    // create the right kind of object now that we know what that is
    // and switch the list of alternatives. Adjust the stack of blocks.
    // copy any init action also.
    SynPredBlock b = new SynPredBlock(grammar);
    setBlock(b, context().block);
    BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
    blocks.push(new BlockContext());
    context().block = b;
    context().blockEnd = old.blockEnd;
    context().blockEnd.block = b;
}
/** Convert the current subrule into a (...)* closure.
 *  Copies the already-parsed alternatives into a ZeroOrMoreBlock and
 *  swaps it in at the top of the block stack, keeping the old end node.
 *  Fix: the '~' error message previously said "(...)+" — this method
 *  handles the zero-or-more (...)* construct. */
public void zeroOrMoreSubRule() {
    if (context().block.not) {
        // '~' inversion is only meaningful on simple atom subrules
        tool.error("'~' cannot be applied to (...)* subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
    }
    // create the right kind of object now that we know what that is
    // and switch the list of alternatives. Adjust the stack of blocks.
    // copy any init action also.
    ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
    setBlock(b, context().block);
    BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
    blocks.push(new BlockContext());
    context().block = b;
    context().blockEnd = old.blockEnd;
    context().blockEnd.block = b;
}
}

146
fine-antlr-old/src/main/java/com/fr/third/antlr/MismatchedCharException.java

@ -1,146 +0,0 @@
package com.fr.third.antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.cs.usfca.edu
* Software rights: http://www.antlr.org/license.html
*
* $Id: //depot/code/org.antlr/release/antlr-2.7.7/antlr/MismatchedCharException.java#2 $
*/
import com.fr.third.antlr.collections.impl.BitSet;
/** Thrown by a generated lexer when the input character (or character
 *  class/range) does not match what the scanner expected. The mismatch
 *  kind is recorded in {@link #mismatchType}; which of the other fields
 *  are meaningful depends on that kind. */
public class MismatchedCharException extends RecognitionException {
    // Types of chars
    public static final int CHAR = 1;
    public static final int NOT_CHAR = 2;
    public static final int RANGE = 3;
    public static final int NOT_RANGE = 4;
    public static final int SET = 5;
    public static final int NOT_SET = 6;
    // One of the above
    public int mismatchType;
    // what was found on the input stream
    public int foundChar;
    // For CHAR/NOT_CHAR and RANGE/NOT_RANGE
    public int expecting;
    // For RANGE/NOT_RANGE (expecting is lower bound of range)
    public int upper;
    // For SET/NOT_SET
    public BitSet set;
    // who knows...they may want to ask scanner questions
    public CharScanner scanner;
    /**
     * MismatchedCharException constructor comment.
     */
    public MismatchedCharException() {
        super("Mismatched char");
    }
    // Expected range / not range
    public MismatchedCharException(char c, char lower, char upper_, boolean matchNot, CharScanner scanner_) {
        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
        mismatchType = matchNot ? NOT_RANGE : RANGE;
        foundChar = c;
        expecting = lower;
        upper = upper_;
        scanner = scanner_;
    }
    // Expected token / not token
    public MismatchedCharException(char c, char expecting_, boolean matchNot, CharScanner scanner_) {
        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
        mismatchType = matchNot ? NOT_CHAR : CHAR;
        foundChar = c;
        expecting = expecting_;
        scanner = scanner_;
    }
    // Expected BitSet / not BitSet
    public MismatchedCharException(char c, BitSet set_, boolean matchNot, CharScanner scanner_) {
        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
        mismatchType = matchNot ? NOT_SET : SET;
        foundChar = c;
        set = set_;
        scanner = scanner_;
    }
    /**
     * Returns a clean error message (no line number/column information).
     * The text produced is dispatched on {@link #mismatchType}.
     */
    public String getMessage() {
        StringBuffer sb = new StringBuffer();
        switch (mismatchType) {
            case CHAR:
                sb.append("expecting "); appendCharName(sb, expecting);
                sb.append(", found "); appendCharName(sb, foundChar);
                break;
            case NOT_CHAR:
                sb.append("expecting anything but '");
                appendCharName(sb, expecting);
                sb.append("'; got it anyway");
                break;
            case RANGE:
            case NOT_RANGE:
                sb.append("expecting token ");
                if (mismatchType == NOT_RANGE)
                    sb.append("NOT ");
                sb.append("in range: ");
                appendCharName(sb, expecting);
                sb.append("..");
                appendCharName(sb, upper);
                sb.append(", found ");
                appendCharName(sb, foundChar);
                break;
            case SET:
            case NOT_SET:
                sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of (");
                int[] elems = set.toArray();
                for (int i = 0; i < elems.length; i++) {
                    appendCharName(sb, elems[i]);
                }
                sb.append("), found ");
                appendCharName(sb, foundChar);
                break;
            default :
                // unknown mismatch kind: fall back to the base-class message
                sb.append(super.getMessage());
                break;
        }
        return sb.toString();
    }
    /** Append a char to the msg buffer. If special,
     * then show escaped version
     */
    private void appendCharName(StringBuffer sb, int c) {
        switch (c) {
            case 65535 :
                // 65535 = (char) -1 = EOF
                sb.append("'<EOF>'");
                break;
            case '\n' :
                sb.append("'\\n'");
                break;
            case '\r' :
                sb.append("'\\r'");
                break;
            case '\t' :
                sb.append("'\\t'");
                break;
            default :
                sb.append('\'');
                sb.append((char) c);
                sb.append('\'');
                break;
        }
    }
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save