Add full source for antlr project (version 3.4)
This replaces the existing source for an unknown version of the antlr-runtime
with the full source for the antlr tool. However, we are still building
just the runtime jar, not the full tool.
The full tool will be included as a prebuilt jar, due to the complexity
of building it.
Since we will have both the full tool and the runtime jar in the Android tree,
the module name for the runtime jar has been changed from "antlr" to
"antlr-runtime".
Change-Id: I38d5f3e5e82392dc122f46bf7961aab5b42e40c5
Signed-off-by: Ben Gruver <[email protected]>
diff --git a/antlr-3.4/runtime/Python/tests/t001lexer.g b/antlr-3.4/runtime/Python/tests/t001lexer.g
new file mode 100644
index 0000000..f92b958
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t001lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t001lexer;
+options {
+ language = Python;
+}
+
+ZERO: '0';
diff --git a/antlr-3.4/runtime/Python/tests/t001lexer.py b/antlr-3.4/runtime/Python/tests/t001lexer.py
new file mode 100644
index 0000000..3228235
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t001lexer.py
@@ -0,0 +1,57 @@
+import antlr3
+import testbase
+import unittest
+
+class t001lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('0')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.ZERO)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.EOF)
+
+
+ def testIteratorInterface(self):
+ stream = antlr3.StringStream('0')
+ lexer = self.getLexer(stream)
+
+ types = [token.type for token in lexer]
+
+ self.failUnlessEqual(types, [self.lexerModule.ZERO])
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('1')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ self.fail()
+
+ except antlr3.MismatchedTokenException, exc:
+ self.failUnlessEqual(exc.expecting, '0')
+ self.failUnlessEqual(exc.unexpectedType, '1')
+
+
+if __name__ == '__main__':
+ unittest.main()
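The pattern above recurs through the whole suite: testbase.ANTLRTest compiles the grammar on the fly, and lexerClass() swaps in a subclass that silences emitErrorMessage and turns reportError into a bare raise, so malformed input surfaces as a catchable exception instead of triggering recovery. Outside the harness the same strict lexer can be driven by hand. A minimal sketch in the suite's own Python 2 style, assuming ANTLR's usual naming where "lexer grammar t001lexer" generates a t001lexerLexer module holding the lexer class and the token-type constants:

    import antlr3
    import t001lexerLexer as lexmod   # assumed generated module name

    class StrictLexer(lexmod.t001lexerLexer):
        def emitErrorMessage(self, msg):
            pass        # drop formatted messages; tests assert on exceptions

        def reportError(self, re):
            raise re    # fail fast instead of attempting recovery

    lexer = StrictLexer(antlr3.StringStream('0'))
    assert lexer.nextToken().type == lexmod.ZERO
    assert lexer.nextToken().type == lexmod.EOF

Note also testIteratorInterface above: the lexer object is directly iterable, and as the expected list shows, iteration stops before the EOF token.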
diff --git a/antlr-3.4/runtime/Python/tests/t002lexer.g b/antlr-3.4/runtime/Python/tests/t002lexer.g
new file mode 100644
index 0000000..53b67a9
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t002lexer.g
@@ -0,0 +1,7 @@
+lexer grammar t002lexer;
+options {
+ language = Python;
+}
+
+ZERO: '0';
+ONE: '1';
diff --git a/antlr-3.4/runtime/Python/tests/t002lexer.py b/antlr-3.4/runtime/Python/tests/t002lexer.py
new file mode 100644
index 0000000..c2c03ba
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t002lexer.py
@@ -0,0 +1,50 @@
+import antlr3
+import testbase
+import unittest
+
+class t002lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('01')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.ZERO)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.ONE)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.EOF)
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('2')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ self.fail()
+
+ except antlr3.NoViableAltException, exc:
+ self.failUnlessEqual(exc.unexpectedType, '2')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t003lexer.g b/antlr-3.4/runtime/Python/tests/t003lexer.g
new file mode 100644
index 0000000..0e85e11
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t003lexer.g
@@ -0,0 +1,8 @@
+lexer grammar t003lexer;
+options {
+ language = Python;
+}
+
+ZERO: '0';
+ONE: '1';
+FOOZE: 'fooze';
diff --git a/antlr-3.4/runtime/Python/tests/t003lexer.py b/antlr-3.4/runtime/Python/tests/t003lexer.py
new file mode 100644
index 0000000..3a32955
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t003lexer.py
@@ -0,0 +1,53 @@
+import antlr3
+import testbase
+import unittest
+
+class t003lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('0fooze1')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.ZERO)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.FOOZE)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.ONE)
+
+ token = lexer.nextToken()
+ self.failUnlessEqual(token.type, self.lexerModule.EOF)
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('2')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ self.fail()
+
+ except antlr3.NoViableAltException, exc:
+ self.failUnlessEqual(exc.unexpectedType, '2')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t004lexer.g b/antlr-3.4/runtime/Python/tests/t004lexer.g
new file mode 100644
index 0000000..c39d10d
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t004lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t004lexer;
+options {
+ language = Python;
+}
+
+FOO: 'f' 'o'*;
diff --git a/antlr-3.4/runtime/Python/tests/t004lexer.py b/antlr-3.4/runtime/Python/tests/t004lexer.py
new file mode 100644
index 0000000..52b444c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t004lexer.py
@@ -0,0 +1,70 @@
+import antlr3
+import testbase
+import unittest
+
+class t004lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('ffofoofooo')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 0, token.start
+ assert token.stop == 0, token.stop
+ assert token.text == 'f', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 1, token.start
+ assert token.stop == 2, token.stop
+ assert token.text == 'fo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 3, token.start
+ assert token.stop == 5, token.stop
+ assert token.text == 'foo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 6, token.start
+ assert token.stop == 9, token.stop
+ assert token.text == 'fooo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('2')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ self.fail()
+
+ except antlr3.MismatchedTokenException, exc:
+ self.failUnlessEqual(exc.expecting, 'f')
+ self.failUnlessEqual(exc.unexpectedType, '2')
+
+
+if __name__ == '__main__':
+ unittest.main()
+
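t004 is the first test to pin down token.start and token.stop: both are inclusive 0-based character offsets into the input stream, so len(token.text) == stop - start + 1 and the text can be recovered by slicing. A quick sketch of that invariant, reusing the StrictLexer idea from the t001 sketch over this grammar's generated lexer (module name again assumed):

    input = 'ffofoofooo'
    lexer = StrictLexer(antlr3.StringStream(input))   # StrictLexer as sketched above
    while True:
        tok = lexer.nextToken()
        if tok.type == lexmod.EOF:
            break
        # start/stop are inclusive offsets, hence the stop + 1 in the slice
        assert tok.text == input[tok.start:tok.stop + 1], tok.text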
diff --git a/antlr-3.4/runtime/Python/tests/t005lexer.g b/antlr-3.4/runtime/Python/tests/t005lexer.g
new file mode 100644
index 0000000..f9cc681
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t005lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t005lexer;
+options {
+ language = Python;
+}
+
+FOO: 'f' 'o'+;
diff --git a/antlr-3.4/runtime/Python/tests/t005lexer.py b/antlr-3.4/runtime/Python/tests/t005lexer.py
new file mode 100644
index 0000000..667083e
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t005lexer.py
@@ -0,0 +1,75 @@
+import antlr3
+import testbase
+import unittest
+
+class t005lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('fofoofooo')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 0, token.start
+ assert token.stop == 1, token.stop
+ assert token.text == 'fo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 2, token.start
+ assert token.stop == 4, token.stop
+ assert token.text == 'foo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 5, token.start
+ assert token.stop == 8, token.stop
+ assert token.text == 'fooo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput1(self):
+ stream = antlr3.StringStream('2')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ raise AssertionError
+
+ except antlr3.MismatchedTokenException, exc:
+ assert exc.expecting == 'f', repr(exc.expecting)
+ assert exc.unexpectedType == '2', repr(exc.unexpectedType)
+
+
+ def testMalformedInput2(self):
+ stream = antlr3.StringStream('f')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ raise AssertionError
+
+ except antlr3.EarlyExitException, exc:
+ assert exc.unexpectedType == antlr3.EOF, repr(exc.unexpectedType)
+
+
+if __name__ == '__main__':
+ unittest.main()
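t005 tightens t004's 'o'* to 'o'+, which changes the failure mode: a lone 'f' at end of input now raises EarlyExitException, because the (...)+ loop must match at least once but sees only EOF, while a wrong first character still raises MismatchedTokenException. Distinguishing the two in a driver might look like this sketch (Python 2 except syntax, matching the suite):

    lexer = StrictLexer(antlr3.StringStream('f'))   # over the t005 grammar
    try:
        lexer.nextToken()
        raise AssertionError('lexer accepted incomplete input')
    except antlr3.MismatchedTokenException, exc:
        # wrong character where exactly one was possible (e.g. input '2')
        print('expected %r, got %r' % (exc.expecting, exc.unexpectedType))
    except antlr3.EarlyExitException, exc:
        # a (...)+ loop matched zero times; here unexpectedType is antlr3.EOF
        print('early exit on %r' % exc.unexpectedType)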
diff --git a/antlr-3.4/runtime/Python/tests/t006lexer.g b/antlr-3.4/runtime/Python/tests/t006lexer.g
new file mode 100644
index 0000000..ad93cb4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t006lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t006lexer;
+options {
+ language = Python;
+}
+
+FOO: 'f' ('o' | 'a')*;
diff --git a/antlr-3.4/runtime/Python/tests/t006lexer.py b/antlr-3.4/runtime/Python/tests/t006lexer.py
new file mode 100644
index 0000000..a4f845b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t006lexer.py
@@ -0,0 +1,61 @@
+import antlr3
+import testbase
+import unittest
+
+class t006lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('fofaaooa')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 0, token.start
+ assert token.stop == 1, token.stop
+ assert token.text == 'fo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 2, token.start
+ assert token.stop == 7, token.stop
+ assert token.text == 'faaooa', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('fofoaooaoa2')
+ lexer = self.getLexer(stream)
+
+ lexer.nextToken()
+ lexer.nextToken()
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.MismatchedTokenException, exc:
+ assert exc.expecting == 'f', repr(exc.expecting)
+ assert exc.unexpectedType == '2', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 10, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t007lexer.g b/antlr-3.4/runtime/Python/tests/t007lexer.g
new file mode 100644
index 0000000..b5651d5
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t007lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t007lexer;
+options {
+ language = Python;
+}
+
+FOO: 'f' ('o' | 'a' 'b'+)*;
diff --git a/antlr-3.4/runtime/Python/tests/t007lexer.py b/antlr-3.4/runtime/Python/tests/t007lexer.py
new file mode 100644
index 0000000..440657b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t007lexer.py
@@ -0,0 +1,59 @@
+import antlr3
+import testbase
+import unittest
+
+class t007lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('fofababbooabb')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 0, token.start
+ assert token.stop == 1, token.stop
+ assert token.text == 'fo', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 2, token.start
+ assert token.stop == 12, token.stop
+ assert token.text == 'fababbooabb', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('foaboao')
+ lexer = self.getLexer(stream)
+
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.EarlyExitException, exc:
+ assert exc.unexpectedType == 'o', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 6, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t008lexer.g b/antlr-3.4/runtime/Python/tests/t008lexer.g
new file mode 100644
index 0000000..5949866
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t008lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t008lexer;
+options {
+ language = Python;
+}
+
+FOO: 'f' 'a'?;
diff --git a/antlr-3.4/runtime/Python/tests/t008lexer.py b/antlr-3.4/runtime/Python/tests/t008lexer.py
new file mode 100644
index 0000000..f62c148
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t008lexer.py
@@ -0,0 +1,66 @@
+import antlr3
+import testbase
+import unittest
+
+class t008lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('ffaf')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 0, token.start
+ assert token.stop == 0, token.stop
+ assert token.text == 'f', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 1, token.start
+ assert token.stop == 2, token.stop
+ assert token.text == 'fa', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.FOO
+ assert token.start == 3, token.start
+ assert token.stop == 3, token.stop
+ assert token.text == 'f', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('fafb')
+ lexer = self.getLexer(stream)
+
+ lexer.nextToken()
+ lexer.nextToken()
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.MismatchedTokenException, exc:
+ assert exc.unexpectedType == 'b', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 3, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t009lexer.g b/antlr-3.4/runtime/Python/tests/t009lexer.g
new file mode 100644
index 0000000..6126908
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t009lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t009lexer;
+options {
+ language = Python;
+}
+
+DIGIT: '0' .. '9';
diff --git a/antlr-3.4/runtime/Python/tests/t009lexer.py b/antlr-3.4/runtime/Python/tests/t009lexer.py
new file mode 100644
index 0000000..c32cbbf
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t009lexer.py
@@ -0,0 +1,67 @@
+import antlr3
+import testbase
+import unittest
+
+class t009lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('085')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.DIGIT
+ assert token.start == 0, token.start
+ assert token.stop == 0, token.stop
+ assert token.text == '0', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.DIGIT
+ assert token.start == 1, token.start
+ assert token.stop == 1, token.stop
+ assert token.text == '8', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.DIGIT
+ assert token.start == 2, token.start
+ assert token.stop == 2, token.stop
+ assert token.text == '5', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('2a')
+ lexer = self.getLexer(stream)
+
+ lexer.nextToken()
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.MismatchedSetException, exc:
+ # TODO: This should provide more useful information
+ assert exc.expecting is None
+ assert exc.unexpectedType == 'a', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 1, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+if __name__ == '__main__':
+ unittest.main()
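t009 also fixes the coordinate conventions used by every later position assert: charPositionInLine is a 0-based column (the 'a' at index 1 reports 1) while line is 1-based. Note the TODO above: for a range such as '0' .. '9' the runtime raises MismatchedSetException with expecting still None, so only the position fields and unexpectedType carry information. Condensed into a sketch:

    lexer = StrictLexer(antlr3.StringStream('2a'))   # over the t009 grammar
    lexer.nextToken()                # '2' lexes as DIGIT
    try:
        lexer.nextToken()            # 'a' falls outside the '0'..'9' set
        raise AssertionError('expected a lexer error')
    except antlr3.MismatchedSetException, exc:
        assert exc.expecting is None                     # see TODO above
        assert (exc.line, exc.charPositionInLine) == (1, 1)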
diff --git a/antlr-3.4/runtime/Python/tests/t010lexer.g b/antlr-3.4/runtime/Python/tests/t010lexer.g
new file mode 100644
index 0000000..a93636c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t010lexer.g
@@ -0,0 +1,7 @@
+lexer grammar t010lexer;
+options {
+ language = Python;
+}
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS: (' ' | '\n')+;
diff --git a/antlr-3.4/runtime/Python/tests/t010lexer.py b/antlr-3.4/runtime/Python/tests/t010lexer.py
new file mode 100644
index 0000000..7cd318c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t010lexer.py
@@ -0,0 +1,78 @@
+import antlr3
+import testbase
+import unittest
+
+class t010lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 0, token.start
+ assert token.stop == 5, token.stop
+ assert token.text == 'foobar', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.WS
+ assert token.start == 6, token.start
+ assert token.stop == 6, token.stop
+ assert token.text == ' ', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 7, token.start
+ assert token.stop == 11, token.stop
+ assert token.text == '_Ab98', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.WS
+ assert token.start == 12, token.start
+ assert token.stop == 14, token.stop
+ assert token.text == ' \n ', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 15, token.start
+ assert token.stop == 20, token.stop
+ assert token.text == 'A12sdf', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('a-b')
+ lexer = self.getLexer(stream)
+
+ lexer.nextToken()
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.NoViableAltException, exc:
+ assert exc.unexpectedType == '-', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 1, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t011lexer.g b/antlr-3.4/runtime/Python/tests/t011lexer.g
new file mode 100644
index 0000000..fde9a3b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t011lexer.g
@@ -0,0 +1,19 @@
+lexer grammar t011lexer;
+options {
+ language = Python;
+}
+
+IDENTIFIER:
+ ('a'..'z'|'A'..'Z'|'_')
+ ('a'..'z'
+ |'A'..'Z'
+ |'0'..'9'
+ |'_'
+ {
+ print "Underscore"
+ print "foo"
+ }
+ )*
+ ;
+
+WS: (' ' | '\n')+;
diff --git a/antlr-3.4/runtime/Python/tests/t011lexer.py b/antlr-3.4/runtime/Python/tests/t011lexer.py
new file mode 100644
index 0000000..7014255
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t011lexer.py
@@ -0,0 +1,78 @@
+import antlr3
+import testbase
+import unittest
+
+class t011lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+ lexer = self.getLexer(stream)
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 0, token.start
+ assert token.stop == 5, token.stop
+ assert token.text == 'foobar', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.WS
+ assert token.start == 6, token.start
+ assert token.stop == 6, token.stop
+ assert token.text == ' ', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 7, token.start
+ assert token.stop == 11, token.stop
+ assert token.text == '_Ab98', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.WS
+ assert token.start == 12, token.start
+ assert token.stop == 14, token.stop
+ assert token.text == ' \n ', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.IDENTIFIER
+ assert token.start == 15, token.start
+ assert token.stop == 20, token.stop
+ assert token.text == 'A12sdf', token.text
+
+ token = lexer.nextToken()
+ assert token.type == self.lexerModule.EOF
+
+
+ def testMalformedInput(self):
+ stream = antlr3.StringStream('a-b')
+ lexer = self.getLexer(stream)
+
+ lexer.nextToken()
+ try:
+ token = lexer.nextToken()
+ raise AssertionError, token
+
+ except antlr3.NoViableAltException, exc:
+ assert exc.unexpectedType == '-', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 1, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.input b/antlr-3.4/runtime/Python/tests/t012lexerXML.input
new file mode 100644
index 0000000..1815a9f
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t012lexerXML.input
@@ -0,0 +1,21 @@
+<?xml version='1.0'?>
+<!DOCTYPE component [
+<!ELEMENT component (PCDATA|sub)*>
+<!ATTLIST component
+ attr CDATA #IMPLIED
+ attr2 CDATA #IMPLIED
+>
+<!ELMENT sub EMPTY>
+
+]>
+<component attr="val'ue" attr2='val"ue'>
+<!-- This is a comment -->
+Text
+<![CDATA[huhu]]>
+öäüß
+&
+<
+<?xtal cursor='11'?>
+<sub/>
+<sub></sub>
+</component>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.output b/antlr-3.4/runtime/Python/tests/t012lexerXML.output
new file mode 100644
index 0000000..825c37f
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t012lexerXML.output
@@ -0,0 +1,39 @@
+XML declaration
+Attr: version='1.0'
+ROOTELEMENT: component
+INTERNAL DTD: [
+<!ELEMENT component (PCDATA|sub)*>
+<!ATTLIST component
+ attr CDATA #IMPLIED
+ attr2 CDATA #IMPLIED
+>
+<!ELMENT sub EMPTY>
+
+]
+Start Tag: component
+Attr: attr="val'ue"
+Attr: attr2='val"ue'
+PCDATA: "
+"
+Comment: "<!-- This is a comment -->"
+PCDATA: "
+Text
+"
+CDATA: "<![CDATA[huhu]]>"
+PCDATA: "
+öäüß
+&
+<
+"
+PI: xtal
+Attr: cursor='11'
+PCDATA: "
+"
+Empty Element: sub
+PCDATA: "
+"
+Start Tag: sub
+End Tag: sub
+PCDATA: "
+"
+End Tag: component
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.py b/antlr-3.4/runtime/Python/tests/t012lexerXML.py
new file mode 100644
index 0000000..3e8f8b4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t012lexerXML.py
@@ -0,0 +1,189 @@
+import antlr3
+import testbase
+import unittest
+import os
+import sys
+from cStringIO import StringIO
+import difflib
+import textwrap
+
+class t012lexerXML(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar('t012lexerXMLLexer.g')
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TLexer
+
+
+ def testValid(self):
+ inputPath = os.path.splitext(__file__)[0] + '.input'
+ stream = antlr3.StringStream(unicode(open(inputPath).read(), 'utf-8'))
+ lexer = self.getLexer(stream)
+
+ while True:
+ token = lexer.nextToken()
+ if token.type == self.lexerModule.EOF:
+ break
+
+
+ output = unicode(lexer.outbuf.getvalue(), 'utf-8')
+
+ outputPath = os.path.splitext(__file__)[0] + '.output'
+ testOutput = unicode(open(outputPath).read(), 'utf-8')
+
+ success = (output == testOutput)
+ if not success:
+ d = difflib.Differ()
+ r = d.compare(output.splitlines(1), testOutput.splitlines(1))
+ self.fail(
+ ''.join([l.encode('ascii', 'backslashreplace') for l in r])
+ )
+
+
+ def testMalformedInput1(self):
+ input = textwrap.dedent("""\
+ <?xml version='1.0'?>
+ <document d>
+ </document>
+ """)
+
+ stream = antlr3.StringStream(input)
+ lexer = self.getLexer(stream)
+
+ try:
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+ raise AssertionError
+
+ except antlr3.NoViableAltException, exc:
+ assert exc.unexpectedType == '>', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 11, repr(exc.charPositionInLine)
+ assert exc.line == 2, repr(exc.line)
+
+
+ def testMalformedInput2(self):
+ input = textwrap.dedent("""\
+ <?tml version='1.0'?>
+ <document>
+ </document>
+ """)
+
+ stream = antlr3.StringStream(input)
+ lexer = self.getLexer(stream)
+
+ try:
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+ raise AssertionError
+
+ except antlr3.MismatchedSetException, exc:
+ assert exc.unexpectedType == 't', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 2, repr(exc.charPositionInLine)
+ assert exc.line == 1, repr(exc.line)
+
+
+ def testMalformedInput3(self):
+ input = textwrap.dedent("""\
+ <?xml version='1.0'?>
+ <docu ment attr="foo">
+ </document>
+ """)
+
+ stream = antlr3.StringStream(input)
+ lexer = self.getLexer(stream)
+
+ try:
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+ raise AssertionError
+
+ except antlr3.NoViableAltException, exc:
+ assert exc.unexpectedType == 'a', repr(exc.unexpectedType)
+ assert exc.charPositionInLine == 11, repr(exc.charPositionInLine)
+ assert exc.line == 2, repr(exc.line)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+## # run an infinite loop with randomly mangled input
+## while True:
+## print "ping"
+
+## input = """\
+## <?xml version='1.0'?>
+## <!DOCTYPE component [
+## <!ELEMENT component (PCDATA|sub)*>
+## <!ATTLIST component
+## attr CDATA #IMPLIED
+## attr2 CDATA #IMPLIED
+## >
+## <!ELMENT sub EMPTY>
+
+## ]>
+## <component attr="val'ue" attr2='val"ue'>
+## <!-- This is a comment -->
+## Text
+## <![CDATA[huhu]]>
+## &
+## <
+## <?xtal cursor='11'?>
+## <sub/>
+## <sub></sub>
+## </component>
+## """
+
+## import random
+## input = list(input) # make it mutable
+## for _ in range(3):
+## p1 = random.randrange(len(input))
+## p2 = random.randrange(len(input))
+
+## c1 = input[p1]
+## input[p1] = input[p2]
+## input[p2] = c1
+## input = ''.join(input) # back to string
+
+## stream = antlr3.StringStream(input)
+## lexer = Lexer(stream)
+
+## try:
+## while True:
+## token = lexer.nextToken()
+## if token.type == EOF:
+## break
+
+## except antlr3.RecognitionException, exc:
+## print exc
+## for l in input.splitlines()[0:exc.line]:
+## print l
+## print ' '*exc.charPositionInLine + '^'
+
+## except BaseException, exc:
+## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())])
+## print "%s at %d:%d" % (exc, stream.line, stream.charPositionInLine)
+## print
+
+## raise
+
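t012 (like t018 below) is a golden-file test: it drains the token stream, captures what the lexer actions wrote to outbuf, and compares against the checked-in .output file, failing with a difflib line diff rather than a bare inequality. The comparison idiom is worth extracting; a minimal standalone helper along the same lines (a sketch, not part of the suite):

    import difflib

    def assert_matches_golden(actual, golden_path):
        """Fail with a readable line diff when actual != the golden file."""
        expected = open(golden_path).read()
        if actual != expected:
            d = difflib.Differ()
            r = d.compare(actual.splitlines(1), expected.splitlines(1))
            raise AssertionError(''.join(r))

The suite additionally re-encodes each diff line with encode('ascii', 'backslashreplace') because t012's expected output contains non-ASCII text (öäüß).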
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXMLLexer.g b/antlr-3.4/runtime/Python/tests/t012lexerXMLLexer.g
new file mode 100644
index 0000000..31fa203
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t012lexerXMLLexer.g
@@ -0,0 +1,132 @@
+lexer grammar t012lexerXMLLexer;
+options {
+ language = Python;
+}
+
+@header {
+from cStringIO import StringIO
+}
+
+@lexer::init {
+self.outbuf = StringIO()
+}
+
+@lexer::members {
+def output(self, line):
+ self.outbuf.write(line.encode('utf-8') + "\n")
+}
+
+DOCUMENT
+ : XMLDECL? WS? DOCTYPE? WS? ELEMENT WS?
+ ;
+
+fragment DOCTYPE
+ :
+ '<!DOCTYPE' WS rootElementName=GENERIC_ID
+ {self.output("ROOTELEMENT: "+rootElementName.text)}
+ WS
+ (
+ ( 'SYSTEM' WS sys1=VALUE
+ {self.output("SYSTEM: "+sys1.text)}
+
+ | 'PUBLIC' WS pub=VALUE WS sys2=VALUE
+ {self.output("PUBLIC: "+pub.text)}
+ {self.output("SYSTEM: "+sys2.text)}
+ )
+ ( WS )?
+ )?
+ ( dtd=INTERNAL_DTD
+ {self.output("INTERNAL DTD: "+dtd.text)}
+ )?
+ '>'
+ ;
+
+fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ;
+
+fragment PI :
+ '<?' target=GENERIC_ID WS?
+ {self.output("PI: "+target.text)}
+ ( ATTRIBUTE WS? )* '?>'
+ ;
+
+fragment XMLDECL :
+ '<?' ('x'|'X') ('m'|'M') ('l'|'L') WS?
+ {self.output("XML declaration")}
+ ( ATTRIBUTE WS? )* '?>'
+ ;
+
+
+fragment ELEMENT
+ : ( START_TAG
+ (ELEMENT
+ | t=PCDATA
+ {self.output("PCDATA: \""+$t.text+"\"")}
+ | t=CDATA
+ {self.output("CDATA: \""+$t.text+"\"")}
+ | t=COMMENT
+ {self.output("Comment: \""+$t.text+"\"")}
+ | pi=PI
+ )*
+ END_TAG
+ | EMPTY_ELEMENT
+ )
+ ;
+
+fragment START_TAG
+ : '<' WS? name=GENERIC_ID WS?
+ {self.output("Start Tag: "+name.text)}
+ ( ATTRIBUTE WS? )* '>'
+ ;
+
+fragment EMPTY_ELEMENT
+ : '<' WS? name=GENERIC_ID WS?
+ {self.output("Empty Element: "+name.text)}
+ ( ATTRIBUTE WS? )* '/>'
+ ;
+
+fragment ATTRIBUTE
+ : name=GENERIC_ID WS? '=' WS? value=VALUE
+ {self.output("Attr: "+name.text+"="+value.text)}
+ ;
+
+fragment END_TAG
+ : '</' WS? name=GENERIC_ID WS? '>'
+ {self.output("End Tag: "+name.text)}
+ ;
+
+fragment COMMENT
+ : '<!--' (options {greedy=false;} : .)* '-->'
+ ;
+
+fragment CDATA
+ : '<![CDATA[' (options {greedy=false;} : .)* ']]>'
+ ;
+
+fragment PCDATA : (~'<')+ ;
+
+fragment VALUE :
+ ( '\"' (~'\"')* '\"'
+ | '\'' (~'\'')* '\''
+ )
+ ;
+
+fragment GENERIC_ID
+ : ( LETTER | '_' | ':')
+ ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )*
+ ;
+
+fragment LETTER
+ : 'a'..'z'
+ | 'A'..'Z'
+ ;
+
+fragment WS :
+ ( ' '
+ | '\t'
+ | ( '\n'
+ | '\r\n'
+ | '\r'
+ )
+ )+
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t013parser.g b/antlr-3.4/runtime/Python/tests/t013parser.g
new file mode 100644
index 0000000..c3ab2c9
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t013parser.g
@@ -0,0 +1,23 @@
+grammar t013parser;
+options {
+ language = Python;
+}
+
+@parser::init {
+self.identifiers = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def foundIdentifier(self, name):
+ self.identifiers.append(name)
+
+def emitErrorMessage(self, msg):
+ self.reportedErrors.append(msg)
+}
+
+document:
+ t=IDENTIFIER {self.foundIdentifier($t.text)}
+ ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/antlr-3.4/runtime/Python/tests/t013parser.py b/antlr-3.4/runtime/Python/tests/t013parser.py
new file mode 100644
index 0000000..1c21d5e
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t013parser.py
@@ -0,0 +1,35 @@
+import antlr3
+import testbase
+import unittest
+
+class t013parser(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.document()
+
+ assert len(parser.reportedErrors) == 0, parser.reportedErrors
+ assert parser.identifiers == ['foobar']
+
+
+ def testMalformedInput1(self):
+ cStream = antlr3.StringStream('')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ parser.document()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 1, parser.reportedErrors
+
+
+if __name__ == '__main__':
+ unittest.main()
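t013 is the first parser test, and it shows the full ANTLR 3 runtime pipeline that every parser test below repeats: character stream to lexer to CommonTokenStream to parser to entry-rule call. Stripped of the harness, the wiring is just the following sketch (class and module names assumed to follow ANTLR's <grammar>Lexer / <grammar>Parser convention):

    import antlr3
    from t013parserLexer import t013parserLexer     # assumed generated names
    from t013parserParser import t013parserParser

    cStream = antlr3.StringStream('foobar')
    lexer = t013parserLexer(cStream)
    tStream = antlr3.CommonTokenStream(lexer)
    parser = t013parserParser(tStream)

    parser.document()                          # invoke the entry rule
    assert parser.identifiers == ['foobar']    # filled in by the rule action
    assert parser.reportedErrors == []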
diff --git a/antlr-3.4/runtime/Python/tests/t014parser.g b/antlr-3.4/runtime/Python/tests/t014parser.g
new file mode 100644
index 0000000..4c8238f
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t014parser.g
@@ -0,0 +1,35 @@
+grammar t014parser;
+options {
+ language = Python;
+}
+
+@parser::init {
+self.events = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+ self.reportedErrors.append(msg)
+}
+
+
+document:
+ ( declaration
+ | call
+ )*
+ EOF
+ ;
+
+declaration:
+ 'var' t=IDENTIFIER ';'
+ {self.events.append(('decl', $t.text))}
+ ;
+
+call:
+ t=IDENTIFIER '(' ')' ';'
+ {self.events.append(('call', $t.text))}
+ ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS: (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;};
diff --git a/antlr-3.4/runtime/Python/tests/t014parser.py b/antlr-3.4/runtime/Python/tests/t014parser.py
new file mode 100644
index 0000000..e2965a4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t014parser.py
@@ -0,0 +1,74 @@
+import antlr3
+import testbase
+import unittest
+
+class t014parser(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ cStream = antlr3.StringStream('var foobar; gnarz(); var blupp; flupp ( ) ;')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.document()
+
+ assert len(parser.reportedErrors) == 0, parser.reportedErrors
+ assert parser.events == [
+ ('decl', 'foobar'),
+ ('call', 'gnarz'),
+ ('decl', 'blupp'),
+ ('call', 'flupp')
+ ], parser.events
+
+
+ def testMalformedInput1(self):
+ cStream = antlr3.StringStream('var; foo();')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ parser.document()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 1, parser.reportedErrors
+ assert parser.events == [], parser.events
+
+
+ def testMalformedInput2(self):
+ cStream = antlr3.StringStream('var foobar(); gnarz();')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ parser.document()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 1, parser.reportedErrors
+ assert parser.events == [
+ ('call', 'gnarz'),
+ ], parser.events
+
+
+ def testMalformedInput3(self):
+ cStream = antlr3.StringStream('gnarz(; flupp();')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ parser.document()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 1, parser.reportedErrors
+ assert parser.events == [
+ ('call', 'gnarz'),
+ ('call', 'flupp'),
+ ], parser.events
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t015calc.g b/antlr-3.4/runtime/Python/tests/t015calc.g
new file mode 100644
index 0000000..f08e3ce
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t015calc.g
@@ -0,0 +1,54 @@
+grammar t015calc;
+options {
+ language = Python;
+}
+
+@header {
+import math
+}
+
+@parser::init {
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+ self.reportedErrors.append(msg)
+}
+
+evaluate returns [result]: r=expression {result = r};
+
+expression returns [result]: r=mult (
+ '+' r2=mult {r += r2}
+ | '-' r2=mult {r -= r2}
+ )* {result = r};
+
+mult returns [result]: r=log (
+ '*' r2=log {r *= r2}
+ | '/' r2=log {r /= r2}
+// | '%' r2=log {r %= r2}
+ )* {result = r};
+
+log returns [result]: 'ln' r=exp {result = math.log(r)}
+ | r=exp {result = r}
+ ;
+
+exp returns [result]: r=atom ('^' r2=atom {r = math.pow(r,r2)} )? {result = r}
+ ;
+
+atom returns [result]:
+ n=INTEGER {result = int($n.text)}
+ | n=DECIMAL {result = float($n.text)}
+ | '(' r=expression {result = r} ')'
+ | 'PI' {result = math.pi}
+ | 'E' {result = math.e}
+ ;
+
+INTEGER: DIGIT+;
+
+DECIMAL: DIGIT+ '.' DIGIT+;
+
+fragment
+DIGIT: '0'..'9';
+
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN};
diff --git a/antlr-3.4/runtime/Python/tests/t015calc.py b/antlr-3.4/runtime/Python/tests/t015calc.py
new file mode 100644
index 0000000..0f1fe8a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t015calc.py
@@ -0,0 +1,46 @@
+import antlr3
+import testbase
+import unittest
+
+class t015calc(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def _evaluate(self, expr, expected, errors=[]):
+ cStream = antlr3.StringStream(expr)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ result = parser.evaluate()
+ assert result == expected, "%r != %r" % (result, expected)
+ assert len(parser.reportedErrors) == len(errors), parser.reportedErrors
+
+
+ def testValid01(self):
+ self._evaluate("1 + 2", 3)
+
+
+ def testValid02(self):
+ self._evaluate("1 + 2 * 3", 7)
+
+
+ def testValid03(self):
+ self._evaluate("10 / 2", 5)
+
+
+ def testValid04(self):
+ self._evaluate("6 + 2*(3+1) - 4", 10)
+
+
+ def testMalformedInput(self):
+ self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"])
+
+ # FIXME: most parse errors result in TypeErrors in action code, because
+ # rules return None, which is then added/multiplied... to integers.
+ # evaluate("6 - foo 2", 4, ["some error"])
+
+
+if __name__ == '__main__':
+ unittest.main()
+
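The t015calc grammar encodes operator precedence structurally: expression (+, -) sits above mult (*, /), which sits above log, exp and atom, so "1 + 2 * 3" evaluates to 7 with no precedence table. The same layering written as a plain recursive-descent evaluator over a pre-split token list looks like this sketch (the ln, ^, PI and E layers are omitted):

    def evaluate(tokens):
        pos = [0]                                # one-slot cursor

        def peek():
            return tokens[pos[0]] if pos[0] < len(tokens) else None

        def eat():
            pos[0] += 1
            return tokens[pos[0] - 1]

        def atom():
            if peek() == '(':
                eat()                            # '('
                r = expression()
                eat()                            # ')'
                return r
            return float(eat())

        def mult():                              # binds tighter than +/-
            r = atom()
            while peek() in ('*', '/'):
                op, rhs = eat(), atom()
                r = r * rhs if op == '*' else r / rhs
            return r

        def expression():
            r = mult()
            while peek() in ('+', '-'):
                op, rhs = eat(), mult()
                r = r + rhs if op == '+' else r - rhs
            return r

        return expression()

    assert evaluate(['1', '+', '2', '*', '3']) == 7
    assert evaluate(['6', '+', '2', '*', '(', '3', '+', '1', ')', '-', '4']) == 10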
diff --git a/antlr-3.4/runtime/Python/tests/t016actions.g b/antlr-3.4/runtime/Python/tests/t016actions.g
new file mode 100644
index 0000000..1b7ac65
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t016actions.g
@@ -0,0 +1,31 @@
+grammar t016actions;
+options {
+ language = Python;
+}
+
+declaration returns [name]
+ : functionHeader ';'
+ {$name = $functionHeader.name}
+ ;
+
+functionHeader returns [name]
+ : type ID
+ {$name = $ID.text}
+ ;
+
+type
+ : 'int'
+ | 'char'
+ | 'void'
+ ;
+
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ ;
+
+WS : ( ' '
+ | '\t'
+ | '\r'
+ | '\n'
+ )+
+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t016actions.py b/antlr-3.4/runtime/Python/tests/t016actions.py
new file mode 100644
index 0000000..5e4cad0
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t016actions.py
@@ -0,0 +1,20 @@
+import antlr3
+import testbase
+import unittest
+
+class t016actions(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ cStream = antlr3.StringStream("int foo;")
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ name = parser.declaration()
+ assert name == 'foo', name
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t017parser.g b/antlr-3.4/runtime/Python/tests/t017parser.g
new file mode 100644
index 0000000..84c6b03
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t017parser.g
@@ -0,0 +1,91 @@
+grammar t017parser;
+
+options {
+ language = Python;
+}
+
+program
+ : declaration+
+ ;
+
+declaration
+ : variable
+ | functionHeader ';'
+ | functionHeader block
+ ;
+
+variable
+ : type declarator ';'
+ ;
+
+declarator
+ : ID
+ ;
+
+functionHeader
+ : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+ ;
+
+formalParameter
+ : type declarator
+ ;
+
+type
+ : 'int'
+ | 'char'
+ | 'void'
+ | ID
+ ;
+
+block
+ : '{'
+ variable*
+ stat*
+ '}'
+ ;
+
+stat: forStat
+ | expr ';'
+ | block
+ | assignStat ';'
+ | ';'
+ ;
+
+forStat
+ : 'for' '(' assignStat ';' expr ';' assignStat ')' block
+ ;
+
+assignStat
+ : ID '=' expr
+ ;
+
+expr: condExpr
+ ;
+
+condExpr
+ : aexpr ( ('==' | '<') aexpr )?
+ ;
+
+aexpr
+ : atom ( '+' atom )*
+ ;
+
+atom
+ : ID
+ | INT
+ | '(' expr ')'
+ ;
+
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ ;
+
+INT : ('0'..'9')+
+ ;
+
+WS : ( ' '
+ | '\t'
+ | '\r'
+ | '\n'
+ )+
+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t017parser.py b/antlr-3.4/runtime/Python/tests/t017parser.py
new file mode 100644
index 0000000..5b4d851
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t017parser.py
@@ -0,0 +1,58 @@
+import antlr3
+import testbase
+import unittest
+
+class t017parser(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+ def parserClass(self, base):
+ class TestParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self.reportedErrors = []
+
+
+ def emitErrorMessage(self, msg):
+ self.reportedErrors.append(msg)
+
+ return TestParser
+
+
+ def testValid(self):
+ cStream = antlr3.StringStream("int foo;")
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.program()
+
+ assert len(parser.reportedErrors) == 0, parser.reportedErrors
+
+
+ def testMalformedInput1(self):
+ cStream = antlr3.StringStream('int foo() { 1+2 }')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.program()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 1, parser.reportedErrors
+
+
+ def testMalformedInput2(self):
+ cStream = antlr3.StringStream('int foo() { 1+; 1+2 }')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.program()
+
+ # FIXME: currently strings with formatted errors are collected
+ # can't check error locations yet
+ assert len(parser.reportedErrors) == 2, parser.reportedErrors
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.g b/antlr-3.4/runtime/Python/tests/t018llstar.g
new file mode 100644
index 0000000..388ab92
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t018llstar.g
@@ -0,0 +1,111 @@
+grammar t018llstar;
+
+options {
+ language = Python;
+}
+
+@header {
+from cStringIO import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+program
+ : declaration+
+ ;
+
+/** In this rule, the functionHeader left prefix on the last two
+ * alternatives is not LL(k) for a fixed k. However, it is
+ * LL(*). The LL(*) algorithm simply scans ahead until it sees
+ * either the ';' or the '{' of the block and then it picks
+ * the appropriate alternative. Lookahead can be arbitrarily
+ * long in theory, but is <=10 in most cases. Works great.
+ * Use ANTLRWorks to see the lookahead use (step by Location)
+ * and look for blue tokens in the input window pane. :)
+ */
+declaration
+ : variable
+ | functionHeader ';'
+ {self.output.write($functionHeader.name+" is a declaration\n")}
+ | functionHeader block
+ {self.output.write($functionHeader.name+" is a definition\n")}
+ ;
+
+variable
+ : type declarator ';'
+ ;
+
+declarator
+ : ID
+ ;
+
+functionHeader returns [name]
+ : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+ {$name = $ID.text}
+ ;
+
+formalParameter
+ : type declarator
+ ;
+
+type
+ : 'int'
+ | 'char'
+ | 'void'
+ | ID
+ ;
+
+block
+ : '{'
+ variable*
+ stat*
+ '}'
+ ;
+
+stat: forStat
+ | expr ';'
+ | block
+ | assignStat ';'
+ | ';'
+ ;
+
+forStat
+ : 'for' '(' assignStat ';' expr ';' assignStat ')' block
+ ;
+
+assignStat
+ : ID '=' expr
+ ;
+
+expr: condExpr
+ ;
+
+condExpr
+ : aexpr ( ('==' | '<') aexpr )?
+ ;
+
+aexpr
+ : atom ( '+' atom )*
+ ;
+
+atom
+ : ID
+ | INT
+ | '(' expr ')'
+ ;
+
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ ;
+
+INT : ('0'..'9')+
+ ;
+
+WS : ( ' '
+ | '\t'
+ | '\r'
+ | '\n'
+ )+
+ {$channel=HIDDEN}
+ ;
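The comment in declaration above is the point of this test: "functionHeader ';'" and "functionHeader block" share an arbitrarily long left prefix, so no fixed-k lookahead can separate them, and LL(*) instead scans ahead until the first ';' or '{' settles the choice. The prediction amounts to this sketch (real ANTLR compiles the scan into a DFA over token types rather than a loop):

    def predict_declaration_alt(tokens, i):
        # scan forward from the decision point until ';' or '{' appears
        for tok in tokens[i:]:
            if tok == ';':
                return 'declaration'     # functionHeader ';'
            if tok == '{':
                return 'definition'      # functionHeader block
        raise SyntaxError('ran off the input while predicting')

    assert predict_declaration_alt(
        ['void', 'bar', '(', 'int', 'x', ')', ';'], 0) == 'declaration'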
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.input b/antlr-3.4/runtime/Python/tests/t018llstar.input
new file mode 100644
index 0000000..1aa5a0d
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t018llstar.input
@@ -0,0 +1,12 @@
+char c;
+int x;
+
+void bar(int x);
+
+int foo(int y, char d) {
+ int i;
+ for (i=0; i<3; i=i+1) {
+ x=3;
+ y=5;
+ }
+}
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.output b/antlr-3.4/runtime/Python/tests/t018llstar.output
new file mode 100644
index 0000000..757c53a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t018llstar.output
@@ -0,0 +1,2 @@
+bar is a declaration
+foo is a definition
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.py b/antlr-3.4/runtime/Python/tests/t018llstar.py
new file mode 100644
index 0000000..fe67fe2
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t018llstar.py
@@ -0,0 +1,76 @@
+import antlr3
+import testbase
+import unittest
+import os
+import sys
+from cStringIO import StringIO
+import difflib
+
+class t018llstar(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ inputPath = os.path.splitext(__file__)[0] + '.input'
+ cStream = antlr3.StringStream(open(inputPath).read())
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.program()
+
+ output = parser.output.getvalue()
+
+ outputPath = os.path.splitext(__file__)[0] + '.output'
+ testOutput = open(outputPath).read()
+
+ success = (output == testOutput)
+ if not success:
+ d = difflib.Differ()
+ r = d.compare(output.splitlines(1), testOutput.splitlines(1))
+ self.fail(
+ ''.join([l.encode('ascii', 'backslashreplace') for l in r])
+ )
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+
+## # run an infinite loop with randomly mangled input
+## while True:
+## print "ping"
+
+## input = open(inputPath).read()
+
+## import random
+## input = list(input) # make it mutable
+## for _ in range(3):
+## p1 = random.randrange(len(input))
+## p2 = random.randrange(len(input))
+
+## c1 = input[p1]
+## input[p1] = input[p2]
+## input[p2] = c1
+## input = ''.join(input) # back to string
+
+
+## try:
+## cStream = antlr3.StringStream(input)
+## lexer = Lexer(cStream)
+## tStream = antlr3.CommonTokenStream(lexer)
+## parser = TestParser(tStream)
+## parser.program()
+
+## except antlr3.RecognitionException, exc:
+## print exc
+## for l in input.splitlines()[0:exc.line]:
+## print l
+## print ' '*exc.charPositionInLine + '^'
+
+## except BaseException, exc:
+## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())])
+## print "%s at %d:%d" % (exc, cStream.line, cStream.charPositionInLine)
+## print
+
+## raise
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.g b/antlr-3.4/runtime/Python/tests/t019lexer.g
new file mode 100644
index 0000000..3647775
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t019lexer.g
@@ -0,0 +1,64 @@
+lexer grammar t019lexer;
+options {
+ language=Python;
+ filter=true;
+}
+
+IMPORT
+ : 'import' WS name=QIDStar WS? ';'
+ ;
+
+/** Avoids having "return foo;" match as a field */
+RETURN
+ : 'return' (options {greedy=false;}:.)* ';'
+ ;
+
+CLASS
+ : 'class' WS name=ID WS? ('extends' WS QID WS?)?
+ ('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+ ;
+
+COMMENT
+ : '/*' (options {greedy=false;} : . )* '*/'
+ ;
+
+STRING
+ : '"' (options {greedy=false;}: ESC | .)* '"'
+ ;
+
+CHAR
+ : '\'' (options {greedy=false;}: ESC | .)* '\''
+ ;
+
+WS : (' '|'\t'|'\n')+
+ ;
+
+fragment
+QID : ID ('.' ID)*
+ ;
+
+/** QID cannot see beyond end of token so using QID '.*'? somewhere won't
+ * ever match since k=1 lookahead in the QID loop of '.' will make it loop.
+ * I made this rule to compensate.
+ */
+fragment
+QIDStar
+ : ID ('.' ID)* '.*'?
+ ;
+
+fragment
+TYPE: QID '[]'?
+ ;
+
+fragment
+ARG : TYPE WS ID
+ ;
+
+fragment
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+ ;
+
+fragment
+ESC : '\\' ('"'|'\''|'\\')
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.input b/antlr-3.4/runtime/Python/tests/t019lexer.input
new file mode 100644
index 0000000..d01e1c1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t019lexer.input
@@ -0,0 +1,13 @@
+import org.antlr.runtime.*;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ for (int i=0; i<args.length; i++) {
+ CharStream input = new ANTLRFileStream(args[i]);
+ FuzzyJava lex = new FuzzyJava(input);
+ TokenStream tokens = new CommonTokenStream(lex);
+ tokens.toString();
+ //System.out.println(tokens);
+ }
+ }
+}
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.py b/antlr-3.4/runtime/Python/tests/t019lexer.py
new file mode 100644
index 0000000..de21d33
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t019lexer.py
@@ -0,0 +1,22 @@
+import os
+import antlr3
+import testbase
+import unittest
+
+class t019lexer(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ inputPath = os.path.splitext(__file__)[0] + '.input'
+ stream = antlr3.StringStream(open(inputPath).read())
+ lexer = self.getLexer(stream)
+
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+if __name__ == '__main__':
+ unittest.main()
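filter=true (here and in t020 below) turns the lexer into a skimmer: at each position it tries the rules, emits a token for the first rule that matches, and silently skips one character when nothing matches. That is why a whole Java source file lexes without errors although the grammar only knows imports, class headers, strings, chars and comments, and why the test can simply drain the stream until EOF. Conceptually (match_token is a hypothetical stand-in for trying the lexer rules, not a runtime API):

    def skim(match_token, text):
        i, tokens = 0, []
        while i < len(text):
            m = match_token(text, i)     # -> (token, next_i) or None
            if m is None:
                i += 1                   # filter=true: skip silently
            else:
                tok, i = m
                tokens.append(tok)
        return tokens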
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.input b/antlr-3.4/runtime/Python/tests/t020fuzzy.input
new file mode 100644
index 0000000..d01e1c1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t020fuzzy.input
@@ -0,0 +1,13 @@
+import org.antlr.runtime.*;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ for (int i=0; i<args.length; i++) {
+ CharStream input = new ANTLRFileStream(args[i]);
+ FuzzyJava lex = new FuzzyJava(input);
+ TokenStream tokens = new CommonTokenStream(lex);
+ tokens.toString();
+ //System.out.println(tokens);
+ }
+ }
+}
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.output b/antlr-3.4/runtime/Python/tests/t020fuzzy.output
new file mode 100644
index 0000000..da134f0
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t020fuzzy.output
@@ -0,0 +1,12 @@
+found class Main
+found method main
+found var i
+found var input
+found call ANTLRFileStream
+found var lex
+found call FuzzyJava
+found var tokens
+found call CommonTokenStream
+found call tokens.toString
+found // comment //System.out.println(tokens);
+
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.py b/antlr-3.4/runtime/Python/tests/t020fuzzy.py
new file mode 100644
index 0000000..773aa2e
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t020fuzzy.py
@@ -0,0 +1,40 @@
+import os
+import sys
+import antlr3
+import testbase
+import unittest
+from cStringIO import StringIO
+import difflib
+
+class t020fuzzy(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar('t020fuzzyLexer.g')
+
+
+ def testValid(self):
+ inputPath = os.path.splitext(__file__)[0] + '.input'
+ stream = antlr3.StringStream(open(inputPath).read())
+ lexer = self.getLexer(stream)
+
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+
+ output = lexer.output.getvalue()
+
+ outputPath = os.path.splitext(__file__)[0] + '.output'
+ testOutput = open(outputPath).read()
+
+ success = (output == testOutput)
+ if not success:
+ d = difflib.Differ()
+ r = d.compare(output.splitlines(1), testOutput.splitlines(1))
+ self.fail(
+ ''.join([l.encode('ascii', 'backslashreplace') for l in r])
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzyLexer.g b/antlr-3.4/runtime/Python/tests/t020fuzzyLexer.g
new file mode 100644
index 0000000..819af69
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t020fuzzyLexer.g
@@ -0,0 +1,96 @@
+lexer grammar t020fuzzyLexer;
+options {
+ language=Python;
+ filter=true;
+}
+
+@header {
+from cStringIO import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+IMPORT
+ : 'import' WS name=QIDStar WS? ';'
+ ;
+
+/** Avoids having "return foo;" match as a field */
+RETURN
+ : 'return' (options {greedy=false;}:.)* ';'
+ ;
+
+CLASS
+ : 'class' WS name=ID WS? ('extends' WS QID WS?)?
+ ('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+ {self.output.write("found class "+$name.text+"\n")}
+ ;
+
+METHOD
+ : TYPE WS name=ID WS? '(' ( ARG WS? (',' WS? ARG WS?)* )? ')' WS?
+ ('throws' WS QID WS? (',' WS? QID WS?)*)? '{'
+ {self.output.write("found method "+$name.text+"\n");}
+ ;
+
+FIELD
+ : TYPE WS name=ID '[]'? WS? (';'|'=')
+ {self.output.write("found var "+$name.text+"\n");}
+ ;
+
+STAT: ('if'|'while'|'switch'|'for') WS? '(' ;
+
+CALL
+ : name=QID WS? '('
+ {self.output.write("found call "+$name.text+"\n");}
+ ;
+
+COMMENT
+ : '/*' (options {greedy=false;} : . )* '*/'
+ {self.output.write("found comment "+self.getText()+"\n");}
+ ;
+
+SL_COMMENT
+ : '//' (options {greedy=false;} : . )* '\n'
+ {self.output.write("found // comment "+self.getText()+"\n");}
+ ;
+
+STRING
+ : '"' (options {greedy=false;}: ESC | .)* '"'
+ ;
+
+CHAR
+ : '\'' (options {greedy=false;}: ESC | .)* '\''
+ ;
+
+WS : (' '|'\t'|'\n')+
+ ;
+
+fragment
+QID : ID ('.' ID)*
+ ;
+
+/** QID cannot see beyond end of token so using QID '.*'? somewhere won't
+ * ever match since k=1 lookahead in the QID loop of '.' will make it loop.
+ * I made this rule to compensate.
+ */
+fragment
+QIDStar
+ : ID ('.' ID)* '.*'?
+ ;
+
+fragment
+TYPE: QID '[]'?
+ ;
+
+fragment
+ARG : TYPE WS ID
+ ;
+
+fragment
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+ ;
+
+fragment
+ESC : '\\' ('"'|'\''|'\\')
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t021hoist.g b/antlr-3.4/runtime/Python/tests/t021hoist.g
new file mode 100644
index 0000000..8caa3ab
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t021hoist.g
@@ -0,0 +1,37 @@
+grammar t021hoist;
+options {
+ language=Python;
+}
+
+/* With this true, enum is seen as a keyword. False, it's an identifier */
+@parser::init {
+self.enableEnum = False
+}
+
+stat returns [enumIs]
+ : identifier {enumIs = "ID"}
+ | enumAsKeyword {enumIs = "keyword"}
+ ;
+
+identifier
+ : ID
+ | enumAsID
+ ;
+
+enumAsKeyword : {self.enableEnum}? 'enum' ;
+
+enumAsID : {not self.enableEnum}? 'enum' ;
+
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ ;
+
+INT : ('0'..'9')+
+ ;
+
+WS : ( ' '
+ | '\t'
+ | '\r'
+ | '\n'
+ )+
+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t021hoist.py b/antlr-3.4/runtime/Python/tests/t021hoist.py
new file mode 100644
index 0000000..59d7260
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t021hoist.py
@@ -0,0 +1,38 @@
+import os
+import sys
+import antlr3
+import testbase
+import unittest
+
+
+class t021hoist(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('enum')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.enableEnum = True
+ enumIs = parser.stat()
+
+ assert enumIs == 'keyword', repr(enumIs)
+
+
+ def testValid2(self):
+ cStream = antlr3.StringStream('enum')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.enableEnum = False
+ enumIs = parser.stat()
+
+ assert enumIs == 'ID', repr(enumIs)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
+
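t021 exercises a gated semantic predicate: {self.enableEnum}? 'enum' makes the enumAsKeyword alternative available only while the flag is true, and because the predicate is hoisted into the prediction decision, flipping one parser attribute re-routes the same input through a different rule. The test's toggle, condensed into a sketch (generated names assumed as before):

    import antlr3
    from t021hoistLexer import t021hoistLexer       # assumed generated names
    from t021hoistParser import t021hoistParser

    def classify_enum(enable):
        lexer = t021hoistLexer(antlr3.StringStream('enum'))
        parser = t021hoistParser(antlr3.CommonTokenStream(lexer))
        parser.enableEnum = enable    # read by the {self.enableEnum}? predicates
        return parser.stat()

    assert classify_enum(True) == 'keyword'
    assert classify_enum(False) == 'ID'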
diff --git a/antlr-3.4/runtime/Python/tests/t022scopes.g b/antlr-3.4/runtime/Python/tests/t022scopes.g
new file mode 100644
index 0000000..1affc83
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t022scopes.g
@@ -0,0 +1,127 @@
+grammar t022scopes;
+
+options {
+ language=Python;
+}
+
+/* global scopes */
+
+scope aScope {
+names
+}
+
+a
+scope aScope;
+ : {$aScope::names = [];} ID*
+ ;
+
+
+/* rule scopes, from the book, final beta, p.147 */
+
+b[v]
+scope {x}
+ : {$b::x = v;} b2
+ ;
+
+b2
+ : b3
+ ;
+
+b3
+ : {$b::x}?=> ID // only viable if b was called with True
+ | NUM
+ ;
+
+
+/* rule scopes, from the book, final beta, p.148 */
+
+c returns [res]
+scope {
+ symbols
+}
+@init {
+ $c::symbols = set();
+}
+ : '{' c1* c2+ '}'
+ { $res = $c::symbols; }
+ ;
+
+c1
+ : 'int' ID {$c::symbols.add($ID.text)} ';'
+ ;
+
+c2
+ : ID '=' NUM ';'
+ {
+ if $ID.text not in $c::symbols:
+ raise RuntimeError($ID.text)
+ }
+ ;
+
+/* recursive rule scopes, from the book, final beta, p.150 */
+
+d returns [res]
+scope {
+ symbols
+}
+@init {
+ $d::symbols = set();
+}
+ : '{' d1* d2* '}'
+ { $res = $d::symbols; }
+ ;
+
+d1
+ : 'int' ID {$d::symbols.add($ID.text)} ';'
+ ;
+
+d2
+ : ID '=' NUM ';'
+ {
+ for s in reversed(range(len($d))):
+ if $ID.text in $d[s]::symbols:
+ break
+ else:
+ raise RuntimeError($ID.text)
+ }
+ | d
+ ;
+
+/* recursive rule scopes, access bottom-most scope */
+
+e returns [res]
+scope {
+ a
+}
+@after {
+ $res = $e::a;
+}
+ : NUM { $e[0]::a = int($NUM.text); }
+ | '{' e '}'
+ ;
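+// $e[0] addresses the bottom-most (outermost) scope on the stack, so the
+// innermost NUM becomes visible to the outermost invocation's @after block;
+// teste1 in t022scopes.py gets 12 back from '{ { { { 12 } } } }'.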
+
+
+/* recursive rule scopes, access with negative index */
+
+f returns [res]
+scope {
+ a
+}
+@after {
+ $res = $f::a;
+}
+ : NUM { $f[-2]::a = int($NUM.text); }
+ | '{' f '}'
+ ;
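+// $f[-2] writes one level below the current invocation; for the inputs in
+// t022scopes.py the write lands in an intermediate scope, so the outermost
+// @after reads an unset 'a' and both tests expect None.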
+
+
+/* tokens */
+
+ID : ('a'..'z')+
+ ;
+
+NUM : ('0'..'9')+
+ ;
+
+WS : (' '|'\n'|'\r')+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t022scopes.py b/antlr-3.4/runtime/Python/tests/t022scopes.py
new file mode 100644
index 0000000..01bc597
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t022scopes.py
@@ -0,0 +1,167 @@
+import antlr3
+import testbase
+import unittest
+import textwrap
+
+
+class t022scopes(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def emitErrorMessage(self, msg):
+ # report errors to /dev/null
+ pass
+
+ def reportError(self, re):
+ # no error recovery yet, just crash!
+ raise re
+
+ return TParser
+
+
+ def testa1(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.a()
+
+
+ def testb1(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ try:
+ parser.b(False)
+ self.fail()
+ except antlr3.RecognitionException:
+ pass
+
+
+ def testb2(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.b(True)
+
+
+ def testc1(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ {
+ int i;
+ int j;
+ i = 0;
+ }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ symbols = parser.c()
+
+ self.failUnlessEqual(
+ symbols,
+ set(['i', 'j'])
+ )
+
+
+ def testc2(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ {
+ int i;
+ int j;
+ i = 0;
+ x = 4;
+ }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ try:
+ parser.c()
+ self.fail()
+ except RuntimeError, exc:
+ self.failUnlessEqual(exc.args[0], 'x')
+
+
+ def testd1(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ {
+ int i;
+ int j;
+ i = 0;
+ {
+ int i;
+ int x;
+ x = 5;
+ }
+ }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ symbols = parser.d()
+
+ self.failUnlessEqual(
+ symbols,
+ set(['i', 'j'])
+ )
+
+
+ def teste1(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ { { { { 12 } } } }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ res = parser.e()
+
+ self.failUnlessEqual(res, 12)
+
+
+ def testf1(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ { { { { 12 } } } }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ res = parser.f()
+
+ self.failUnlessEqual(res, None)
+
+
+ def testf2(self):
+ cStream = antlr3.StringStream(
+ textwrap.dedent('''\
+ { { 12 } }
+ '''))
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ res = parser.f()
+
+ self.failUnlessEqual(res, None)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t023scopes.g b/antlr-3.4/runtime/Python/tests/t023scopes.g
new file mode 100644
index 0000000..02e69b1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t023scopes.g
@@ -0,0 +1,18 @@
+grammar t023scopes;
+
+options {
+ language=Python;
+}
+
+prog
+scope {
+name
+}
+ : ID {$prog::name=$ID.text;}
+ ;
+
+ID : ('a'..'z')+
+ ;
+
+WS : (' '|'\n'|'\r')+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t023scopes.py b/antlr-3.4/runtime/Python/tests/t023scopes.py
new file mode 100644
index 0000000..4c33b8a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t023scopes.py
@@ -0,0 +1,20 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t023scopes(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.prog()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t024finally.g b/antlr-3.4/runtime/Python/tests/t024finally.g
new file mode 100644
index 0000000..1cd2527
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t024finally.g
@@ -0,0 +1,19 @@
+grammar t024finally;
+
+options {
+ language=Python;
+}
+
+prog returns [events]
+@init {events = []}
+@after {events.append('after')}
+ : ID {raise RuntimeError}
+ ;
+ catch [RuntimeError] {events.append('catch')}
+ finally {events.append('finally')}
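+// When the embedded action raises, the @after action is skipped: only the
+// catch and finally actions run, so the test expects exactly
+// ['catch', 'finally'].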
+
+ID : ('a'..'z')+
+ ;
+
+WS : (' '|'\n'|'\r')+ {$channel=HIDDEN}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t024finally.py b/antlr-3.4/runtime/Python/tests/t024finally.py
new file mode 100644
index 0000000..9a269dd
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t024finally.py
@@ -0,0 +1,23 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t024finally(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.prog()
+
+ assert events == ['catch', 'finally'], events
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.g b/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.g
new file mode 100644
index 0000000..b3500cc
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.g
@@ -0,0 +1,18 @@
+lexer grammar t025lexerRulePropertyRef;
+options {
+ language = Python;
+}
+
+@lexer::init {
+self.properties = []
+}
+
+IDENTIFIER:
+ ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ {
+self.properties.append(
+ ($text, $type, $line, $pos, $index, $channel, $start, $stop)
+)
+ }
+ ;
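+// In the action above, $index is -1 because the token has not yet been
+// assigned an index by a token stream; the companion .py test asserts this
+// for every IDENTIFIER.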
+WS: (' ' | '\n')+;
diff --git a/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.py b/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.py
new file mode 100644
index 0000000..ae4ac79
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.py
@@ -0,0 +1,54 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t025lexerRulePropertyRef(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+ lexer = self.getLexer(stream)
+
+ while True:
+ token = lexer.nextToken()
+ if token.type == antlr3.EOF:
+ break
+
+ assert len(lexer.properties) == 3, lexer.properties
+
+ text, type, line, pos, index, channel, start, stop = lexer.properties[0]
+ assert text == 'foobar', lexer.properties[0]
+ assert type == self.lexerModule.IDENTIFIER, lexer.properties[0]
+ assert line == 1, lexer.properties[0]
+ assert pos == 0, lexer.properties[0]
+ assert index == -1, lexer.properties[0]
+ assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[0]
+ assert start == 0, lexer.properties[0]
+ assert stop == 5, lexer.properties[0]
+
+ text, type, line, pos, index, channel, start, stop = lexer.properties[1]
+ assert text == '_Ab98', lexer.properties[1]
+ assert type == self.lexerModule.IDENTIFIER, lexer.properties[1]
+ assert line == 1, lexer.properties[1]
+ assert pos == 7, lexer.properties[1]
+ assert index == -1, lexer.properties[1]
+ assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[1]
+ assert start == 7, lexer.properties[1]
+ assert stop == 11, lexer.properties[1]
+
+ text, type, line, pos, index, channel, start, stop = lexer.properties[2]
+ assert text == 'A12sdf', lexer.properties[2]
+ assert type == self.lexerModule.IDENTIFIER, lexer.properties[2]
+ assert line == 2, lexer.properties[2]
+ assert pos == 1, lexer.properties[2]
+ assert index == -1, lexer.properties[2]
+ assert channel == antlr3.DEFAULT_CHANNEL, lexer.properties[2]
+ assert start == 15, lexer.properties[2]
+ assert stop == 20, lexer.properties[2]
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t026actions.g b/antlr-3.4/runtime/Python/tests/t026actions.g
new file mode 100644
index 0000000..e8f9fef
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t026actions.g
@@ -0,0 +1,39 @@
+grammar t026actions;
+options {
+ language = Python;
+}
+
+@lexer::init {
+ self.foobar = 'attribute;'
+}
+
+prog
+@init {
+ self.capture('init;')
+}
+@after {
+ self.capture('after;')
+}
+ : IDENTIFIER EOF
+ ;
+ catch [ RecognitionException, exc ] {
+ self.capture('catch;')
+ raise
+ }
+ finally {
+ self.capture('finally;')
+ }
+
+
+IDENTIFIER
+ : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ {
+ # a comment
+ self.capture('action;')
+ self.capture('\%r \%r \%r \%r \%r \%r \%r \%r;' \% ($text, $type, $line, $pos, $index, $channel, $start, $stop))
+ if True:
+ self.capture(self.foobar)
+ }
+ ;
+
+WS: (' ' | '\n')+;
diff --git a/antlr-3.4/runtime/Python/tests/t026actions.py b/antlr-3.4/runtime/Python/tests/t026actions.py
new file mode 100644
index 0000000..dd4e5d6
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t026actions.py
@@ -0,0 +1,66 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t026actions(testbase.ANTLRTest):
+ def parserClass(self, base):
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._errors = []
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def emitErrorMessage(self, msg):
+ self._errors.append(msg)
+
+
+ return TParser
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._errors = []
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def emitErrorMessage(self, msg):
+ self._errors.append(msg)
+
+
+ return TLexer
+
+
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.prog()
+
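+ # the lexer action records, per IDENTIFIER: text, type, line, pos,
+ # index, channel, start and stop, each tuple terminated by ';'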
+ self.assertEqual(
+ parser._output,
+ 'init;after;finally;')
+ self.assertEqual(
+ lexer._output,
+ 'action;u\'foobar\' 4 1 0 -1 0 0 5;attribute;action;u\'_Ab98\' 4 1 7 -1 0 7 11;attribute;action;u\'A12sdf\' 4 2 1 -1 0 15 20;attribute;')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t027eof.g b/antlr-3.4/runtime/Python/tests/t027eof.g
new file mode 100644
index 0000000..9cfbb3a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t027eof.g
@@ -0,0 +1,8 @@
+lexer grammar t027eof;
+
+options {
+ language=Python;
+}
+
+END: EOF;
+SPACE: ' ';
diff --git a/antlr-3.4/runtime/Python/tests/t027eof.py b/antlr-3.4/runtime/Python/tests/t027eof.py
new file mode 100644
index 0000000..b6ae18d
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t027eof.py
@@ -0,0 +1,25 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t027eof(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ @testbase.broken("That's not how EOF is supposed to be used", Exception)
+ def testValid1(self):
+ cStream = antlr3.StringStream(' ')
+ lexer = self.getLexer(cStream)
+
+ tok = lexer.nextToken()
+ assert tok.type == self.lexerModule.SPACE, tok
+
+ tok = lexer.nextToken()
+ assert tok.type == self.lexerModule.END, tok
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled b/antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled
new file mode 100644
index 0000000..d3ba76c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled
@@ -0,0 +1,5 @@
+lexer grammar t028labelExpr;
+ETAGO: ('</a')=> '</';
+MDO: {True}? ('<!a')=> '<!';
+STAGO: {True}? ('<a')=> '<';
+CDATA: '<';
diff --git a/antlr-3.4/runtime/Python/tests/t029synpredgate.g b/antlr-3.4/runtime/Python/tests/t029synpredgate.g
new file mode 100644
index 0000000..7900262
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t029synpredgate.g
@@ -0,0 +1,16 @@
+lexer grammar t029synpredgate;
+options {
+ language = Python;
+}
+
+FOO
+ : ('ab')=> A
+ | ('ac')=> B
+ ;
+
+fragment
+A: 'a';
+
+fragment
+B: 'a';
+
diff --git a/antlr-3.4/runtime/Python/tests/t029synpredgate.py b/antlr-3.4/runtime/Python/tests/t029synpredgate.py
new file mode 100644
index 0000000..b658688
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t029synpredgate.py
@@ -0,0 +1,21 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t029synpredgate(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ stream = antlr3.StringStream('ac')
+ lexer = self.getLexer(stream)
+ token = lexer.nextToken()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+
diff --git a/antlr-3.4/runtime/Python/tests/t030specialStates.g b/antlr-3.4/runtime/Python/tests/t030specialStates.g
new file mode 100644
index 0000000..7b2e423
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t030specialStates.g
@@ -0,0 +1,26 @@
+grammar t030specialStates;
+options {
+ language = Python;
+}
+
+@init {
+self.cond = True
+}
+
+@members {
+def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise re
+}
+
+r
+ : ( {self.cond}? NAME
+ | {not self.cond}? NAME WS+ NAME
+ )
+ ( WS+ NAME )?
+ EOF
+ ;
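+// The {self.cond}? predicates above are evaluated in a special state during
+// prediction; the tests flip parser.cond to steer which alternative the
+// decision takes.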
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/antlr-3.4/runtime/Python/tests/t030specialStates.py b/antlr-3.4/runtime/Python/tests/t030specialStates.py
new file mode 100644
index 0000000..86c4f7c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t030specialStates.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t030specialStates(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('foo')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.r()
+
+
+ def testValid2(self):
+ cStream = antlr3.StringStream('foo name1')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.r()
+
+
+ def testValid3(self):
+ cStream = antlr3.StringStream('bar name1')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.cond = False
+ events = parser.r()
+
+
+ def testValid4(self):
+ cStream = antlr3.StringStream('bar name1 name2')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.cond = False
+ events = parser.r()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t031emptyAlt.g b/antlr-3.4/runtime/Python/tests/t031emptyAlt.g
new file mode 100644
index 0000000..0afa596
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t031emptyAlt.g
@@ -0,0 +1,16 @@
+grammar t031emptyAlt;
+options {
+ language = Python;
+}
+
+r
+ : NAME
+ ( {self.cond}?=> WS+ NAME
+ |
+ )
+ EOF
+ ;
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/antlr-3.4/runtime/Python/tests/t031emptyAlt.py b/antlr-3.4/runtime/Python/tests/t031emptyAlt.py
new file mode 100644
index 0000000..fcae8e1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t031emptyAlt.py
@@ -0,0 +1,21 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t031emptyAlt(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('foo')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.r()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t032subrulePredict.g b/antlr-3.4/runtime/Python/tests/t032subrulePredict.g
new file mode 100644
index 0000000..3cc2327
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t032subrulePredict.g
@@ -0,0 +1,8 @@
+grammar t032subrulePredict;
+options {
+ language = Python;
+}
+
+a: 'BEGIN' b WS+ 'END';
+b: ( WS+ 'A' )+;
+WS: ' ';
diff --git a/antlr-3.4/runtime/Python/tests/t032subrulePredict.py b/antlr-3.4/runtime/Python/tests/t032subrulePredict.py
new file mode 100644
index 0000000..7b62add
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t032subrulePredict.py
@@ -0,0 +1,44 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t032subrulePredict(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(
+ 'BEGIN A END'
+ )
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.a()
+
+
+ @testbase.broken("DFA tries to look beyond end of rule b", Exception)
+ def testValid2(self):
+ cStream = antlr3.StringStream(
+ ' A'
+ )
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.b()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t033backtracking.g b/antlr-3.4/runtime/Python/tests/t033backtracking.g
new file mode 100644
index 0000000..85a4b30
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t033backtracking.g
@@ -0,0 +1,534 @@
+grammar t033backtracking;
+options {
+ language=Python;
+ backtrack=true;
+ memoize=true;
+ k=2;
+}
+
+scope Symbols {
+ types;
+}
+
+@header {
+# compatibility stuff
+try:
+ set = set
+ frozenset = frozenset
+except NameError:
+ from sets import Set as set, ImmutableSet as frozenset
+
+
+try:
+ reversed = reversed
+except NameError:
+ def reversed(l):
+ l = l[:]
+ l.reverse()
+ return l
+
+}
+
+@members {
+ def isTypeName(self, name):
+ for scope in reversed(self.Symbols_stack):
+ if name in scope.types:
+ return True
+
+ return False
+
+}
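+// isTypeName() backs the predicate in type_id below: a name is a type iff
+// some enclosing Symbols scope recorded it, which direct_declarator does
+// whenever $declaration::isTypedef is set.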
+
+translation_unit
+scope Symbols; // entire file is a scope
+@init {
+ $Symbols::types = set()
+}
+ : external_declaration+
+ ;
+
+/** Either a function definition or any other kind of C decl/def.
+ * The LL(*) analysis algorithm fails to deal with this due to
+ * recursion in the declarator rules. I'm putting in a
+ * manual predicate here so that we don't backtrack over
+ * the entire function. Further, you get a better error
+ * as errors within the function itself don't make it fail
+ * to predict that it's a function. Weird errors previously.
+ * Remember: the goal is to avoid backtrack like the plague
+ * because it makes debugging, actions, and errors harder.
+ *
+ * Note that k=1 results in a much smaller predictor for the
+ * fixed lookahead; k=2 made a few extra thousand lines. ;)
+ * I'll have to optimize that in the future.
+ */
+external_declaration
+options {k=1;}
+ : ( declaration_specifiers? declarator declaration* '{' )=> function_definition
+ | declaration
+ ;
+
+function_definition
+scope Symbols; // put parameters and locals into same scope for now
+@init {
+ $Symbols::types = set()
+}
+ : declaration_specifiers? declarator
+// ( declaration+ compound_statement // K&R style
+// | compound_statement // ANSI style
+// )
+ ;
+
+declaration
+scope {
+ isTypedef;
+}
+@init {
+ $declaration::isTypedef = False
+}
+ : 'typedef' declaration_specifiers? {$declaration::isTypedef = True}
+ init_declarator_list ';' // special case, looking for typedef
+ | declaration_specifiers init_declarator_list? ';'
+ ;
+
+declaration_specifiers
+ : ( storage_class_specifier
+ | type_specifier
+ | type_qualifier
+ )+
+ ;
+
+init_declarator_list
+ : init_declarator (',' init_declarator)*
+ ;
+
+init_declarator
+ : declarator //('=' initializer)?
+ ;
+
+storage_class_specifier
+ : 'extern'
+ | 'static'
+ | 'auto'
+ | 'register'
+ ;
+
+type_specifier
+ : 'void'
+ | 'char'
+ | 'short'
+ | 'int'
+ | 'long'
+ | 'float'
+ | 'double'
+ | 'signed'
+ | 'unsigned'
+// | struct_or_union_specifier
+// | enum_specifier
+ | type_id
+ ;
+
+type_id
+ : {self.isTypeName(self.input.LT(1).getText())}? IDENTIFIER
+// {System.out.println($IDENTIFIER.text+" is a type");}
+ ;
+
+// struct_or_union_specifier
+// options {k=3;}
+// scope Symbols; // structs are scopes
+// @init {
+// $Symbols::types = set()
+// }
+// : struct_or_union IDENTIFIER? '{' struct_declaration_list '}'
+// | struct_or_union IDENTIFIER
+// ;
+
+// struct_or_union
+// : 'struct'
+// | 'union'
+// ;
+
+// struct_declaration_list
+// : struct_declaration+
+// ;
+
+// struct_declaration
+// : specifier_qualifier_list struct_declarator_list ';'
+// ;
+
+// specifier_qualifier_list
+// : ( type_qualifier | type_specifier )+
+// ;
+
+// struct_declarator_list
+// : struct_declarator (',' struct_declarator)*
+// ;
+
+// struct_declarator
+// : declarator (':' constant_expression)?
+// | ':' constant_expression
+// ;
+
+// enum_specifier
+// options {k=3;}
+// : 'enum' '{' enumerator_list '}'
+// | 'enum' IDENTIFIER '{' enumerator_list '}'
+// | 'enum' IDENTIFIER
+// ;
+
+// enumerator_list
+// : enumerator (',' enumerator)*
+// ;
+
+// enumerator
+// : IDENTIFIER ('=' constant_expression)?
+// ;
+
+type_qualifier
+ : 'const'
+ | 'volatile'
+ ;
+
+declarator
+ : pointer? direct_declarator
+ | pointer
+ ;
+
+direct_declarator
+ : ( IDENTIFIER
+ {
+ if len($declaration)>0 and $declaration::isTypedef:
+ $Symbols::types.add($IDENTIFIER.text)
+ print "define type "+$IDENTIFIER.text
+ }
+ | '(' declarator ')'
+ )
+ declarator_suffix*
+ ;
+
+declarator_suffix
+ : /*'[' constant_expression ']'
+ |*/ '[' ']'
+// | '(' parameter_type_list ')'
+// | '(' identifier_list ')'
+ | '(' ')'
+ ;
+
+pointer
+ : '*' type_qualifier+ pointer?
+ | '*' pointer
+ | '*'
+ ;
+
+// parameter_type_list
+// : parameter_list (',' '...')?
+// ;
+
+// parameter_list
+// : parameter_declaration (',' parameter_declaration)*
+// ;
+
+// parameter_declaration
+// : declaration_specifiers (declarator|abstract_declarator)*
+// ;
+
+// identifier_list
+// : IDENTIFIER (',' IDENTIFIER)*
+// ;
+
+// type_name
+// : specifier_qualifier_list abstract_declarator?
+// ;
+
+// abstract_declarator
+// : pointer direct_abstract_declarator?
+// | direct_abstract_declarator
+// ;
+
+// direct_abstract_declarator
+// : ( '(' abstract_declarator ')' | abstract_declarator_suffix ) abstract_declarator_suffix*
+// ;
+
+// abstract_declarator_suffix
+// : '[' ']'
+// | '[' constant_expression ']'
+// | '(' ')'
+// | '(' parameter_type_list ')'
+// ;
+
+// initializer
+// : assignment_expression
+// | '{' initializer_list ','? '}'
+// ;
+
+// initializer_list
+// : initializer (',' initializer)*
+// ;
+
+// // E x p r e s s i o n s
+
+// argument_expression_list
+// : assignment_expression (',' assignment_expression)*
+// ;
+
+// additive_expression
+// : (multiplicative_expression) ('+' multiplicative_expression | '-' multiplicative_expression)*
+// ;
+
+// multiplicative_expression
+// : (cast_expression) ('*' cast_expression | '/' cast_expression | '%' cast_expression)*
+// ;
+
+// cast_expression
+// : '(' type_name ')' cast_expression
+// | unary_expression
+// ;
+
+// unary_expression
+// : postfix_expression
+// | '++' unary_expression
+// | '--' unary_expression
+// | unary_operator cast_expression
+// | 'sizeof' unary_expression
+// | 'sizeof' '(' type_name ')'
+// ;
+
+// postfix_expression
+// : primary_expression
+// ( '[' expression ']'
+// | '(' ')'
+// | '(' argument_expression_list ')'
+// | '.' IDENTIFIER
+// | '*' IDENTIFIER
+// | '->' IDENTIFIER
+// | '++'
+// | '--'
+// )*
+// ;
+
+// unary_operator
+// : '&'
+// | '*'
+// | '+'
+// | '-'
+// | '~'
+// | '!'
+// ;
+
+// primary_expression
+// : IDENTIFIER
+// | constant
+// | '(' expression ')'
+// ;
+
+// constant
+// : HEX_LITERAL
+// | OCTAL_LITERAL
+// | DECIMAL_LITERAL
+// | CHARACTER_LITERAL
+// | STRING_LITERAL
+// | FLOATING_POINT_LITERAL
+// ;
+
+// /////
+
+// expression
+// : assignment_expression (',' assignment_expression)*
+// ;
+
+// constant_expression
+// : conditional_expression
+// ;
+
+// assignment_expression
+// : lvalue assignment_operator assignment_expression
+// | conditional_expression
+// ;
+
+// lvalue
+// : unary_expression
+// ;
+
+// assignment_operator
+// : '='
+// | '*='
+// | '/='
+// | '%='
+// | '+='
+// | '-='
+// | '<<='
+// | '>>='
+// | '&='
+// | '^='
+// | '|='
+// ;
+
+// conditional_expression
+// : logical_or_expression ('?' expression ':' conditional_expression)?
+// ;
+
+// logical_or_expression
+// : logical_and_expression ('||' logical_and_expression)*
+// ;
+
+// logical_and_expression
+// : inclusive_or_expression ('&&' inclusive_or_expression)*
+// ;
+
+// inclusive_or_expression
+// : exclusive_or_expression ('|' exclusive_or_expression)*
+// ;
+
+// exclusive_or_expression
+// : and_expression ('^' and_expression)*
+// ;
+
+// and_expression
+// : equality_expression ('&' equality_expression)*
+// ;
+// equality_expression
+// : relational_expression (('=='|'!=') relational_expression)*
+// ;
+
+// relational_expression
+// : shift_expression (('<'|'>'|'<='|'>=') shift_expression)*
+// ;
+
+// shift_expression
+// : additive_expression (('<<'|'>>') additive_expression)*
+// ;
+
+// // S t a t e m e n t s
+
+// statement
+// : labeled_statement
+// | compound_statement
+// | expression_statement
+// | selection_statement
+// | iteration_statement
+// | jump_statement
+// ;
+
+// labeled_statement
+// : IDENTIFIER ':' statement
+// | 'case' constant_expression ':' statement
+// | 'default' ':' statement
+// ;
+
+// compound_statement
+// scope Symbols; // blocks have a scope of symbols
+// @init {
+// $Symbols::types = {}
+// }
+// : '{' declaration* statement_list? '}'
+// ;
+
+// statement_list
+// : statement+
+// ;
+
+// expression_statement
+// : ';'
+// | expression ';'
+// ;
+
+// selection_statement
+// : 'if' '(' expression ')' statement (options {k=1; backtrack=false;}:'else' statement)?
+// | 'switch' '(' expression ')' statement
+// ;
+
+// iteration_statement
+// : 'while' '(' expression ')' statement
+// | 'do' statement 'while' '(' expression ')' ';'
+// | 'for' '(' expression_statement expression_statement expression? ')' statement
+// ;
+
+// jump_statement
+// : 'goto' IDENTIFIER ';'
+// | 'continue' ';'
+// | 'break' ';'
+// | 'return' ';'
+// | 'return' expression ';'
+// ;
+
+IDENTIFIER
+ : LETTER (LETTER|'0'..'9')*
+ ;
+
+fragment
+LETTER
+ : '$'
+ | 'A'..'Z'
+ | 'a'..'z'
+ | '_'
+ ;
+
+CHARACTER_LITERAL
+ : '\'' ( EscapeSequence | ~('\''|'\\') ) '\''
+ ;
+
+STRING_LITERAL
+ : '"' ( EscapeSequence | ~('\\'|'"') )* '"'
+ ;
+
+HEX_LITERAL : '0' ('x'|'X') HexDigit+ IntegerTypeSuffix? ;
+
+DECIMAL_LITERAL : ('0' | '1'..'9' '0'..'9'*) IntegerTypeSuffix? ;
+
+OCTAL_LITERAL : '0' ('0'..'7')+ IntegerTypeSuffix? ;
+
+fragment
+HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
+
+fragment
+IntegerTypeSuffix
+ : ('u'|'U')? ('l'|'L')
+ | ('u'|'U') ('l'|'L')?
+ ;
+
+FLOATING_POINT_LITERAL
+ : ('0'..'9')+ '.' ('0'..'9')* Exponent? FloatTypeSuffix?
+ | '.' ('0'..'9')+ Exponent? FloatTypeSuffix?
+ | ('0'..'9')+ Exponent FloatTypeSuffix?
+ | ('0'..'9')+ Exponent? FloatTypeSuffix
+ ;
+
+fragment
+Exponent : ('e'|'E') ('+'|'-')? ('0'..'9')+ ;
+
+fragment
+FloatTypeSuffix : ('f'|'F'|'d'|'D') ;
+
+fragment
+EscapeSequence
+ : '\\' ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\')
+ | OctalEscape
+ ;
+
+fragment
+OctalEscape
+ : '\\' ('0'..'3') ('0'..'7') ('0'..'7')
+ | '\\' ('0'..'7') ('0'..'7')
+ | '\\' ('0'..'7')
+ ;
+
+fragment
+UnicodeEscape
+ : '\\' 'u' HexDigit HexDigit HexDigit HexDigit
+ ;
+
+WS : (' '|'\r'|'\t'|'\u000C'|'\n') {$channel=HIDDEN;}
+ ;
+
+COMMENT
+ : '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;}
+ ;
+
+LINE_COMMENT
+ : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+ ;
+
+// ignore #line info for now
+LINE_COMMAND
+ : '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t033backtracking.py b/antlr-3.4/runtime/Python/tests/t033backtracking.py
new file mode 100644
index 0000000..8b5c66a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t033backtracking.py
@@ -0,0 +1,31 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t033backtracking(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ @testbase.broken("Some bug in the tool", SyntaxError)
+ def testValid1(self):
+ cStream = antlr3.StringStream('int a;')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.translation_unit()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.g b/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.g
new file mode 100644
index 0000000..7311235
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.g
@@ -0,0 +1,30 @@
+grammar t034tokenLabelPropertyRef;
+options {
+ language = Python;
+}
+
+a: t=A
+ {
+ print $t.text
+ print $t.type
+ print $t.line
+ print $t.pos
+ print $t.channel
+ print $t.index
+ #print $t.tree
+ }
+ ;
+
+A: 'a'..'z';
+
+WS :
+ ( ' '
+ | '\t'
+ | ( '\n'
+ | '\r\n'
+ | '\r'
+ )
+ )+
+ { $channel = HIDDEN }
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py b/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py
new file mode 100644
index 0000000..b94de13
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py
@@ -0,0 +1,40 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t034tokenLabelPropertyRef(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(' a')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.a()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.g b/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.g
new file mode 100644
index 0000000..710a91c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.g
@@ -0,0 +1,16 @@
+grammar t035ruleLabelPropertyRef;
+options {
+ language = Python;
+}
+
+a returns [bla]: t=b
+ {
+ $bla = $t.start, $t.stop, $t.text
+ }
+ ;
+
+b: A+;
+
+A: 'a'..'z';
+
+WS: ' '+ { $channel = HIDDEN };
diff --git a/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.py b/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.py
new file mode 100644
index 0000000..c42dbaa
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t035ruleLabelPropertyRef(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(' a a a a ')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ start, stop, text = parser.a()
+
+ # first token of rule b is the 2nd token (counting hidden tokens)
+ assert start.index == 1, start
+
+ # last token of rule b is the 7th token (counting hidden tokens)
+ assert stop.index == 7, stop
+
+ assert text == "a a a a", text
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.g b/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.g
new file mode 100644
index 0000000..04ce14c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.g
@@ -0,0 +1,25 @@
+grammar t036multipleReturnValues;
+options {
+ language = Python;
+}
+
+a returns [foo, bar]: A
+ {
+ $foo = "foo";
+ $bar = "bar";
+ }
+ ;
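+// With two return values the Python target hands back a rule-return object;
+// the caller reads them as attributes (ret.foo and ret.bar in the .py test).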
+
+A: 'a'..'z';
+
+WS :
+ ( ' '
+ | '\t'
+ | ( '\n'
+ | '\r\n'
+ | '\r'
+ )
+ )+
+ { $channel = HIDDEN }
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.py b/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.py
new file mode 100644
index 0000000..97e04e3
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.py
@@ -0,0 +1,43 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t036multipleReturnValues(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(' a')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ ret = parser.a()
+ assert ret.foo == 'foo', ret.foo
+ assert ret.bar == 'bar', ret.bar
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
diff --git a/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.g b/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.g
new file mode 100644
index 0000000..d2ab177
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.g
@@ -0,0 +1,15 @@
+grammar t037rulePropertyRef;
+options {
+ language = Python;
+}
+
+a returns [bla]
+@after {
+ $bla = $start, $stop, $text
+}
+ : A+
+ ;
+
+A: 'a'..'z';
+
+WS: ' '+ { $channel = HIDDEN };
diff --git a/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.py b/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.py
new file mode 100644
index 0000000..998a2ba
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t037rulePropertyRef(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(' a a a a ')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ start, stop, text = parser.a().bla
+
+ # first token of rule a is the 2nd token (counting hidden tokens)
+ assert start.index == 1, start
+
+ # last token of rule a is the 7th token (counting hidden tokens)
+ assert stop.index == 7, stop
+
+ assert text == "a a a a", text
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.g b/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.g
new file mode 100644
index 0000000..fcc1a61
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.g
@@ -0,0 +1,28 @@
+lexer grammar t038lexerRuleLabel;
+options {
+ language = Python;
+}
+
+A: 'a'..'z' WS '0'..'9'
+ {
+ print $WS
+ print $WS.type
+ print $WS.line
+ print $WS.pos
+ print $WS.channel
+ print $WS.index
+ print $WS.text
+ }
+ ;
+
+fragment WS :
+ ( ' '
+ | '\t'
+ | ( '\n'
+ | '\r\n'
+ | '\r'
+ )
+ )+
+ { $channel = HIDDEN }
+ ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.py b/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.py
new file mode 100644
index 0000000..2af65f9
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.py
@@ -0,0 +1,33 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t038lexerRuleLabel(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('a 2')
+
+ lexer = self.getLexer(cStream)
+
+ while True:
+ t = lexer.nextToken()
+ if t.type == antlr3.EOF:
+ break
+ print t
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t039labels.g b/antlr-3.4/runtime/Python/tests/t039labels.g
new file mode 100644
index 0000000..d9dc248
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t039labels.g
@@ -0,0 +1,18 @@
+grammar t039labels;
+options {
+ language = Python;
+}
+
+a returns [l]
+ : ids+=A ( ',' ids+=(A|B) )* C D w=. ids+=. F EOF
+ { l = ($ids, $w) }
+ ;
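+// For the test input, ids accumulates five tokens via the += list labels plus
+// one more from the wildcard ids+=., while w captures the single token matched
+// by w=. ; t039labels.py asserts len(ids) == 6 and w.text == 'GNU1'.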
+
+A: 'a'..'z';
+B: '0'..'9';
+C: a='A' { print $a };
+D: a='FOOBAR' { print $a };
+E: 'GNU' a=. { print $a };
+F: 'BLARZ' a=EOF { print $a };
+
+WS: ' '+ { $channel = HIDDEN };
diff --git a/antlr-3.4/runtime/Python/tests/t039labels.py b/antlr-3.4/runtime/Python/tests/t039labels.py
new file mode 100644
index 0000000..8159d6b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t039labels.py
@@ -0,0 +1,53 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t039labels(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(
+ 'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ'
+ )
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ ids, w = parser.a()
+
+ assert len(ids) == 6, ids
+ assert ids[0].text == 'a', ids[0]
+ assert ids[1].text == 'b', ids[1]
+ assert ids[2].text == 'c', ids[2]
+ assert ids[3].text == '1', ids[3]
+ assert ids[4].text == '2', ids[4]
+ assert ids[5].text == 'A', ids[5]
+
+ assert w.text == 'GNU1', w
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
diff --git a/antlr-3.4/runtime/Python/tests/t040bug80.g b/antlr-3.4/runtime/Python/tests/t040bug80.g
new file mode 100644
index 0000000..bdf610b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t040bug80.g
@@ -0,0 +1,13 @@
+lexer grammar t040bug80;
+options {
+ language = Python;
+}
+
+ID_LIKE
+ : 'defined'
+ | {False}? Identifier
+ | Identifier
+ ;
+
+fragment
+Identifier: 'a'..'z'+ ; // with just 'a', output compiles
diff --git a/antlr-3.4/runtime/Python/tests/t040bug80.py b/antlr-3.4/runtime/Python/tests/t040bug80.py
new file mode 100644
index 0000000..c6637e5
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t040bug80.py
@@ -0,0 +1,33 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t040bug80(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('defined')
+ lexer = self.getLexer(cStream)
+ while True:
+ t = lexer.nextToken()
+ if t.type == antlr3.EOF:
+ break
+ print t
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
diff --git a/antlr-3.4/runtime/Python/tests/t041parameters.g b/antlr-3.4/runtime/Python/tests/t041parameters.g
new file mode 100644
index 0000000..b9a8892
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t041parameters.g
@@ -0,0 +1,16 @@
+grammar t041parameters;
+options {
+ language = Python;
+}
+
+a[arg1, arg2] returns [l]
+ : A+ EOF
+ {
+ l = ($arg1, $arg2)
+ $arg1 = "gnarz"
+ }
+ ;
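+// l is captured before $arg1 is rebound, so the test still sees
+// ('foo', 'bar'); assigning to $arg1 only changes the rule-local name.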
+
+A: 'a'..'z';
+
+WS: ' '+ { $channel = HIDDEN };
diff --git a/antlr-3.4/runtime/Python/tests/t041parameters.py b/antlr-3.4/runtime/Python/tests/t041parameters.py
new file mode 100644
index 0000000..1fe4a4f
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t041parameters.py
@@ -0,0 +1,45 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t041parameters(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream('a a a')
+
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ r = parser.a('foo', 'bar')
+
+ assert r == ('foo', 'bar'), r
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+
+
diff --git a/antlr-3.4/runtime/Python/tests/t042ast.g b/antlr-3.4/runtime/Python/tests/t042ast.g
new file mode 100644
index 0000000..f676835
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t042ast.g
@@ -0,0 +1,353 @@
+grammar t042ast;
+options {
+ language = Python;
+ output = AST;
+}
+
+tokens {
+ VARDEF;
+ FLOAT;
+ EXPR;
+ BLOCK;
+ VARIABLE;
+ FIELD;
+ CALL;
+ INDEX;
+ FIELDACCESS;
+}
+
+@init {
+self.flag = False
+}
+
+r1
+ : INT ('+'^ INT)*
+ ;
+
+r2
+ : 'assert'^ x=expression (':'! y=expression)? ';'!
+ ;
+
+r3
+ : 'if'^ expression s1=statement ('else'! s2=statement)?
+ ;
+
+r4
+ : 'while'^ expression statement
+ ;
+
+r5
+ : 'return'^ expression? ';'!
+ ;
+
+r6
+ : (INT|ID)+
+ ;
+
+r7
+ : INT ->
+ ;
+
+r8
+ : 'var' ID ':' type -> ^('var' type ID)
+ ;
+
+r9
+ : type ID ';' -> ^(VARDEF type ID)
+ ;
+
+r10
+ : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))}
+ ;
+
+r11
+ : expression -> ^(EXPR expression)
+ | -> EXPR
+ ;
+
+r12
+ : ID (',' ID)* -> ID+
+ ;
+
+r13
+ : type ID (',' ID)* ';' -> ^(type ID+)
+ ;
+
+r14
+ : expression? statement* type+
+ -> ^(EXPR expression? statement* type+)
+ ;
+
+r15
+ : INT -> INT INT
+ ;
+
+r16
+ : 'int' ID (',' ID)* -> ^('int' ID)+
+ ;
+
+r17
+ : 'for' '(' start=statement ';' expression ';' next=statement ')' statement
+ -> ^('for' $start expression $next statement)
+ ;
+
+r18
+ : t='for' -> ^(BLOCK)
+ ;
+
+r19
+ : t='for' -> ^(BLOCK[$t])
+ ;
+
+r20
+ : t='for' -> ^(BLOCK[$t,"FOR"])
+ ;
+
+r21
+ : t='for' -> BLOCK
+ ;
+
+r22
+ : t='for' -> BLOCK[$t]
+ ;
+
+r23
+ : t='for' -> BLOCK[$t,"FOR"]
+ ;
+
+r24
+ : r=statement expression -> ^($r expression)
+ ;
+
+r25
+ : r+=statement (',' r+=statement)+ expression -> ^($r expression)
+ ;
+
+r26
+ : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+)
+ ;
+
+r27
+ : r=statement expression -> ^($r ^($r expression))
+ ;
+
+r28
+ : ('foo28a'|'foo28b') ->
+ ;
+
+r29
+ : (r+=statement)* -> ^(BLOCK $r+)
+ ;
+
+r30
+ : statement* -> ^(BLOCK statement?)
+ ;
+
+r31
+ : modifier type ID ('=' expression)? ';'
+ -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?)
+ -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?)
+ -> ^(FIELD ID modifier* type expression?)
+ ;
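+// In r31 the first rewrite whose {...}? predicate holds wins; the unguarded
+// ^(FIELD ...) alternative is the fallback. The test sets self.flag through
+// parse(..., flag=N).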
+
+r32[which]
+ : ID INT -> {which==1}? ID
+ -> {which==2}? INT
+ -> // yield nothing as else-clause
+ ;
+
+r33
+ : modifiers! statement
+ ;
+
+r34
+ : modifiers! r34a[$modifiers.tree]
+ //| modifiers! r33b[$modifiers.tree]
+ ;
+
+r34a[mod]
+ : 'class' ID ('extends' sup=type)?
+ ( 'implements' i+=type (',' i+=type)*)?
+ '{' statement* '}'
+ -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* )
+ ;
+
+r35
+ : '{' 'extends' (sup=type)? '}'
+ -> ^('extends' $sup)?
+ ;
+
+r36
+ : 'if' '(' expression ')' s1=statement
+ ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2)
+ | -> ^('if' ^(EXPR expression) $s1)
+ )
+ ;
+
+r37
+ : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )*
+ ;
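+// In r37, (INT -> INT) seeds $r37 with the first INT and each '+' iteration
+// rewraps the previous tree, yielding the left-associative (+ (+ 1 2) 3)
+// checked by testR37.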
+
+r38
+ : INT ('+'^ INT)*
+ ;
+
+r39
+ : (primary->primary) // set return tree to just primary
+ ( '(' arg=expression ')'
+ -> ^(CALL $r39 $arg)
+ | '[' ie=expression ']'
+ -> ^(INDEX $r39 $ie)
+ | '.' p=primary
+ -> ^(FIELDACCESS $r39 $p)
+ )*
+ ;
+
+r40
+ : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';'
+ ;
+
+r41
+ : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
+ ;
+
+r42
+ : ids+=ID (','! ids+=ID)*
+ ;
+
+r43 returns [res]
+ : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]}
+ ;
+
+r44
+ : ids+=ID^ (','! ids+=ID^)*
+ ;
+
+r45
+ : primary^
+ ;
+
+r46 returns [res]
+ : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]}
+ ;
+
+r47
+ : ids+=primary (','! ids+=primary)*
+ ;
+
+r48
+ : ids+=. (','! ids+=.)*
+ ;
+
+r49
+ : .^ ID
+ ;
+
+r50
+ : ID
+ -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID)
+ ;
+
+/** templates tested:
+ tokenLabelPropertyRef_tree
+*/
+r51 returns [res]
+ : ID t=ID ID
+ { $res = $t.tree }
+ ;
+
+/** templates tested:
+ rulePropertyRef_tree
+*/
+r52 returns [res]
+@after {
+ $res = $tree
+}
+ : ID
+ ;
+
+/** templates tested:
+ ruleLabelPropertyRef_tree
+*/
+r53 returns [res]
+ : t=primary
+ { $res = $t.tree }
+ ;
+
+/** templates tested:
+ ruleSetPropertyRef_tree
+*/
+r54 returns [res]
+@after {
+ $tree = $t.tree;
+}
+ : ID t=expression ID
+ ;
+
+/** backtracking */
+r55
+options { backtrack=true; k=1; }
+ : (modifier+ INT)=> modifier+ expression
+ | modifier+ statement
+ ;
+
+
+/** templates tested:
+ rewriteTokenRef with len(args)>0
+*/
+r56
+ : t=ID* -> ID[$t,'foo']
+ ;
+
+/** templates tested:
+ rewriteTokenRefRoot with len(args)>0
+*/
+r57
+ : t=ID* -> ^(ID[$t,'foo'])
+ ;
+
+/** templates tested:
+ ???
+*/
+r58
+ : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^
+ ;
+
+/** templates tested:
+ rewriteTokenListLabelRefRoot
+*/
+r59
+ : (t+=ID)+ statement -> ^($t statement)+
+ ;
+
+primary
+ : ID
+ ;
+
+expression
+ : r1
+ ;
+
+statement
+ : 'fooze'
+ | 'fooze2'
+ ;
+
+modifiers
+ : modifier+
+ ;
+
+modifier
+ : 'public'
+ | 'private'
+ ;
+
+type
+ : 'int'
+ | 'bool'
+ ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
diff --git a/antlr-3.4/runtime/Python/tests/t042ast.py b/antlr-3.4/runtime/Python/tests/t042ast.py
new file mode 100644
index 0000000..e29c077
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t042ast.py
@@ -0,0 +1,688 @@
+import unittest
+import textwrap
+import antlr3
+import testbase
+
+class t042ast(testbase.ANTLRTest):
+## def lexerClass(self, base):
+## class TLexer(base):
+## def reportError(self, re):
+## # no error recovery yet, just crash!
+## raise re
+
+## return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def parse(self, text, method, rArgs=[], **kwargs):
+ self.compileGrammar() #options='-trace')
+
+ cStream = antlr3.StringStream(text)
+ self.lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(self.lexer)
+ self.parser = self.getParser(tStream)
+
+ for attr, val in kwargs.items():
+ setattr(self.parser, attr, val)
+
+ return getattr(self.parser, method)(*rArgs)
+
+
+ def testR1(self):
+ r = self.parse("1 + 2", 'r1')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(+ 1 2)'
+ )
+
+
+ def testR2a(self):
+ r = self.parse("assert 2+3;", 'r2')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(assert (+ 2 3))'
+ )
+
+
+ def testR2b(self):
+ r = self.parse("assert 2+3 : 5;", 'r2')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(assert (+ 2 3) 5)'
+ )
+
+
+ def testR3a(self):
+ r = self.parse("if 1 fooze", 'r3')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(if 1 fooze)'
+ )
+
+
+ def testR3b(self):
+ r = self.parse("if 1 fooze else fooze", 'r3')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(if 1 fooze fooze)'
+ )
+
+
+ def testR4a(self):
+ r = self.parse("while 2 fooze", 'r4')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(while 2 fooze)'
+ )
+
+
+ def testR5a(self):
+ r = self.parse("return;", 'r5')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'return'
+ )
+
+
+ def testR5b(self):
+ r = self.parse("return 2+3;", 'r5')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(return (+ 2 3))'
+ )
+
+
+ def testR6a(self):
+ r = self.parse("3", 'r6')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '3'
+ )
+
+
+ def testR6b(self):
+ r = self.parse("3 a", 'r6')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '3 a'
+ )
+
+
+ def testR7(self):
+ r = self.parse("3", 'r7')
+ self.failUnless(
+ r.tree is None
+ )
+
+
+ def testR8(self):
+ r = self.parse("var foo:bool", 'r8')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(var bool foo)'
+ )
+
+
+ def testR9(self):
+ r = self.parse("int foo;", 'r9')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(VARDEF int foo)'
+ )
+
+
+ def testR10(self):
+ r = self.parse("10", 'r10')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '10.0'
+ )
+
+
+ def testR11a(self):
+ r = self.parse("1+2", 'r11')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR (+ 1 2))'
+ )
+
+
+ def testR11b(self):
+ r = self.parse("", 'r11')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'EXPR'
+ )
+
+
+ def testR12a(self):
+ r = self.parse("foo", 'r12')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'foo'
+ )
+
+
+ def testR12b(self):
+ r = self.parse("foo, bar, gnurz", 'r12')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'foo bar gnurz'
+ )
+
+
+ def testR13a(self):
+ r = self.parse("int foo;", 'r13')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(int foo)'
+ )
+
+
+ def testR13b(self):
+ r = self.parse("bool foo, bar, gnurz;", 'r13')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(bool foo bar gnurz)'
+ )
+
+
+ def testR14a(self):
+ r = self.parse("1+2 int", 'r14')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR (+ 1 2) int)'
+ )
+
+
+ def testR14b(self):
+ r = self.parse("1+2 int bool", 'r14')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR (+ 1 2) int bool)'
+ )
+
+
+ def testR14c(self):
+ r = self.parse("int bool", 'r14')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR int bool)'
+ )
+
+
+ def testR14d(self):
+ r = self.parse("fooze fooze int bool", 'r14')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR fooze fooze int bool)'
+ )
+
+
+ def testR14e(self):
+ r = self.parse("7+9 fooze fooze int bool", 'r14')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(EXPR (+ 7 9) fooze fooze int bool)'
+ )
+
+
+ def testR15(self):
+ r = self.parse("7", 'r15')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '7 7'
+ )
+
+
+ def testR16a(self):
+ r = self.parse("int foo", 'r16')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(int foo)'
+ )
+
+
+ def testR16b(self):
+ r = self.parse("int foo, bar, gnurz", 'r16')
+
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(int foo) (int bar) (int gnurz)'
+ )
+
+
+ def testR17a(self):
+ r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(for fooze (+ 1 2) fooze fooze)'
+ )
+
+
+ def testR18a(self):
+ r = self.parse("for", 'r18')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'BLOCK'
+ )
+
+
+ def testR19a(self):
+ r = self.parse("for", 'r19')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'for'
+ )
+
+
+ def testR20a(self):
+ r = self.parse("for", 'r20')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'FOR'
+ )
+
+
+ def testR21a(self):
+ r = self.parse("for", 'r21')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'BLOCK'
+ )
+
+
+ def testR22a(self):
+ r = self.parse("for", 'r22')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'for'
+ )
+
+
+ def testR23a(self):
+ r = self.parse("for", 'r23')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'FOR'
+ )
+
+
+ def testR24a(self):
+ r = self.parse("fooze 1 + 2", 'r24')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(fooze (+ 1 2))'
+ )
+
+
+ def testR25a(self):
+ r = self.parse("fooze, fooze2 1 + 2", 'r25')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(fooze (+ 1 2))'
+ )
+
+
+ def testR26a(self):
+ r = self.parse("fooze, fooze2", 'r26')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(BLOCK fooze fooze2)'
+ )
+
+
+ def testR27a(self):
+ r = self.parse("fooze 1 + 2", 'r27')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(fooze (fooze (+ 1 2)))'
+ )
+
+
+ def testR28(self):
+ r = self.parse("foo28a", 'r28')
+ self.failUnless(
+ r.tree is None
+ )
+
+
+ def testR29(self):
+ try:
+ r = self.parse("", 'r29')
+ self.fail()
+ except RuntimeError:
+ pass
+
+
+# FIXME: broken upstream?
+## def testR30(self):
+## try:
+## r = self.parse("fooze fooze", 'r30')
+## self.fail(r.tree.toStringTree())
+## except RuntimeError:
+## pass
+
+
+ def testR31a(self):
+ r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0)
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(VARDEF gnurz public int (+ 1 2))'
+ )
+
+
+ def testR31b(self):
+ r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1)
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(VARIABLE gnurz public int (+ 1 2))'
+ )
+
+
+ def testR31c(self):
+ r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2)
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(FIELD gnurz public int (+ 1 2))'
+ )
+
+
+ def testR32a(self):
+ r = self.parse("gnurz 32", 'r32', [1], flag=2)
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'gnurz'
+ )
+
+
+ def testR32b(self):
+ r = self.parse("gnurz 32", 'r32', [2], flag=2)
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '32'
+ )
+
+
+ def testR32c(self):
+ r = self.parse("gnurz 32", 'r32', [3], flag=2)
+ self.failUnless(
+ r.tree is None
+ )
+
+
+ def testR33a(self):
+ r = self.parse("public private fooze", 'r33')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'fooze'
+ )
+
+
+ def testR34a(self):
+ r = self.parse("public class gnurz { fooze fooze2 }", 'r34')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(class gnurz public fooze fooze2)'
+ )
+
+
+ def testR34b(self):
+ r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(class gnurz public (extends bool) (implements int bool) fooze fooze2)'
+ )
+
+
+ def testR35(self):
+ try:
+ r = self.parse("{ extends }", 'r35')
+ self.fail()
+
+ except RuntimeError:
+ pass
+
+
+ def testR36a(self):
+ r = self.parse("if ( 1 + 2 ) fooze", 'r36')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(if (EXPR (+ 1 2)) fooze)'
+ )
+
+
+ def testR36b(self):
+ r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(if (EXPR (+ 1 2)) fooze fooze2)'
+ )
+
+
+ def testR37(self):
+ r = self.parse("1 + 2 + 3", 'r37')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(+ (+ 1 2) 3)'
+ )
+
+
+ def testR38(self):
+ r = self.parse("1 + 2 + 3", 'r38')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(+ (+ 1 2) 3)'
+ )
+
+
+ def testR39a(self):
+ r = self.parse("gnurz[1]", 'r39')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(INDEX gnurz 1)'
+ )
+
+
+ def testR39b(self):
+ r = self.parse("gnurz(2)", 'r39')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(CALL gnurz 2)'
+ )
+
+
+ def testR39c(self):
+ r = self.parse("gnurz.gnarz", 'r39')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(FIELDACCESS gnurz gnarz)'
+ )
+
+
+ def testR39d(self):
+ r = self.parse("gnurz.gnarz.gnorz", 'r39')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)'
+ )
+
+
+ def testR40(self):
+ r = self.parse("1 + 2 + 3;", 'r40')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(+ 1 2 3)'
+ )
+
+
+ def testR41(self):
+ r = self.parse("1 + 2 + 3;", 'r41')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(3 (2 1))'
+ )
+
+
+ def testR42(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r42')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'gnurz gnarz gnorz'
+ )
+
+
+ def testR43(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r43')
+ self.failUnless(
+ r.tree is None
+ )
+ self.failUnlessEqual(
+ r.res,
+ ['gnurz', 'gnarz', 'gnorz']
+ )
+
+
+ def testR44(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r44')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(gnorz (gnarz gnurz))'
+ )
+
+
+ def testR45(self):
+ r = self.parse("gnurz", 'r45')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'gnurz'
+ )
+
+
+ def testR46(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r46')
+ self.failUnless(
+ r.tree is None
+ )
+ self.failUnlessEqual(
+ r.res,
+ ['gnurz', 'gnarz', 'gnorz']
+ )
+
+
+ def testR47(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r47')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'gnurz gnarz gnorz'
+ )
+
+
+ def testR48(self):
+ r = self.parse("gnurz, gnarz, gnorz", 'r48')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'gnurz gnarz gnorz'
+ )
+
+
+ def testR49(self):
+ r = self.parse("gnurz gnorz", 'r49')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(gnurz gnorz)'
+ )
+
+
+ def testR50(self):
+ r = self.parse("gnurz", 'r50')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(1.0 gnurz)'
+ )
+
+
+ def testR51(self):
+ r = self.parse("gnurza gnurzb gnurzc", 'r51')
+ self.failUnlessEqual(
+ r.res.toStringTree(),
+ 'gnurzb'
+ )
+
+
+ def testR52(self):
+ r = self.parse("gnurz", 'r52')
+ self.failUnlessEqual(
+ r.res.toStringTree(),
+ 'gnurz'
+ )
+
+
+ def testR53(self):
+ r = self.parse("gnurz", 'r53')
+ self.failUnlessEqual(
+ r.res.toStringTree(),
+ 'gnurz'
+ )
+
+
+ def testR54(self):
+ r = self.parse("gnurza 1 + 2 gnurzb", 'r54')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(+ 1 2)'
+ )
+
+
+ def testR55a(self):
+ r = self.parse("public private 1 + 2", 'r55')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'public private (+ 1 2)'
+ )
+
+
+ def testR55b(self):
+ r = self.parse("public fooze", 'r55')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'public fooze'
+ )
+
+
+ def testR56(self):
+ r = self.parse("a b c d", 'r56')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'foo'
+ )
+
+
+ def testR57(self):
+ r = self.parse("a b c d", 'r57')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ 'foo'
+ )
+
+
+ def testR59(self):
+ r = self.parse("a b c fooze", 'r59')
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ '(a fooze) (b fooze) (c fooze)'
+ )
+
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t043synpred.g b/antlr-3.4/runtime/Python/tests/t043synpred.g
new file mode 100644
index 0000000..7294f23
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t043synpred.g
@@ -0,0 +1,14 @@
+grammar t043synpred;
+options {
+ language = Python;
+}
+
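+// (s+ P)=> is a syntactic predicate: the optional block below is entered
+// only if scanning ahead over the spaces (s+) finds a '+' before the '>'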
+a: ((s+ P)=> s+ b)? E;
+b: P 'foo';
+
+s: S;
+
+
+S: ' ';
+P: '+';
+E: '>';
diff --git a/antlr-3.4/runtime/Python/tests/t043synpred.py b/antlr-3.4/runtime/Python/tests/t043synpred.py
new file mode 100644
index 0000000..9246de2
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t043synpred.py
@@ -0,0 +1,39 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t043synpred(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def testValid1(self):
+ cStream = antlr3.StringStream(' +foo>')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ events = parser.a()
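+        # no assertions needed: a mismatch would propagate as an
+        # exception via recover() above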
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t044trace.g b/antlr-3.4/runtime/Python/tests/t044trace.g
new file mode 100644
index 0000000..0b7aa71
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t044trace.g
@@ -0,0 +1,20 @@
+grammar t044trace;
+options {
+ language = Python;
+}
+
+@init {
+ self._stack = None
+}
+
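+// rule c snapshots the rule invocation stack (a, b, c) the first time an
+// INT is matched; t044trace.py asserts on that snapshot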
+a: '<' ((INT '+')=>b|c) '>';
+b: c ('+' c)*;
+c: INT
+ {
+ if self._stack is None:
+ self._stack = self.getRuleInvocationStack()
+ }
+ ;
+
+INT: ('0'..'9')+;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
diff --git a/antlr-3.4/runtime/Python/tests/t044trace.py b/antlr-3.4/runtime/Python/tests/t044trace.py
new file mode 100644
index 0000000..13c9b76
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t044trace.py
@@ -0,0 +1,95 @@
+import antlr3
+import testbase
+import unittest
+
+
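+# The grammar is compiled with -trace, so the generated recognizers call
+# traceIn/traceOut around every rule; the subclasses below record those
+# calls in self.traces.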
+class T(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar(options='-trace')
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self.traces = []
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def parserClass(self, base):
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self.traces = []
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ def getRuleInvocationStack(self):
+ return self._getRuleInvocationStack(base.__module__)
+
+ return TParser
+
+
+ def testTrace(self):
+ cStream = antlr3.StringStream('< 1 + 2 + 3 >')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.a()
+
+ self.failUnlessEqual(
+ lexer.traces,
+ [ '>T__7', '<T__7', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+ '>T__6', '<T__6', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+ '>T__6', '<T__6', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+ '>T__8', '<T__8']
+ )
+
+ self.failUnlessEqual(
+ parser.traces,
+ [ '>a', '>synpred1_t044trace_fragment', '<synpred1_t044trace_fragment', '>b', '>c',
+ '<c', '>c', '<c', '>c', '<c', '<b', '<a' ]
+ )
+
+
+ def testInvokationStack(self):
+ cStream = antlr3.StringStream('< 1 + 2 + 3 >')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.a()
+
+ self.failUnlessEqual(
+ parser._stack,
+ ['a', 'b', 'c']
+ )
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t045dfabug.g b/antlr-3.4/runtime/Python/tests/t045dfabug.g
new file mode 100644
index 0000000..4ad895b
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t045dfabug.g
@@ -0,0 +1,32 @@
+grammar t045dfabug;
+options {
+ language = Python;
+ output = AST;
+}
+
+
+// this rule used to generate an infinite loop in DFA.predict
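+// both alternatives start with modifier+, so only the syntactic
+// predicate (modifier+ INT)=> can distinguish them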
+r
+options { backtrack=true; }
+ : (modifier+ INT)=> modifier+ expression
+ | modifier+ statement
+ ;
+
+expression
+ : INT '+' INT
+ ;
+
+statement
+ : 'fooze'
+ | 'fooze2'
+ ;
+
+modifier
+ : 'public'
+ | 'private'
+ ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
diff --git a/antlr-3.4/runtime/Python/tests/t045dfabug.py b/antlr-3.4/runtime/Python/tests/t045dfabug.py
new file mode 100644
index 0000000..76be15e
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t045dfabug.py
@@ -0,0 +1,21 @@
+import unittest
+import textwrap
+import antlr3
+import testbase
+
+class T(testbase.ANTLRTest):
+
+ def testbug(self):
+ self.compileGrammar()
+
+ cStream = antlr3.StringStream("public fooze")
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+
+ parser.r()
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t046rewrite.g b/antlr-3.4/runtime/Python/tests/t046rewrite.g
new file mode 100644
index 0000000..e8dc1dc
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t046rewrite.g
@@ -0,0 +1,54 @@
+grammar t046rewrite;
+options {
+ language=Python;
+}
+
+program
+@init {
+ start = self.input.LT(1)
+}
+ : method+
+ {
+ self.input.insertBefore(start,"public class Wrapper {\n")
+ self.input.insertAfter($method.stop, "\n}\n")
+ }
+ ;
+
+method
+ : m='method' ID '(' ')' body
+ {self.input.replace($m, "public void");}
+ ;
+
+body
+scope {
+ decls
+}
+@init {
+ $body::decls = set()
+}
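+// decls is a dynamically scoped set, filled in by stat below; every
+// assigned ID gets an "int <name>;" declaration inserted after the '{'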
+ : lcurly='{' stat* '}'
+ {
+ for it in $body::decls:
+ self.input.insertAfter($lcurly, "\nint "+it+";")
+ }
+ ;
+
+stat: ID '=' expr ';' {$body::decls.add($ID.text);}
+ ;
+
+expr: mul ('+' mul)*
+ ;
+
+mul : atom ('*' atom)*
+ ;
+
+atom: ID
+ | INT
+ ;
+
+ID : ('a'..'z'|'A'..'Z')+ ;
+
+INT : ('0'..'9')+ ;
+
+WS : (' '|'\t'|'\n')+ {$channel=HIDDEN;}
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t046rewrite.py b/antlr-3.4/runtime/Python/tests/t046rewrite.py
new file mode 100644
index 0000000..a61ede4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t046rewrite.py
@@ -0,0 +1,55 @@
+import unittest
+import textwrap
+import antlr3
+import testbase
+
+class T(testbase.ANTLRTest):
+ def testRewrite(self):
+ self.compileGrammar()
+
+ input = textwrap.dedent(
+ '''\
+ method foo() {
+ i = 3;
+ k = i;
+ i = k*4;
+ }
+
+ method bar() {
+ j = i*2;
+ }
+ ''')
+
+ cStream = antlr3.StringStream(input)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.TokenRewriteStream(lexer)
+ parser = self.getParser(tStream)
+ parser.program()
+
+ expectedOutput = textwrap.dedent('''\
+ public class Wrapper {
+ public void foo() {
+ int k;
+ int i;
+ i = 3;
+ k = i;
+ i = k*4;
+ }
+
+ public void bar() {
+ int j;
+ j = i*2;
+ }
+ }
+
+ ''')
+
+ self.failUnlessEqual(
+ str(tStream),
+ expectedOutput
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparser.g b/antlr-3.4/runtime/Python/tests/t047treeparser.g
new file mode 100644
index 0000000..7e50ac4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t047treeparser.g
@@ -0,0 +1,113 @@
+grammar t047treeparser;
+options {
+ language=Python;
+ output=AST;
+}
+
+tokens {
+ VAR_DEF;
+ ARG_DEF;
+ FUNC_HDR;
+ FUNC_DECL;
+ FUNC_DEF;
+ BLOCK;
+}
+
+program
+ : declaration+
+ ;
+
+declaration
+ : variable
+ | functionHeader ';' -> ^(FUNC_DECL functionHeader)
+ | functionHeader block -> ^(FUNC_DEF functionHeader block)
+ ;
+
+variable
+ : type declarator ';' -> ^(VAR_DEF type declarator)
+ ;
+
+declarator
+ : ID
+ ;
+
+functionHeader
+ : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+ -> ^(FUNC_HDR type ID formalParameter+)
+ ;
+
+formalParameter
+ : type declarator -> ^(ARG_DEF type declarator)
+ ;
+
+type
+ : 'int'
+ | 'char'
+ | 'void'
+ | ID
+ ;
+
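+// BLOCK[$lc,"BLOCK"] roots the subtree at an imaginary BLOCK node that
+// takes its line/column information from the '{' token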
+block
+ : lc='{'
+ variable*
+ stat*
+ '}'
+ -> ^(BLOCK[$lc,"BLOCK"] variable* stat*)
+ ;
+
+stat: forStat
+ | expr ';'!
+ | block
+ | assignStat ';'!
+ | ';'!
+ ;
+
+forStat
+ : 'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block
+ -> ^('for' $start expr $next block)
+ ;
+
+assignStat
+ : ID EQ expr -> ^(EQ ID expr)
+ ;
+
+expr: condExpr
+ ;
+
+condExpr
+ : aexpr ( ('=='^ | '<'^) aexpr )?
+ ;
+
+aexpr
+ : atom ( '+'^ atom )*
+ ;
+
+atom
+ : ID
+ | INT
+ | '(' expr ')' -> expr
+ ;
+
+FOR : 'for' ;
+INT_TYPE : 'int' ;
+CHAR: 'char';
+VOID: 'void';
+
+ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+ ;
+
+INT : ('0'..'9')+
+ ;
+
+EQ : '=' ;
+EQEQ : '==' ;
+LT : '<' ;
+PLUS : '+' ;
+
+WS : ( ' '
+ | '\t'
+ | '\r'
+ | '\n'
+ )+
+ { $channel=HIDDEN }
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparser.py b/antlr-3.4/runtime/Python/tests/t047treeparser.py
new file mode 100644
index 0000000..1c0cb05
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t047treeparser.py
@@ -0,0 +1,122 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+ def walkerClass(self, base):
+ class TWalker(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self.traces = []
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TWalker
+
+
+ def setUp(self):
+ self.compileGrammar()
+ self.compileGrammar('t047treeparserWalker.g', options='-trace')
+
+
+ def testWalker(self):
+ input = textwrap.dedent(
+ '''\
+ char c;
+ int x;
+
+ void bar(int x);
+
+ int foo(int y, char d) {
+ int i;
+ for (i=0; i<3; i=i+1) {
+ x=3;
+ y=5;
+ }
+ }
+ ''')
+
+ cStream = antlr3.StringStream(input)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ r = parser.program()
+
+ self.failUnlessEqual(
+ r.tree.toStringTree(),
+ "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))"
+ )
+
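+        # flatten the AST into a node stream and attach the token stream
+        # so the walker can resolve token text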
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = self.getWalker(nodes)
+ walker.program()
+
+        # FIXME: need to cross-check with the Java target (compile the
+        # walker with the -trace option) whether this is the correct list.
+        # For now I'm happy that it does not crash ;)
+ self.failUnlessEqual(
+ walker.traces,
+ [ '>program', '>declaration', '>variable', '>type', '<type',
+ '>declarator', '<declarator', '<variable', '<declaration',
+ '>declaration', '>variable', '>type', '<type', '>declarator',
+ '<declarator', '<variable', '<declaration', '>declaration',
+ '>functionHeader', '>type', '<type', '>formalParameter',
+ '>type', '<type', '>declarator', '<declarator',
+ '<formalParameter', '<functionHeader', '<declaration',
+ '>declaration', '>functionHeader', '>type', '<type',
+ '>formalParameter', '>type', '<type', '>declarator',
+ '<declarator', '<formalParameter', '>formalParameter', '>type',
+ '<type', '>declarator', '<declarator', '<formalParameter',
+ '<functionHeader', '>block', '>variable', '>type', '<type',
+ '>declarator', '<declarator', '<variable', '>stat', '>forStat',
+ '>expr', '>expr', '>atom', '<atom', '<expr', '<expr', '>expr',
+ '>expr', '>atom', '<atom', '<expr', '>expr', '>atom', '<atom',
+ '<expr', '<expr', '>expr', '>expr', '>expr', '>atom', '<atom',
+ '<expr', '>expr', '>atom', '<atom', '<expr', '<expr', '<expr',
+ '>block', '>stat', '>expr', '>expr', '>atom', '<atom', '<expr',
+ '<expr', '<stat', '>stat', '>expr', '>expr', '>atom', '<atom',
+ '<expr', '<expr', '<stat', '<block', '<forStat', '<stat',
+ '<block', '<declaration', '<program'
+ ]
+ )
+
+ def testRuleLabelPropertyRefText(self):
+ self.compileGrammar()
+ self.compileGrammar('t047treeparserWalker.g', options='-trace')
+
+ input = textwrap.dedent(
+ '''\
+ char c;
+ ''')
+
+ cStream = antlr3.StringStream(input)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ r = parser.variable()
+
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = self.getWalker(nodes)
+ r = walker.variable()
+
+ self.failUnlessEqual(r, 'c')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparserWalker.g b/antlr-3.4/runtime/Python/tests/t047treeparserWalker.g
new file mode 100644
index 0000000..b6e0b3c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t047treeparserWalker.g
@@ -0,0 +1,67 @@
+tree grammar t047treeparserWalker;
+options {
+ language=Python;
+ tokenVocab=t047treeparser;
+ ASTLabelType=CommonTree;
+}
+
+program
+ : declaration+
+ ;
+
+declaration
+ : variable
+ | ^(FUNC_DECL functionHeader)
+ | ^(FUNC_DEF functionHeader block)
+ ;
+
+variable returns [res]
+ : ^(VAR_DEF type declarator)
+ {
+ $res = $declarator.text;
+ }
+ ;
+
+declarator
+ : ID
+ ;
+
+functionHeader
+ : ^(FUNC_HDR type ID formalParameter+)
+ ;
+
+formalParameter
+ : ^(ARG_DEF type declarator)
+ ;
+
+type
+ : 'int'
+ | 'char'
+ | 'void'
+ | ID
+ ;
+
+block
+ : ^(BLOCK variable* stat*)
+ ;
+
+stat: forStat
+ | expr
+ | block
+ ;
+
+forStat
+ : ^('for' expr expr expr block)
+ ;
+
+expr: ^(EQEQ expr expr)
+ | ^(LT expr expr)
+ | ^(PLUS expr expr)
+ | ^(EQ ID expr)
+ | atom
+ ;
+
+atom
+ : ID
+ | INT
+ ;
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite.g b/antlr-3.4/runtime/Python/tests/t048rewrite.g
new file mode 100644
index 0000000..4103b82
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t048rewrite.g
@@ -0,0 +1,9 @@
+lexer grammar t048rewrite;
+options {
+ language=Python;
+}
+
+A: 'a';
+B: 'b';
+C: 'c';
+
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite.py b/antlr-3.4/runtime/Python/tests/t048rewrite.py
new file mode 100644
index 0000000..685bf86
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t048rewrite.py
@@ -0,0 +1,490 @@
+"""Testsuite for TokenRewriteStream class."""
+
+# don't care about docstrings
+# pylint: disable-msg=C0111
+
+import unittest
+import antlr3
+import testbase
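+
+# TokenRewriteStream queues insert/replace/delete operations and applies
+# them lazily when toString() is called; the tokens themselves are never
+# modified (toOriginalString() in T2 relies on that). A minimal sketch of
+# the pattern under test, using the same names as the helpers below:
+#
+#   tokens = antlr3.TokenRewriteStream(lexer)
+#   tokens.fillBuffer()
+#   tokens.insertBefore(0, "0")   # queued, not yet applied
+#   tokens.toString()             # -> "0abc" for input "abc"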
+
+class T1(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def _parse(self, input):
+ cStream = antlr3.StringStream(input)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.TokenRewriteStream(lexer)
+ tStream.fillBuffer()
+
+ return tStream
+
+
+ def testInsertBeforeIndex0(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(0, "0")
+
+ result = tokens.toString()
+ expecting = "0abc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testInsertAfterLastIndex(self):
+ tokens = self._parse("abc")
+ tokens.insertAfter(2, "x")
+
+ result = tokens.toString()
+ expecting = "abcx"
+ self.failUnlessEqual(result, expecting)
+
+
+ def test2InsertBeforeAfterMiddleIndex(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(1, "x")
+ tokens.insertAfter(1, "x")
+
+ result = tokens.toString()
+ expecting = "axbxc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceIndex0(self):
+ tokens = self._parse("abc")
+ tokens.replace(0, "x")
+
+ result = tokens.toString()
+ expecting = "xbc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceLastIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(2, "x")
+
+ result = tokens.toString()
+ expecting = "abx"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceMiddleIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(1, "x")
+
+ result = tokens.toString()
+ expecting = "axc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def test2ReplaceMiddleIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(1, "x")
+ tokens.replace(1, "y")
+
+ result = tokens.toString()
+ expecting = "ayc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def test2ReplaceMiddleIndex1InsertBefore(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(0, "_")
+ tokens.replace(1, "x")
+ tokens.replace(1, "y")
+
+ result = tokens.toString()
+ expecting = "_ayc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testReplaceThenDeleteMiddleIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(1, "x")
+ tokens.delete(1)
+
+ result = tokens.toString()
+ expecting = "ac"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testInsertInPriorReplace(self):
+ tokens = self._parse("abc")
+ tokens.replace(0, 2, "x")
+ tokens.insertBefore(1, "0")
+ try:
+ tokens.toString()
+ self.fail()
+ except ValueError, exc:
+ self.failUnlessEqual(
+ str(exc),
+ "insert op <InsertBeforeOp@1:\"0\"> within boundaries of "
+ "previous <[email protected]:\"x\">"
+ )
+
+ def testInsertThenReplaceSameIndex(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(0, "0")
+ tokens.replace(0, "x") # supercedes insert at 0
+
+ result = tokens.toString()
+ expecting = "0xbc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def test2InsertMiddleIndex(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(1, "x")
+ tokens.insertBefore(1, "y")
+
+ result = tokens.toString()
+ expecting = "ayxbc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def test2InsertThenReplaceIndex0(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(0, "x")
+ tokens.insertBefore(0, "y")
+ tokens.replace(0, "z")
+
+ result = tokens.toString()
+ expecting = "yxzbc"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceThenInsertBeforeLastIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(2, "x")
+ tokens.insertBefore(2, "y")
+
+ result = tokens.toString()
+ expecting = "abyx"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testInsertThenReplaceLastIndex(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(2, "y")
+ tokens.replace(2, "x")
+
+ result = tokens.toString()
+ expecting = "abyx"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceThenInsertAfterLastIndex(self):
+ tokens = self._parse("abc")
+ tokens.replace(2, "x")
+ tokens.insertAfter(2, "y")
+
+ result = tokens.toString()
+ expecting = "abxy"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceRangeThenInsertAtLeftEdge(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "x")
+ tokens.insertBefore(2, "y")
+
+ result = tokens.toString()
+ expecting = "abyxba"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceRangeThenInsertAtRightEdge(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "x")
+ tokens.insertBefore(4, "y") # no effect; within range of a replace
+
+ try:
+ tokens.toString()
+ self.fail()
+ except ValueError, exc:
+ self.failUnlessEqual(
+ str(exc),
+ "insert op <InsertBeforeOp@4:\"y\"> within boundaries of "
+ "previous <[email protected]:\"x\">")
+
+
+ def testReplaceRangeThenInsertAfterRightEdge(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "x")
+ tokens.insertAfter(4, "y")
+
+ result = tokens.toString()
+ expecting = "abxyba"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceAll(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(0, 6, "x")
+
+ result = tokens.toString()
+ expecting = "x"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceSubsetThenFetch(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "xyz")
+
+ result = tokens.toString(0, 6)
+ expecting = "abxyzba"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testReplaceThenReplaceSuperset(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "xyz")
+ tokens.replace(3, 5, "foo") # overlaps, error
+
+ try:
+ tokens.toString()
+ self.fail()
+ except ValueError, exc:
+ self.failUnlessEqual(
+ str(exc),
+ "replace op boundaries of <[email protected]:\"foo\"> overlap "
+ "with previous <[email protected]:\"xyz\">")
+
+
+ def testReplaceThenReplaceLowerIndexedSuperset(self):
+ tokens = self._parse("abcccba")
+ tokens.replace(2, 4, "xyz")
+ tokens.replace(1, 3, "foo") # overlap, error
+
+ try:
+ tokens.toString()
+ self.fail()
+ except ValueError, exc:
+ self.failUnlessEqual(
+ str(exc),
+ "replace op boundaries of <[email protected]:\"foo\"> overlap "
+ "with previous <[email protected]:\"xyz\">")
+
+
+ def testReplaceSingleMiddleThenOverlappingSuperset(self):
+ tokens = self._parse("abcba")
+ tokens.replace(2, 2, "xyz")
+ tokens.replace(0, 3, "foo")
+
+ result = tokens.toString()
+ expecting = "fooa"
+ self.failUnlessEqual(result, expecting)
+
+
+ def testCombineInserts(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(0, "x")
+ tokens.insertBefore(0, "y")
+ result = tokens.toString()
+ expecting = "yxabc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testCombine3Inserts(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(1, "x")
+ tokens.insertBefore(0, "y")
+ tokens.insertBefore(1, "z")
+ result = tokens.toString()
+ expecting = "yazxbc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testCombineInsertOnLeftWithReplace(self):
+ tokens = self._parse("abc")
+ tokens.replace(0, 2, "foo")
+ tokens.insertBefore(0, "z") # combine with left edge of rewrite
+ result = tokens.toString()
+ expecting = "zfoo"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testCombineInsertOnLeftWithDelete(self):
+ tokens = self._parse("abc")
+ tokens.delete(0, 2)
+ tokens.insertBefore(0, "z") # combine with left edge of rewrite
+ result = tokens.toString()
+ expecting = "z" # make sure combo is not znull
+ self.failUnlessEqual(expecting, result)
+
+
+ def testDisjointInserts(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(1, "x")
+ tokens.insertBefore(2, "y")
+ tokens.insertBefore(0, "z")
+ result = tokens.toString()
+ expecting = "zaxbyc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testOverlappingReplace(self):
+ tokens = self._parse("abcc")
+ tokens.replace(1, 2, "foo")
+ tokens.replace(0, 3, "bar") # wipes prior nested replace
+ result = tokens.toString()
+ expecting = "bar"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testOverlappingReplace2(self):
+ tokens = self._parse("abcc")
+ tokens.replace(0, 3, "bar")
+ tokens.replace(1, 2, "foo") # cannot split earlier replace
+
+ try:
+ tokens.toString()
+ self.fail()
+ except ValueError, exc:
+ self.failUnlessEqual(
+ str(exc),
+ "replace op boundaries of <[email protected]:\"foo\"> overlap "
+ "with previous <[email protected]:\"bar\">")
+
+
+ def testOverlappingReplace3(self):
+ tokens = self._parse("abcc")
+ tokens.replace(1, 2, "foo")
+ tokens.replace(0, 2, "bar") # wipes prior nested replace
+ result = tokens.toString()
+ expecting = "barc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testOverlappingReplace4(self):
+ tokens = self._parse("abcc")
+ tokens.replace(1, 2, "foo")
+ tokens.replace(1, 3, "bar") # wipes prior nested replace
+ result = tokens.toString()
+ expecting = "abar"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testDropIdenticalReplace(self):
+ tokens = self._parse("abcc")
+ tokens.replace(1, 2, "foo")
+ tokens.replace(1, 2, "foo") # drop previous, identical
+ result = tokens.toString()
+ expecting = "afooc"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testDropPrevCoveredInsert(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(1, "foo")
+ tokens.replace(1, 2, "foo") # kill prev insert
+ result = tokens.toString()
+ expecting = "afoofoo"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testLeaveAloneDisjointInsert(self):
+ tokens = self._parse("abcc")
+ tokens.insertBefore(1, "x")
+ tokens.replace(2, 3, "foo")
+ result = tokens.toString()
+ expecting = "axbfoo"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testLeaveAloneDisjointInsert2(self):
+ tokens = self._parse("abcc")
+ tokens.replace(2, 3, "foo")
+ tokens.insertBefore(1, "x")
+ result = tokens.toString()
+ expecting = "axbfoo"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testInsertBeforeTokenThenDeleteThatToken(self):
+ tokens = self._parse("abc")
+ tokens.insertBefore(2, "y")
+ tokens.delete(2)
+ result = tokens.toString()
+ expecting = "aby"
+ self.failUnlessEqual(expecting, result)
+
+
+class T2(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar('t048rewrite2.g')
+
+
+ def _parse(self, input):
+ cStream = antlr3.StringStream(input)
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.TokenRewriteStream(lexer)
+ tStream.fillBuffer()
+
+ return tStream
+
+
+ def testToStringStartStop(self):
+ # Tokens: 0123456789
+ # Input: x = 3 * 0
+ tokens = self._parse("x = 3 * 0;")
+ tokens.replace(4, 8, "0") # replace 3 * 0 with 0
+
+ result = tokens.toOriginalString()
+ expecting = "x = 3 * 0;"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString()
+ expecting = "x = 0;"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(0, 9)
+ expecting = "x = 0;"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(4, 8)
+ expecting = "0"
+ self.failUnlessEqual(expecting, result)
+
+
+ def testToStringStartStop2(self):
+ # Tokens: 012345678901234567
+ # Input: x = 3 * 0 + 2 * 0
+ tokens = self._parse("x = 3 * 0 + 2 * 0;")
+
+ result = tokens.toOriginalString()
+ expecting = "x = 3 * 0 + 2 * 0;"
+ self.failUnlessEqual(expecting, result)
+
+ tokens.replace(4, 8, "0") # replace 3 * 0 with 0
+ result = tokens.toString()
+ expecting = "x = 0 + 2 * 0;"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(0, 17)
+ expecting = "x = 0 + 2 * 0;"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(4, 8)
+ expecting = "0"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(0, 8)
+ expecting = "x = 0"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(12, 16)
+ expecting = "2 * 0"
+ self.failUnlessEqual(expecting, result)
+
+ tokens.insertAfter(17, "// comment")
+ result = tokens.toString(12, 18)
+ expecting = "2 * 0;// comment"
+ self.failUnlessEqual(expecting, result)
+
+ result = tokens.toString(0, 8) # try again after insert at end
+ expecting = "x = 0"
+ self.failUnlessEqual(expecting, result)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite2.g b/antlr-3.4/runtime/Python/tests/t048rewrite2.g
new file mode 100644
index 0000000..f98251c
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t048rewrite2.g
@@ -0,0 +1,12 @@
+lexer grammar t048rewrite2;
+options {
+ language=Python;
+}
+
+ID : 'a'..'z'+;
+INT : '0'..'9'+;
+SEMI : ';';
+PLUS : '+';
+MUL : '*';
+ASSIGN : '=';
+WS : ' '+;
diff --git a/antlr-3.4/runtime/Python/tests/t049treeparser.py b/antlr-3.4/runtime/Python/tests/t049treeparser.py
new file mode 100644
index 0000000..9c7157d
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t049treeparser.py
@@ -0,0 +1,499 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+ def walkerClass(self, base):
+ class TWalker(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+                self._output = ""
+                self.traces = []  # used by traceIn/traceOut below
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TWalker
+
+
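+    # helper: compile both grammars inline, parse the input into an AST,
+    # walk that tree, and return whatever the walker capture()d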
+ def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ getattr(walker, treeEntry)()
+
+ return walker._output
+
+
+ def testFlatList(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ID INT
+ {self.capture("\%s, \%s" \% ($ID, $INT))}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("abc, 34", found)
+
+
+
+ def testSimpleTree(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ^(ID INT)
+ {self.capture(str($ID)+", "+str($INT))}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("abc, 34", found)
+
+
+ def testFlatVsTreeDecision(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : b c ;
+ b : ID INT -> ^(ID INT);
+ c : ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : b b ;
+ b : ID INT {self.capture(str($ID)+" "+str($INT)+'\n')}
+ | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 1 b 2"
+ )
+ self.failUnlessEqual("^(a 1)b 2\n", found)
+
+
+ def testFlatVsTreeDecision2(self):
+ grammar = textwrap.dedent(
+ r"""grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : b c ;
+ b : ID INT+ -> ^(ID INT+);
+ c : ID INT+;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : b b ;
+ b : ID INT+ {self.capture(str($ID)+" "+str($INT)+"\n")}
+ | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 1 2 3 b 4 5"
+ )
+ self.failUnlessEqual("^(a 3)b 5\n", found)
+
+
+ def testCyclicDFALookahead(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT+ PERIOD;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ PERIOD : '.' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ID INT+ PERIOD {self.capture("alt 1")}
+ | ID INT+ SEMI {self.capture("alt 2")}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 1 2 3."
+ )
+ self.failUnlessEqual("alt 1", found)
+
+
+## def testTemplateOutput(self):
+## String grammar =
+## "grammar T;\n" +
+## "options {output=AST;}\n" +
+## "a : ID INT;\n" +
+## "ID : 'a'..'z'+ ;\n" +
+## "INT : '0'..'9'+;\n" +
+## "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+## String treeGrammar =
+## "tree grammar TP;\n" +
+## "options {output=template; ASTLabelType=CommonTree;}\n" +
+## "s : a {System.out.println($a.st);};\n" +
+## "a : ID INT -> {new StringTemplate($INT.text)}\n" +
+## " ;\n";
+
+## String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+## treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+## assertEquals("34\n", found);
+## }
+
+
+ def testNullableChildList(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT? -> ^(ID INT?);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ^(ID INT?)
+ {self.capture(str($ID))}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc"
+ )
+ self.failUnlessEqual("abc", found)
+
+
+ def testNullableChildList2(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT? SEMI -> ^(ID INT?) SEMI ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ^(ID INT?) SEMI
+ {self.capture(str($ID))}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc;"
+ )
+ self.failUnlessEqual("abc", found)
+
+
+ def testNullableChildList3(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a : ^(ID INT? b) SEMI
+ {self.capture(str($ID)+", "+str($b.text))}
+ ;
+ b : ID? ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc def;"
+ )
+ self.failUnlessEqual("abc, def", found)
+
+
+ def testActionsAfterRoot(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : x=ID INT? SEMI -> ^($x INT?) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar TP;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ }
+ a @init {x=0} : ^(ID {x=1} {x=2} INT?)
+ {self.capture(str($ID)+", "+str(x))}
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc;"
+ )
+ self.failUnless("abc, 2\n", found)
+
+
+ def testWildcardLookahead(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID '+'^ INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ PERIOD : '.' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;}
+ a : ^('+' . INT) { self.capture("alt 1") }
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a + 2")
+ self.assertEquals("alt 1", found)
+
+
+ def testWildcardLookahead2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID '+'^ INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ PERIOD : '.' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;}
+ a : ^('+' . INT) { self.capture("alt 1") }
+ | ^('+' . .) { self.capture("alt 2") }
+ ;
+ ''')
+
+        # AMBIG upon '+' DOWN INT UP etc., but so what.
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a + 2")
+ self.assertEquals("alt 1", found)
+
+
+ def testWildcardLookahead3(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID '+'^ INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ PERIOD : '.' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;}
+ a : ^('+' ID INT) { self.capture("alt 1") }
+ | ^('+' . .) { self.capture("alt 2") }
+ ;
+ ''')
+
+        # AMBIG upon '+' DOWN INT UP etc., but so what.
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a + 2")
+ self.assertEquals("alt 1", found)
+
+
+ def testWildcardPlusLookahead(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID '+'^ INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ SEMI : ';' ;
+ PERIOD : '.' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;}
+ a : ^('+' INT INT ) { self.capture("alt 1") }
+ | ^('+' .+) { self.capture("alt 2") }
+ ;
+ ''')
+
+        # AMBIG upon '+' DOWN INT UP etc., but so what.
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a + 2")
+ self.assertEquals("alt 2", found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t050decorate.g b/antlr-3.4/runtime/Python/tests/t050decorate.g
new file mode 100644
index 0000000..a8b17d1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t050decorate.g
@@ -0,0 +1,29 @@
+grammar t050decorate;
+options {
+ language = Python;
+}
+
+@header {
+ def logme(func):
+ def decorated(self, *args, **kwargs):
+ self.events.append('before')
+ try:
+ return func(self, *args, **kwargs)
+ finally:
+ self.events.append('after')
+
+ return decorated
+}
+
+@parser::init {
+self.events = []
+}
+
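+// @decorate is a Python-target feature: the decorators listed here are
+// applied to the generated rule method, so document() is wrapped by
+// logme and records 'before'/'after' events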
+document
+@decorate {
+ @logme
+}
+ : IDENTIFIER
+ ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/antlr-3.4/runtime/Python/tests/t050decorate.py b/antlr-3.4/runtime/Python/tests/t050decorate.py
new file mode 100644
index 0000000..bb6b85e
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t050decorate.py
@@ -0,0 +1,21 @@
+import antlr3
+import testbase
+import unittest
+
+class t050decorate(testbase.ANTLRTest):
+ def setUp(self):
+ self.compileGrammar()
+
+
+ def testValid(self):
+ cStream = antlr3.StringStream('foobar')
+ lexer = self.getLexer(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = self.getParser(tStream)
+ parser.document()
+
+ assert parser.events == ['before', 'after']
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t051treeRewriteAST.py b/antlr-3.4/runtime/Python/tests/t051treeRewriteAST.py
new file mode 100644
index 0000000..39253b4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t051treeRewriteAST.py
@@ -0,0 +1,1593 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+ def walkerClass(self, base):
+ class TWalker(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+                self.buf = ""
+                self.traces = []  # used by traceIn/traceOut below
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TWalker
+
+
+ def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ r = getattr(walker, treeEntry)()
+
+ if r.tree is not None:
+ return r.tree.toStringTree()
+
+ return ""
+
+
+ def testFlatList(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T1;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP1;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T1;
+ }
+
+ a : ID INT -> INT ID;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("34 abc", found)
+
+
+ def testSimpleTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T2;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP2;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T2;
+ }
+ a : ^(ID INT) -> ^(INT ID);
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(34 abc)", found)
+
+
+ def testCombinedRewriteAndAuto(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T3;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT) | INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP3;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T3;
+ }
+ a : ^(ID INT) -> ^(INT ID) | INT;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(34 abc)", found)
+
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "34"
+ )
+
+ self.failUnlessEqual("34", found)
+
+
+ def testAvoidDup(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T4;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP4;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T4;
+ }
+ a : ID -> ^(ID ID);
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("(abc abc)", found)
+
+
+ def testLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T5;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID+ INT+ -> (^(ID INT))+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP5;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T5;
+ }
+ a : (^(ID INT))+ -> INT+ ID+;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a b c 3 4 5"
+ )
+
+ self.failUnlessEqual("3 4 5 a b c", found)
+
+
+ def testAutoDup(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T6;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP6;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T6;
+ }
+ a : ID;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("abc", found)
+
+
+ def testAutoDupRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T7;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP7;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T7;
+ }
+ a : b c ;
+ b : ID ;
+ c : INT ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 1"
+ )
+
+ self.failUnlessEqual("a 1", found)
+
+
+ def testAutoWildcard(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ a : ID .
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34")
+ self.assertEquals("abc 34", found)
+
+
+# def testNoWildcardAsRootError(self):
+#     ErrorQueue equeue = new ErrorQueue();
+#     ErrorManager.setErrorListener(equeue);
+#
+#     String treeGrammar =
+#         "tree grammar TP;\n" +
+#         "options {language=Python;output=AST;}\n" +
+#         "a : ^(. INT)\n" +
+#         "  ;\n";
+#
+#     Grammar g = new Grammar(treeGrammar);
+#     Tool antlr = newTool();
+#     antlr.setOutputDirectory(null); // write to /dev/null
+#     CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+#     g.setCodeGenerator(generator);
+#     generator.genRecognizer();
+#
+#     assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+#
+#     int expectedMsgID = ErrorManager.MSG_WILDCARD_AS_ROOT;
+#     Object expectedArg = null;
+#     antlr.RecognitionException expectedExc = null;
+#     GrammarSyntaxMessage expectedMessage =
+#         new GrammarSyntaxMessage(expectedMsgID, g, null, expectedArg, expectedExc);
+#
+#     checkError(equeue, expectedMessage);
+# }
+
+ def testAutoWildcard2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ a : ^(ID .)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34")
+ self.assertEquals("(abc 34)", found)
+
+
+ def testAutoWildcardWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ a : ID c=.
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34")
+ self.assertEquals("abc 34", found)
+
+
+ def testAutoWildcardWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ a : ID c+=.
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34")
+ self.assertEquals("abc 34", found)
+
+
+ def testAutoDupMultiple(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T8;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP8;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T8;
+ }
+ a : ID ID INT
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a b 3"
+ )
+
+ self.failUnlessEqual("a b 3", found)
+
+
+ def testAutoDupTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T9;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP9;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T9;
+ }
+ a : ^(ID INT)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupTreeWithLabels(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T10;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP10;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T10;
+ }
+ a : ^(x=ID y=INT)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupTreeWithListLabels(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T11;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP11;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T11;
+ }
+ a : ^(x+=ID y+=INT)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupTreeWithRuleRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T12;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP12;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T12;
+ }
+ a : ^(b INT) ;
+ b : ID ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupTreeWithRuleRootAndLabels(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T13;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP13;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T13;
+ }
+ a : ^(x=b INT) ;
+ b : ID ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupTreeWithRuleRootAndListLabels(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T14;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP14;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T14;
+ }
+ a : ^(x+=b y+=c) ;
+ b : ID ;
+ c : INT ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a 3"
+ )
+
+ self.failUnlessEqual("(a 3)", found)
+
+
+ def testAutoDupNestedTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T15;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : x=ID y=ID INT -> ^($x ^($y INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP15;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T15;
+ }
+ a : ^(ID ^(ID INT))
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "a b 3"
+ )
+
+ self.failUnlessEqual("(a (b 3))", found)
+
+
+ def testDelete(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T16;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP16;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T16;
+ }
+ a : ID ->
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("", found)
+
+ def testSetMatchNoRewrite(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ a : b INT;
+ b : ID | INT;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("abc 34", found)
+
+
+ def testSetOptionalMatchNoRewrite(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ a : (ID|INT)? INT ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34")
+
+ self.failUnlessEqual("abc 34", found)
+
+
+ def testSetMatchNoRewriteLevel2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : x=ID INT -> ^($x INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ a : ^(ID (ID | INT) ) ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(abc 34)", found)
+
+
+ def testSetMatchNoRewriteLevel2Root(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : x=ID INT -> ^($x INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ a : ^((ID | INT) INT) ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(abc 34)", found)
+
+
+ ## REWRITE MODE
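+    ## With rewrite=true the tree parser rewrites the input tree in
+    ## place: rules without a -> rewrite leave their subtree untouched.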
+
+ def testRewriteModeCombinedRewriteAndAuto(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T17;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID INT) | INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP17;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T17;
+ rewrite=true;
+ }
+ a : ^(ID INT) -> ^(ID["ick"] INT)
+ | INT // leaves it alone, returning $a.start
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(ick 34)", found)
+
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "34"
+ )
+
+ self.failUnlessEqual("34", found)
+
+
+ def testRewriteModeFlatTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T18;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ID INT | INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP18;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T18;
+ rewrite=true;
+ }
+ s : ID a ;
+ a : INT -> INT["1"]
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34"
+ )
+ self.assertEquals("abc 1", found)
+
+
+ def testRewriteModeChainRuleFlatTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ID INT | INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : a ;
+ a : b ;
+ b : ID INT -> INT ID
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.assertEquals("34 abc", found)
+
+
+ def testRewriteModeChainRuleTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : a ;
+ a : b ; // a.tree must become b.tree
+ b : ^(ID INT) -> INT
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.assertEquals("34", found)
+
+
+ def testRewriteModeChainRuleTree2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ tokens { X; }
+ s : a* b ; // only b contributes to tree, but it's after a*; s.tree = b.tree
+ a : X ;
+ b : ^(ID INT) -> INT
+ ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.assertEquals("34", found)
+
+
+ def testRewriteModeChainRuleTree3(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : 'boo' ID INT -> 'boo' ^(ID INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ tokens { X; }
+ s : 'boo' a* b ; // don't reset s.tree to b.tree due to 'boo'
+ a : X ;
+ b : ^(ID INT) -> INT
+ ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "boo abc 34")
+ self.assertEquals("boo 34", found)
+
+
+ def testRewriteModeChainRuleTree4(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ tokens { X; }
+ s : ^('boo' a* b) ; // don't reset s.tree to b.tree due to 'boo'
+ a : X ;
+ b : ^(ID INT) -> INT
+ ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "boo abc 34")
+ self.assertEquals("(boo 34)", found)
+
+
+ def testRewriteModeChainRuleTree5(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ tokens { X; }
+ s : ^(a b) ; // s.tree is a.tree
+ a : 'boo' ;
+ b : ^(ID INT) -> INT
+ ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "boo abc 34")
+ self.assertEquals("(boo 34)", found)
+
+
+ def testRewriteOfRuleRef(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ID INT | INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : a -> a ;
+ a : ID INT -> ID INT ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.failUnlessEqual("abc 34", found)
+
+
+ def testRewriteOfRuleRefRoot(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT INT -> ^(INT ^(ID INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(a ^(ID INT)) -> a ;
+ a : INT ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 12 34")
+ # When the rewrite references the rule that matched the root, the whole
+ # tree is emitted: the generator cannot know whether only the children
+ # are wanted, since the rule might be returning an entirely new tree.
+ self.failUnlessEqual("(12 (abc 34))", found)
+
+
+ def testRewriteOfRuleRefRootLabeled(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT INT -> ^(INT ^(ID INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(label=a ^(ID INT)) -> a ;
+ a : INT ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 12 34")
+ # same behavior as testRewriteOfRuleRefRoot: referencing the root rule
+ # emits the whole tree it matched
+ self.failUnlessEqual("(12 (abc 34))", found)
+
+
+ def testRewriteOfRuleRefRootListLabeled(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT INT -> ^(INT ^(ID INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(label+=a ^(ID INT)) -> a ;
+ a : INT ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 12 34")
+ # same behavior as testRewriteOfRuleRefRoot: referencing the root rule
+ # emits the whole tree it matched
+ self.failUnlessEqual("(12 (abc 34))", found)
+
+
+ def testRewriteOfRuleRefChild(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID ^(INT INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(ID a) -> a ;
+ a : ^(INT INT) ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.failUnlessEqual("(34 34)", found)
+
+
+ def testRewriteOfRuleRefLabel(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID ^(INT INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(ID label=a) -> a ;
+ a : ^(INT INT) ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.failUnlessEqual("(34 34)", found)
+
+
+ def testRewriteOfRuleRefListLabel(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID ^(INT INT));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ treeGrammar = textwrap.dedent(
+ r"""
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(ID label+=a) -> a ;
+ a : ^(INT INT) ;
+ """)
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.failUnlessEqual("(34 34)", found)
+
+
+
+ def testRewriteModeWithPredicatedRewrites(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T19;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID["root"] ^(ID INT)) | INT -> ^(ID["root"] INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP19;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T19;
+ rewrite=true;
+ }
+ s : ^(ID a) { self.buf += $s.start.toStringTree() };
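+ // predicated rewrites: the first alternative whose {...}? predicate is
+ // true is applied; a trailing unpredicated rewrite acts as the default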
+ a : ^(ID INT) -> {True}? ^(ID["ick"] INT)
+ -> INT
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("(root (ick 34))", found)
+
+
+ def testWildcardSingleNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT -> ^(ID["root"] INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
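+ // '.' is the tree wildcard: it matches any single node together with its subtree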
+ s : ^(ID c=.) -> $c
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("34", found)
+
+ def testWildcardUnlabeledSingleNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID INT -> ^(ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ s : ^(ID .) -> ID
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 34")
+ self.assertEquals("abc", found)
+
+
+ def testWildcardGrabsSubtree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID x=INT y=INT z=INT -> ^(ID["root"] ^($x $y $z));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ s : ^(ID c=.) -> $c
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 1 2 3")
+ self.assertEquals("(1 2 3)", found)
+
+
+ def testWildcardGrabsSubtree2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : ID x=INT y=INT z=INT -> ID ^($x $y $z);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ s : ID c=. -> $c
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "abc 1 2 3")
+ self.assertEquals("(1 2 3)", found)
+
+
+ def testWildcardListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST;}
+ a : INT INT INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+ s : (c+=.)+ -> $c+
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "1 2 3")
+ self.assertEquals("1 2 3", found)
+
+
+ def testWildcardListLabel2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python; output=AST; ASTLabelType=CommonTree;}
+ a : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z));
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+ s : ^(INT (c+=.)+) -> $c+
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 's',
+ "1 2 3")
+ self.assertEquals("(2 3) (2 3)", found)
+
+
+ def testRuleResultAsRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID '=' INT -> ^('=' ID INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ COLON : ':' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ rewrite=true;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ;
+ eq : '=' | ':' {pass} ; // bug in set matching: the node is not added to the tree, so force a non-set alternative with an action
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ "abc = 34")
+ self.assertEquals("(= 34 abc)", found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t052import.py b/antlr-3.4/runtime/Python/tests/t052import.py
new file mode 100644
index 0000000..8924462
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t052import.py
@@ -0,0 +1,1203 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class T(testbase.ANTLRTest):
+ def setUp(self):
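+ # make self.baseDir importable so that the compiled slave grammars
+ # can be loaded as plain Python modules; tearDown restores the path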
+ self.oldPath = sys.path[:]
+ sys.path.insert(0, self.baseDir)
+
+
+ def tearDown(self):
+ sys.path = self.oldPath
+
+
+ def parserClass(self, base):
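+ # subclass the generated parser so tests can collect capture() output
+ # and so any recognition error crashes the test instead of recovering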
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def execParser(self, grammar, grammarEntry, slaves, input):
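+ # compile each slave grammar first so the master can import it, then
+ # run the master parser on the input and return the captured output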
+ for slave in slaves:
+ parserName = self.writeInlineGrammar(slave)[0]
+ # slave parsers are imported as normal Python modules; purge any
+ # stale module from sys.modules to force a reload of the current version
+ try:
+ del sys.modules[parserName+'Parser']
+ except KeyError:
+ pass
+
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ getattr(parser, grammarEntry)()
+
+ return parser._output
+
+
+ def execLexer(self, grammar, slaves, input):
+ for slave in slaves:
+ parserName = self.writeInlineGrammar(slave)[0]
+ # as in execParser: purge any stale slave module to force a reload
+ try:
+ del sys.modules[parserName+'Parser']
+ except KeyError:
+ pass
+
+ lexerCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+
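+ # pump the lexer by hand, concatenating the text of every token up to EOF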
+ while True:
+ token = lexer.nextToken()
+ if token is None or token.type == antlr3.EOF:
+ break
+
+ lexer._output += token.text
+
+ return lexer._output
+
+
+ # @Test public void testWildcardStillWorks() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String grammar =
+ # "parser grammar S;\n" +
+ # "a : B . C ;\n"; // not qualified ID
+ # Grammar g = new Grammar(grammar);
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # }
+
+
+ def testDelegatorInvokesDelegateRule(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S1;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM1.capture(t)
+
+ }
+
+ a : B { self.capture("S.a") } ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M1;
+ options {
+ language=Python;
+ }
+ import S1;
+ s : a ;
+ B : 'b' ; // defines B from inherited token space
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 's',
+ slaves=[slave],
+ input="b"
+ )
+
+ self.failUnlessEqual("S.a", found)
+
+
+ # @Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
+ # // must generate something like:
+ # // public int a(int x) throws RecognitionException { return gS.a(x); }
+ # // in M.
+ # String slave =
+ # "parser grammar S;\n" +
+ # "a : B {System.out.print(\"S.a\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "s : a {System.out.println($a.text);} ;\n" +
+ # "B : 'b' ;" + // defines B from inherited token space
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # String found = execParser("M.g", master, "MParser", "MLexer",
+ # "s", "b", debug);
+ # assertEquals("S.ab\n", found);
+ # }
+
+
+ def testDelegatorInvokesDelegateRuleWithArgs(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S2;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM2.capture(t)
+ }
+ a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M2;
+ options {
+ language=Python;
+ }
+ import S2;
+ s : label=a[3] {self.capture($label.y);} ;
+ B : 'b' ; // defines B from inherited token space
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 's',
+ slaves=[slave],
+ input="b"
+ )
+
+ self.failUnlessEqual("S.a1000", found)
+
+
+ def testDelegatorAccessesDelegateMembers(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S3;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM3.capture(t)
+
+ def foo(self):
+ self.capture("foo")
+ }
+ a : B ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M3; // uses no rules from the import
+ options {
+ language=Python;
+ }
+ import S3;
+ s : 'b' {self.gS3.foo();} ; // gS3 is the pointer to the imported grammar
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 's',
+ slaves=[slave],
+ input="b"
+ )
+
+ self.failUnlessEqual("foo", found)
+
+
+ def testDelegatorInvokesFirstVersionOfDelegateRule(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S4;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM4.capture(t)
+ }
+ a : b {self.capture("S.a");} ;
+ b : B ;
+ ''')
+
+ slave2 = textwrap.dedent(
+ r'''
+ parser grammar T4;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM4.capture(t)
+ }
+ a : B {self.capture("T.a");} ; // hidden by S.a
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M4;
+ options {
+ language=Python;
+ }
+ import S4,T4;
+ s : a ;
+ B : 'b' ;
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 's',
+ slaves=[slave, slave2],
+ input="b"
+ )
+
+ self.failUnlessEqual("S.a", found)
+
+
+ def testDelegatesSeeSameTokenType(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S5; // A, B, C token type order
+ options {
+ language=Python;
+ }
+ tokens { A; B; C; }
+ @members {
+ def capture(self, t):
+ self.gM5.capture(t)
+ }
+ x : A {self.capture("S.x ");} ;
+ ''')
+
+ slave2 = textwrap.dedent(
+ r'''
+ parser grammar T5;
+ options {
+ language=Python;
+ }
+ tokens { C; B; A; } // reverse order
+ @members {
+ def capture(self, t):
+ self.gM5.capture(t)
+ }
+ y : A {self.capture("T.y");} ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M5;
+ options {
+ language=Python;
+ }
+ import S5,T5;
+ s : x y ; // matches AA, which should be "aa"
+ B : 'b' ; // another order: B, A, C
+ A : 'a' ;
+ C : 'c' ;
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 's',
+ slaves=[slave, slave2],
+ input="aa"
+ )
+
+ self.failUnlessEqual("S.x T.y", found)
+
+
+ # @Test public void testDelegatesSeeSameTokenType2() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" + // A, B, C token type order
+ # "tokens { A; B; C; }\n" +
+ # "x : A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String slave2 =
+ # "parser grammar T;\n" +
+ # "tokens { C; B; A; }\n" + // reverse order
+ # "y : A {System.out.println(\"T.y\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "T.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S,T;\n" +
+ # "s : x y ;\n" + // matches AA, which should be "aa"
+ # "B : 'b' ;\n" + // another order: B, A, C
+ # "A : 'a' ;\n" +
+ # "C : 'c' ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
+ # String expectedStringLiteralToTypeMap = "{}";
+ # String expectedTypeToTokenList = "[A, B, C, WS]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # }
+
+ # @Test public void testCombinedImportsCombined() throws Exception {
+ # // for now, we don't allow combined to import combined
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "grammar S;\n" + // A, B, C token type order
+ # "tokens { A; B; C; }\n" +
+ # "x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
+ # "INT : '0'..'9'+ ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "s : x INT ;\n";
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+ # String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
+ # assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+ # }
+
+ # @Test public void testSameStringTwoNames() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # "tokens { A='a'; }\n" +
+ # "x : A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String slave2 =
+ # "parser grammar T;\n" +
+ # "tokens { X='a'; }\n" +
+ # "y : X {System.out.println(\"T.y\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "T.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S,T;\n" +
+ # "s : x y ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # String expectedTokenIDToTypeMap = "[A=4, WS=6, X=5]";
+ # String expectedStringLiteralToTypeMap = "{'a'=4}";
+ # String expectedTypeToTokenList = "[A, X, WS]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # Object expectedArg = "X='a'";
+ # Object expectedArg2 = "A";
+ # int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
+ # GrammarSemanticsMessage expectedMessage =
+ # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+ # checkGrammarSemanticsError(equeue, expectedMessage);
+
+ # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+ # String expectedError =
+ # "error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
+ # assertEquals(expectedError, equeue.errors.get(0).toString());
+ # }
+
+ # @Test public void testSameNameTwoStrings() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # "tokens { A='a'; }\n" +
+ # "x : A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String slave2 =
+ # "parser grammar T;\n" +
+ # "tokens { A='x'; }\n" +
+ # "y : A {System.out.println(\"T.y\");} ;\n";
+
+ # writeFile(tmpdir, "T.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S,T;\n" +
+ # "s : x y ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
+ # String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
+ # String expectedTypeToTokenList = "[A, WS, T__6]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # Object expectedArg = "A='x'";
+ # Object expectedArg2 = "'a'";
+ # int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
+ # GrammarSemanticsMessage expectedMessage =
+ # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+ # checkGrammarSemanticsError(equeue, expectedMessage);
+
+ # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+ # String expectedError =
+ # "error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
+ # assertEquals(expectedError, equeue.errors.get(0).toString());
+ # }
+
+ # @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # "options {tokenVocab=whatever;}\n" +
+ # "tokens { A='a'; }\n" +
+ # "x : A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "s : x ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # Object expectedArg = "S";
+ # int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
+ # GrammarSemanticsMessage expectedMessage =
+ # new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+ # checkGrammarSemanticsWarning(equeue, expectedMessage);
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
+
+ # String expectedError =
+ # "warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
+ # assertEquals(expectedError, equeue.warnings.get(0).toString());
+ # }
+
+ # @Test public void testImportedTokenVocabWorksInRoot() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # "tokens { A='a'; }\n" +
+ # "x : A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String tokens =
+ # "A=99\n";
+ # writeFile(tmpdir, "Test.tokens", tokens);
+
+ # String master =
+ # "grammar M;\n" +
+ # "options {tokenVocab=Test;}\n" +
+ # "import S;\n" +
+ # "s : x ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # String expectedTokenIDToTypeMap = "[A=99, WS=101]";
+ # String expectedStringLiteralToTypeMap = "{'a'=100}";
+ # String expectedTypeToTokenList = "[A, 'a', WS]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # }
+
+ # @Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # "options {toke\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "s : x ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # // whole bunch of errors from bad S.g file
+ # assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
+ # }
+
+ # @Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar S;\n" +
+ # ": A {System.out.println(\"S.x\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "s : x ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+
+ # // whole bunch of errors from bad S.g file
+ # assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
+ # }
+
+
+ def testDelegatorRuleOverridesDelegate(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar S6;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM6.capture(t)
+ }
+ a : b {self.capture("S.a");} ;
+ b : B ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ grammar M6;
+ options {
+ language=Python;
+ }
+ import S6;
+ b : 'b'|'c' ;
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execParser(
+ master, 'a',
+ slaves=[slave],
+ input="c"
+ )
+
+ self.failUnlessEqual("S.a", found)
+
+
+ # @Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
+ # String slave =
+ # "parser grammar JavaDecl;\n" +
+ # "type : 'int' ;\n" +
+ # "decl : type ID ';'\n" +
+ # " | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
+ # " ;\n" +
+ # "init : '=' INT ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "JavaDecl.g", slave);
+ # String master =
+ # "grammar Java;\n" +
+ # "import JavaDecl;\n" +
+ # "prog : decl ;\n" +
+ # "type : 'int' | 'float' ;\n" +
+ # "\n" +
+ # "ID : 'a'..'z'+ ;\n" +
+ # "INT : '0'..'9'+ ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # // for float to work in decl, type must be overridden
+ # String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
+ # "prog", "float x = 3;", debug);
+ # assertEquals("JavaDecl: floatx=3;\n", found);
+ # }
+
+ # @Test public void testDelegatorRuleOverridesDelegates() throws Exception {
+ # String slave =
+ # "parser grammar S;\n" +
+ # "a : b {System.out.println(\"S.a\");} ;\n" +
+ # "b : B ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # String slave2 =
+ # "parser grammar T;\n" +
+ # "tokens { A='x'; }\n" +
+ # "b : B {System.out.println(\"T.b\");} ;\n";
+ # writeFile(tmpdir, "T.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S, T;\n" +
+ # "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # String found = execParser("M.g", master, "MParser", "MLexer",
+ # "a", "c", debug);
+ # assertEquals("M.b\n" +
+ # "S.a\n", found);
+ # }
+
+ # LEXER INHERITANCE
+
+ def testLexerDelegatorInvokesDelegateRule(self):
+ slave = textwrap.dedent(
+ r'''
+ lexer grammar S7;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM7.capture(t)
+ }
+ A : 'a' {self.capture("S.A ");} ;
+ C : 'c' ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ lexer grammar M7;
+ options {
+ language=Python;
+ }
+ import S7;
+ B : 'b' ;
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execLexer(
+ master,
+ slaves=[slave],
+ input="abc"
+ )
+
+ self.failUnlessEqual("S.A abc", found)
+
+
+ def testLexerDelegatorRuleOverridesDelegate(self):
+ slave = textwrap.dedent(
+ r'''
+ lexer grammar S8;
+ options {
+ language=Python;
+ }
+ @members {
+ def capture(self, t):
+ self.gM8.capture(t)
+ }
+ A : 'a' {self.capture("S.A")} ;
+ ''')
+
+ master = textwrap.dedent(
+ r'''
+ lexer grammar M8;
+ options {
+ language=Python;
+ }
+ import S8;
+ A : 'a' {self.capture("M.A ");} ;
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ found = self.execLexer(
+ master,
+ slaves=[slave],
+ input="a"
+ )
+
+ self.failUnlessEqual("M.A a", found)
+
+ # @Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
+ # // M.Tokens has nothing to predict tokens from S. Should
+ # // not include S.Tokens alt in this case?
+ # String slave =
+ # "lexer grammar S;\n" +
+ # "A : 'a' {System.out.println(\"S.A\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "lexer grammar M;\n" +
+ # "import S;\n" +
+ # "A : 'a' {System.out.println(\"M.A\");} ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # writeFile(tmpdir, "/M.g", master);
+
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # composite.assignTokenTypes();
+ # composite.defineGrammarSymbols();
+ # composite.createNFAs();
+ # g.createLookaheadDFAs(false);
+
+ # // predict only alts from M not S
+ # String expectingDFA =
+ # ".s0-'a'->.s1\n" +
+ # ".s0-{'\\n', ' '}->:s3=>2\n" +
+ # ".s1-<EOT>->:s2=>1\n";
+ # org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
+ # FASerializer serializer = new FASerializer(g);
+ # String result = serializer.serialize(dfa.startState);
+ # assertEquals(expectingDFA, result);
+
+ # // must not be a "unreachable alt: Tokens" error
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # }
+
+ # @Test public void testInvalidImportMechanism() throws Exception {
+ # // M.Tokens has nothing to predict tokens from S. Should
+ # // not include S.Tokens alt in this case?
+ # String slave =
+ # "lexer grammar S;\n" +
+ # "A : 'a' {System.out.println(\"S.A\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "tree grammar M;\n" +
+ # "import S;\n" +
+ # "a : A ;";
+ # writeFile(tmpdir, "/M.g", master);
+
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+
+ # assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+
+ # String expectedError =
+ # "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
+ # assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+ # }
+
+ # @Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
+ # // if this compiles, it means that synpred1_S is defined in S.java
+ # // but not MParser.java. MParser has its own synpred1_M which must
+ # // be separate to compile.
+ # String slave =
+ # "parser grammar S;\n" +
+ # "a : 'a' {System.out.println(\"S.a1\");}\n" +
+ # " | 'a' {System.out.println(\"S.a2\");}\n" +
+ # " ;\n" +
+ # "b : 'x' | 'y' {;} ;\n"; // preds generated but not need in DFA here
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "grammar M;\n" +
+ # "options {backtrack=true;}\n" +
+ # "import S;\n" +
+ # "start : a b ;\n" +
+ # "nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # String found = execParser("M.g", master, "MParser", "MLexer",
+ # "start", "ax", debug);
+ # assertEquals("S.a1\n", found);
+ # }
+
+ # @Test public void testKeywordVSIDGivesNoWarning() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "lexer grammar S;\n" +
+ # "A : 'abc' {System.out.println(\"S.A\");} ;\n" +
+ # "ID : 'a'..'z'+ ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "a : A {System.out.println(\"M.a\");} ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # String found = execParser("M.g", master, "MParser", "MLexer",
+ # "a", "abc", debug);
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
+
+ # assertEquals("S.A\nM.a\n", found);
+ # }
+
+ # @Test public void testWarningForUndefinedToken() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "lexer grammar S;\n" +
+ # "A : 'abc' {System.out.println(\"S.A\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "a : ABC A {System.out.println(\"M.a\");} ;\n" +
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # // A is defined in S but M should still see it and not give warning.
+ # // only problem is ABC.
+
+ # rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());
+
+ # String expectedError =
+ # "warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:3:5: no lexer rule corresponding to token: ABC";
+ # assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
+ # }
+
+ # /** Make sure that M can import S that imports T. */
+ # @Test public void test3LevelImport() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar T;\n" +
+ # "a : T ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "T.g", slave);
+ # String slave2 =
+ # "parser grammar S;\n" + // A, B, C token type order
+ # "import T;\n" +
+ # "a : S ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "a : M ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+ # g.composite.defineGrammarSymbols();
+
+ # String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]";
+ # String expectedStringLiteralToTypeMap = "{}";
+ # String expectedTypeToTokenList = "[T, S, M]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+ # boolean ok =
+ # rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+ # boolean expecting = true; // should be ok
+ # assertEquals(expecting, ok);
+ # }
+
+ # @Test public void testBigTreeOfImports() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar T;\n" +
+ # "x : T ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "T.g", slave);
+ # slave =
+ # "parser grammar S;\n" +
+ # "import T;\n" +
+ # "y : S ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+
+ # slave =
+ # "parser grammar C;\n" +
+ # "i : C ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "C.g", slave);
+ # slave =
+ # "parser grammar B;\n" +
+ # "j : B ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "B.g", slave);
+ # slave =
+ # "parser grammar A;\n" +
+ # "import B,C;\n" +
+ # "k : A ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "A.g", slave);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S,A;\n" +
+ # "a : M ;\n" ;
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+ # g.composite.defineGrammarSymbols();
+
+ # String expectedTokenIDToTypeMap = "[A=8, B=6, C=7, M=9, S=5, T=4]";
+ # String expectedStringLiteralToTypeMap = "{}";
+ # String expectedTypeToTokenList = "[T, S, B, C, A, M]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+ # boolean ok =
+ # rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+ # boolean expecting = true; // should be ok
+ # assertEquals(expecting, ok);
+ # }
+
+ # @Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String slave =
+ # "parser grammar T;\n" +
+ # "x : T ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "T.g", slave);
+ # String slave2 =
+ # "parser grammar S;\n" + // A, B, C token type order
+ # "import T;\n" +
+ # "a : S ;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave2);
+
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "a : M x ;\n" ; // x MUST BE VISIBLE TO M
+ # writeFile(tmpdir, "M.g", master);
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+ # g.composite.defineGrammarSymbols();
+
+ # String expectedTokenIDToTypeMap = "[M=6, S=5, T=4]";
+ # String expectedStringLiteralToTypeMap = "{}";
+ # String expectedTypeToTokenList = "[T, S, M]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+ # }
+
+ # @Test public void testNestedComposite() throws Exception {
+ # // Wasn't compiling. http://www.antlr.org/jira/browse/ANTLR-438
+ # ErrorQueue equeue = new ErrorQueue();
+ # ErrorManager.setErrorListener(equeue);
+ # String gstr =
+ # "lexer grammar L;\n" +
+ # "T1: '1';\n" +
+ # "T2: '2';\n" +
+ # "T3: '3';\n" +
+ # "T4: '4';\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "L.g", gstr);
+ # gstr =
+ # "parser grammar G1;\n" +
+ # "s: a | b;\n" +
+ # "a: T1;\n" +
+ # "b: T2;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "G1.g", gstr);
+
+ # gstr =
+ # "parser grammar G2;\n" +
+ # "import G1;\n" +
+ # "a: T3;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "G2.g", gstr);
+ # String G3str =
+ # "grammar G3;\n" +
+ # "import G2;\n" +
+ # "b: T4;\n" ;
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "G3.g", G3str);
+
+ # Tool antlr = newTool(new String[] {"-lib", tmpdir});
+ # CompositeGrammar composite = new CompositeGrammar();
+ # Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite);
+ # composite.setDelegationRoot(g);
+ # g.parseAndBuildAST();
+ # g.composite.assignTokenTypes();
+ # g.composite.defineGrammarSymbols();
+
+ # String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]";
+ # String expectedStringLiteralToTypeMap = "{}";
+ # String expectedTypeToTokenList = "[T1, T2, T3, T4]";
+
+ # assertEquals(expectedTokenIDToTypeMap,
+ # realElements(g.composite.tokenIDToTypeMap).toString());
+ # assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+ # assertEquals(expectedTypeToTokenList,
+ # realElements(g.composite.typeToTokenList).toString());
+
+ # assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+ # boolean ok =
+ # rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false);
+ # boolean expecting = true; // should be ok
+ # assertEquals(expecting, ok);
+ # }
+
+ # @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception {
+ # String slave =
+ # "parser grammar S;\n" +
+ # "a : B {System.out.print(\"S.a\");} ;\n";
+ # mkdir(tmpdir);
+ # writeFile(tmpdir, "S.g", slave);
+ # String master =
+ # "grammar M;\n" +
+ # "import S;\n" +
+ # "@header{package mypackage;}\n" +
+ # "@lexer::header{package mypackage;}\n" +
+ # "s : a ;\n" +
+ # "B : 'b' ;" + // defines B from inherited token space
+ # "WS : (' '|'\\n') {skip();} ;\n" ;
+ # boolean ok = antlr("M.g", "M.g", master, debug);
+ # boolean expecting = true; // should be ok
+ # assertEquals(expecting, ok);
+ # }
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t053hetero.py b/antlr-3.4/runtime/Python/tests/t053hetero.py
new file mode 100644
index 0000000..db3e9db
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t053hetero.py
@@ -0,0 +1,939 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class T(testbase.ANTLRTest):
+ def parserClass(self, base):
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TParser
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def execParser(self, grammar, grammarEntry, input):
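+ # run the generated parser and return the string form of the AST it
+ # built, or "" when the entry rule produces no tree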
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+
+ if r is not None:
+ return r.tree.toStringTree()
+
+ return ""
+
+
+ def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
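+ # two-phase run: parse the input into an AST, then walk that AST with
+ # the generated tree parser and return the (possibly rewritten) tree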
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ r = getattr(walker, treeEntry)()
+
+ if r is not None:
+ return r.tree.toStringTree()
+
+ return ""
+
+
+ # PARSERS -- AUTO AST
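+ # A token reference suffixed with <V> makes ANTLR create the AST node
+ # for that token as an instance of class V instead of CommonTree.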
+
+ def testToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T1;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : ID<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testTokenCommonTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID<CommonTree> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a")
+
+ self.failUnlessEqual("a", found)
+
+
+ def testTokenWithQualifiedType(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @members {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+ }
+ a : ID<TParser.V> ; // TParser.V is qualified name
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testNamedType(self):
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+ }
+ a : ID<node=V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ """)
+
+ found = self.execParser(grammar, 'a', input="a")
+ self.assertEquals("a<V>", found)
+
+
+ def testTokenWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T2;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : x=ID<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testTokenWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T3;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : x+=ID<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testTokenRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T4;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : ID<V>^ ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testTokenRootWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T5;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : x+=ID<V>^ ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testString(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T6;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : 'begin'<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="begin"
+ )
+
+ self.failUnlessEqual("begin<V>", found)
+
+
+ def testStringRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T7;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : 'begin'<V>^ ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="begin"
+ )
+
+ self.failUnlessEqual("begin<V>", found)
+
+
+ # PARSERS -- REWRITE AST
+
+ def testRewriteToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T8;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : ID -> ID<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("a<V>", found)
+
+
+ def testRewriteTokenWithArgs(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T9;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def __init__(self, *args):
+ if len(args) == 4:
+ ttype = args[0]
+ x = args[1]
+ y = args[2]
+ z = args[3]
+ token = CommonToken(type=ttype, text="")
+
+ elif len(args) == 3:
+ ttype = args[0]
+ token = args[1]
+ x = args[2]
+ y, z = 0, 0
+
+ else:
+ raise TypeError("Invalid args \%r" \% (args,))
+
+ CommonTree.__init__(self, token)
+ self.x = x
+ self.y = y
+ self.z = z
+
+ def toString(self):
+ txt = ""
+ if self.token is not None:
+ txt += self.token.text
+ txt +="<V>;\%d\%d\%d" \% (self.x, self.y, self.z)
+ return txt
+ __str__ = toString
+
+ }
+ a : ID -> ID<V>[42,19,30] ID<V>[$ID,99];
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a"
+ )
+
+ self.failUnlessEqual("<V>;421930 a<V>;9900", found)
+
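+ # A note on the escaping above: in ANTLR 3 actions a bare '%' starts a
+ # template action, so Python string formatting must be written as '\%'
+ # in the grammar text. The emitted action is plain Python, roughly:
+ #
+ #   txt += "<V>;%d%d%d" % (self.x, self.y, self.z)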
+
+ def testRewriteTokenRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T10;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : ID INT -> ^(ID<V> INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a 2"
+ )
+
+ self.failUnlessEqual("(a<V> 2)", found)
+
+
+ def testRewriteString(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T11;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : 'begin' -> 'begin'<V> ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="begin"
+ )
+
+ self.failUnlessEqual("begin<V>", found)
+
+
+ def testRewriteStringRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T12;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : 'begin' INT -> ^('begin'<V> INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="begin 2"
+ )
+
+ self.failUnlessEqual("(begin<V> 2)", found)
+
+ def testRewriteRuleResults(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ tokens {LIST;}
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ class W(CommonTree):
+ def __init__(self, tokenType, txt):
+ super(W, self).__init__(
+ CommonToken(type=tokenType, text=txt))
+
+ def toString(self):
+ return self.token.text + "<W>"
+ __str__ = toString
+
+ }
+ a : id (',' id)* -> ^(LIST<W>["LIST"] id+);
+ id : ID -> ID<V>;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="a,b,c")
+
+ self.failUnlessEqual("(LIST<W> a<V> b<V> c<V>)", found)
+
+ def testCopySemanticsWithHetero(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ @header {
+ class V(CommonTree):
+ def dupNode(self):
+ return V(self)
+
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ }
+ a : type ID (',' ID)* ';' -> ^(type ID)+;
+ type : 'int'<V> ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ input="int a, b, c;")
+ self.failUnlessEqual("(int<V> a) (int<V> b) (int<V> c)", found)
+
+ # TREE PARSERS -- REWRITE AST
+
+ def testTreeParserRewriteFlatList(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T13;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP13;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T13;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ class W(CommonTree):
+ def toString(self):
+ return self.token.text + "<W>"
+ __str__ = toString
+
+ }
+ a : ID INT -> INT<V> ID<W>
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc 34"
+ )
+
+ self.failUnlessEqual("34<V> abc<W>", found)
+
+
+ def testTreeParserRewriteTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T14;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP14;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T14;
+ }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return self.token.text + "<V>"
+ __str__ = toString
+
+ class W(CommonTree):
+ def toString(self):
+ return self.token.text + "<W>"
+ __str__ = toString
+
+ }
+ a : ID INT -> ^(INT<V> ID<W>)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc 34"
+ )
+
+ self.failUnlessEqual("(34<V> abc<W>)", found)
+
+
+ def testTreeParserRewriteImaginary(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T15;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP15;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T15;
+ }
+ tokens { ROOT; }
+ @header {
+ class V(CommonTree):
+ def __init__(self, tokenType):
+ CommonTree.__init__(self, CommonToken(tokenType))
+
+ def toString(self):
+ return tokenNames[self.token.type] + "<V>"
+ __str__ = toString
+
+
+ }
+ a : ID -> ROOT<V> ID
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc"
+ )
+
+ self.failUnlessEqual("ROOT<V> abc", found)
+
+
+ def testTreeParserRewriteImaginaryWithArgs(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T16;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP16;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T16;
+ }
+ tokens { ROOT; }
+ @header {
+ class V(CommonTree):
+ def __init__(self, tokenType, x):
+ CommonTree.__init__(self, CommonToken(tokenType))
+ self.x = x
+
+ def toString(self):
+ return tokenNames[self.token.type] + "<V>;" + str(self.x)
+ __str__ = toString
+
+ }
+ a : ID -> ROOT<V>[42] ID
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc"
+ )
+
+ self.failUnlessEqual("ROOT<V>;42 abc", found)
+
+
+ def testTreeParserRewriteImaginaryRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T17;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP17;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T17;
+ }
+ tokens { ROOT; }
+ @header {
+ class V(CommonTree):
+ def __init__(self, tokenType):
+ CommonTree.__init__(self, CommonToken(tokenType))
+
+ def toString(self):
+ return tokenNames[self.token.type] + "<V>"
+ __str__ = toString
+
+ }
+ a : ID -> ^(ROOT<V> ID)
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc"
+ )
+
+ self.failUnlessEqual("(ROOT<V> abc)", found)
+
+
+ def testTreeParserRewriteImaginaryFromReal(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T18;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP18;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T18;
+ }
+ tokens { ROOT; }
+ @header {
+ class V(CommonTree):
+ def __init__(self, tokenType, tree=None):
+ if tree is None:
+ CommonTree.__init__(self, CommonToken(tokenType))
+ else:
+ CommonTree.__init__(self, tree)
+ self.token.type = tokenType
+
+ def toString(self):
+ return tokenNames[self.token.type]+"<V>@"+str(self.token.line)
+ __str__ = toString
+
+ }
+ a : ID -> ROOT<V>[$ID]
+ ;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc"
+ )
+
+ self.failUnlessEqual("ROOT<V>@1", found)
+
+
+ def testTreeParserAutoHeteroAST(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ID ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''
+ tree grammar TP;
+ options {
+ language=Python;
+ output=AST;
+ ASTLabelType=CommonTree;
+ tokenVocab=T;
+ }
+ tokens { ROOT; }
+ @header {
+ class V(CommonTree):
+ def toString(self):
+ return CommonTree.toString(self) + "<V>"
+ __str__ = toString
+
+ }
+
+ a : ID<V> ';'<V>;
+ ''')
+
+ found = self.execTreeParser(
+ grammar, 'a',
+ treeGrammar, 'a',
+ input="abc;"
+ )
+
+ self.failUnlessEqual("abc<V> ;<V>", found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t054main.py b/antlr-3.4/runtime/Python/tests/t054main.py
new file mode 100644
index 0000000..bb26510
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t054main.py
@@ -0,0 +1,318 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+from StringIO import StringIO
+
+class T(testbase.ANTLRTest):
+ def setUp(self):
+ self.oldPath = sys.path[:]
+ sys.path.insert(0, self.baseDir)
+
+
+ def tearDown(self):
+ sys.path = self.oldPath
+
+
+ def testOverrideMain(self):
+ grammar = textwrap.dedent(
+ r"""lexer grammar T3;
+ options {
+ language = Python;
+ }
+
+ @main {
+ def main(argv):
+ raise RuntimeError("no")
+ }
+
+ ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+ try:
+ lexerMod.main(
+ ['lexer.py']
+ )
+ self.fail()
+ except RuntimeError:
+ pass
+
+
+ def testLexerFromFile(self):
+ input = "foo bar"
+ inputPath = self.writeFile("input.txt", input)
+
+ grammar = textwrap.dedent(
+ r"""lexer grammar T1;
+ options {
+ language = Python;
+ }
+
+ ID: 'a'..'z'+;
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+ lexerMod.main(
+ ['lexer.py', inputPath],
+ stdout=stdout
+ )
+
+ self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+ def testLexerFromStdIO(self):
+ input = "foo bar"
+
+ grammar = textwrap.dedent(
+ r"""lexer grammar T2;
+ options {
+ language = Python;
+ }
+
+ ID: 'a'..'z'+;
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+ lexerMod.main(
+ ['lexer.py'],
+ stdin=StringIO(input),
+ stdout=stdout
+ )
+
+ self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+ def testLexerEncoding(self):
+ input = u"föö bär".encode('utf-8')
+
+ grammar = textwrap.dedent(
+ r"""lexer grammar T3;
+ options {
+ language = Python;
+ }
+
+ ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+ lexerMod.main(
+ ['lexer.py', '--encoding', 'utf-8'],
+ stdin=StringIO(input),
+ stdout=stdout
+ )
+
+ self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+ def testCombined(self):
+ input = "foo bar"
+
+ grammar = textwrap.dedent(
+ r"""grammar T4;
+ options {
+ language = Python;
+ }
+
+ r returns [res]: (ID)+ EOF { $res = $text; };
+
+ ID: 'a'..'z'+;
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+ parserMod.main(
+ ['combined.py', '--rule', 'r'],
+ stdin=StringIO(input),
+ stdout=stdout
+ )
+
+ stdout = stdout.getvalue()
+ self.failUnlessEqual(len(stdout.splitlines()), 1, stdout)
+
+
+ def testCombinedOutputAST(self):
+ input = "foo + bar"
+
+ grammar = textwrap.dedent(
+ r"""grammar T5;
+ options {
+ language = Python;
+ output = AST;
+ }
+
+ r: ID OP^ ID EOF!;
+
+ ID: 'a'..'z'+;
+ OP: '+';
+ WS: ' '+ { $channel = HIDDEN; };
+ """)
+
+
+ stdout = StringIO()
+
+ lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+ parserMod.main(
+ ['combined.py', '--rule', 'r'],
+ stdin=StringIO(input),
+ stdout=stdout
+ )
+
+ stdout = stdout.getvalue().strip()
+ self.failUnlessEqual(stdout, "(+ foo bar)")
+
+
+ def testTreeParser(self):
+ grammar = textwrap.dedent(
+ r'''grammar T6;
+ options {
+ language = Python;
+ output = AST;
+ }
+
+ r: ID OP^ ID EOF!;
+
+ ID: 'a'..'z'+;
+ OP: '+';
+ WS: ' '+ { $channel = HIDDEN; };
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar T6Walker;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ tokenVocab=T6;
+ }
+ r returns [res]: ^(OP a=ID b=ID)
+ { $res = "\%s \%s \%s" \% ($a.text, $OP.text, $b.text) }
+ ;
+ ''')
+
+ lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+ walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
+
+ stdout = StringIO()
+ walkerMod.main(
+ ['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'],
+ stdin=StringIO("a+b"),
+ stdout=stdout
+ )
+
+ stdout = stdout.getvalue().strip()
+ self.failUnlessEqual(stdout, "u'a + b'")
+
+
+ def testTreeParserRewrite(self):
+ grammar = textwrap.dedent(
+ r'''grammar T7;
+ options {
+ language = Python;
+ output = AST;
+ }
+
+ r: ID OP^ ID EOF!;
+
+ ID: 'a'..'z'+;
+ OP: '+';
+ WS: ' '+ { $channel = HIDDEN; };
+ ''')
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar T7Walker;
+ options {
+ language=Python;
+ ASTLabelType=CommonTree;
+ tokenVocab=T7;
+ output=AST;
+ }
+ tokens {
+ ARG;
+ }
+ r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID));
+ ''')
+
+ lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+ walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
+
+ stdout = StringIO()
+ walkerMod.main(
+ ['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'],
+ stdin=StringIO("a+b"),
+ stdout=stdout
+ )
+
+ stdout = stdout.getvalue().strip()
+ self.failUnlessEqual(stdout, "(+ (ARG a) (ARG b))")
+
+
+
+ def testGrammarImport(self):
+ slave = textwrap.dedent(
+ r'''
+ parser grammar T8S;
+ options {
+ language=Python;
+ }
+
+ a : B;
+ ''')
+
+ parserName = self.writeInlineGrammar(slave)[0]
+ # slave parsers are imported as normal Python modules; to force a
+ # reload of the current version, purge the module from sys.modules
+ try:
+ del sys.modules[parserName+'Parser']
+ except KeyError:
+ pass
+
+ master = textwrap.dedent(
+ r'''
+ grammar T8M;
+ options {
+ language=Python;
+ }
+ import T8S;
+ s returns [res]: a { $res = $a.text };
+ B : 'b' ; // defines B from inherited token space
+ WS : (' '|'\n') {self.skip()} ;
+ ''')
+
+ stdout = StringIO()
+
+ lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True)
+ parserMod.main(
+ ['import.py', '--rule', 's'],
+ stdin=StringIO("b"),
+ stdout=stdout
+ )
+
+ stdout = stdout.getvalue().strip()
+ self.failUnlessEqual(stdout, "u'b'")
+
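+ # The purge idiom in testGrammarImport generalizes; a minimal sketch,
+ # assuming ANTLR's usual <grammar>Lexer / <grammar>Parser module naming:
+ #
+ #   for name in (parserName + 'Parser', parserName + 'Lexer'):
+ #       sys.modules.pop(name, None)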
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t055templates.py b/antlr-3.4/runtime/Python/tests/t055templates.py
new file mode 100644
index 0000000..5090b01
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t055templates.py
@@ -0,0 +1,508 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import stringtemplate3
+import testbase
+import sys
+import os
+from StringIO import StringIO
+
+class T(testbase.ANTLRTest):
+ def execParser(self, grammar, grammarEntry, input, group=None):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ if group is not None:
+ parser.templateLib = group
+ result = getattr(parser, grammarEntry)()
+ if result.st is not None:
+ return result.st.toString()
+ return None
+
+
+ def testInlineTemplate(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a : ID INT
+ -> template(id={$ID.text}, int={$INT.text})
+ "id=<id>, int=<int>"
+ ;
+
+ ID : 'a'..'z'+;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("id=abc, int=34", found)
+
+
+ def testExternalTemplate(self):
+ templates = textwrap.dedent(
+ '''\
+ group T;
+ expr(args, op) ::= <<
+ [<args; separator={<op>}>]
+ >>
+ '''
+ )
+
+ group = stringtemplate3.StringTemplateGroup(
+ file=StringIO(templates),
+ lexer='angle-bracket'
+ )
+
+ grammar = textwrap.dedent(
+ r'''grammar T2;
+ options {
+ language=Python;
+ output=template;
+ }
+ a : r+=arg OP r+=arg
+ -> expr(op={$OP.text}, args={$r})
+ ;
+ arg: ID -> template(t={$ID.text}) "<t>";
+
+ ID : 'a'..'z'+;
+ OP: '+';
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "a + b",
+ group
+ )
+
+ self.failUnlessEqual("[a+b]", found)
+
+
+ def testEmptyTemplate(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a : ID INT
+ ->
+ ;
+
+ ID : 'a'..'z'+;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnless(found is None)
+
+
+ def testList(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a: (r+=b)* EOF
+ -> template(r={$r})
+ "<r; separator=\",\">"
+ ;
+
+ b: ID
+ -> template(t={$ID.text}) "<t>"
+ ;
+
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc def ghi"
+ )
+
+ self.failUnlessEqual("abc,def,ghi", found)
+
+
+ def testAction(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a: ID
+ -> { stringtemplate3.StringTemplate("hello") }
+ ;
+
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("hello", found)
+
+
+ def testTemplateExpressionInAction(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a: ID
+ { $st = %{"hello"} }
+ ;
+
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("hello", found)
+
+
+ def testTemplateExpressionInAction2(self):
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a: ID
+ {
+ res = %{"hello <foo>"}
+ %res.foo = "world";
+ }
+ -> { res }
+ ;
+
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc"
+ )
+
+ self.failUnlessEqual("hello world", found)
+
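+ # A note on the %{...} shorthand used above: it builds an anonymous
+ # StringTemplate, and '%res.foo = ...' sets an attribute on it. A rough
+ # plain-Python equivalent, assuming the stringtemplate3 API:
+ #
+ #   res = stringtemplate3.StringTemplate("hello <foo>")
+ #   res.setAttribute("foo", "world")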
+
+ def testIndirectTemplateConstructor(self):
+ templates = textwrap.dedent(
+ '''\
+ group T;
+ expr(args, op) ::= <<
+ [<args; separator={<op>}>]
+ >>
+ '''
+ )
+
+ group = stringtemplate3.StringTemplateGroup(
+ file=StringIO(templates),
+ lexer='angle-bracket'
+ )
+
+ grammar = textwrap.dedent(
+ r'''grammar T;
+ options {
+ language=Python;
+ output=template;
+ }
+ a: ID
+ {
+ $st = %({"expr"})(args={[1, 2, 3]}, op={"+"})
+ }
+ ;
+
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc",
+ group
+ )
+
+ self.failUnlessEqual("[1+2+3]", found)
+
+
+ def testPredicates(self):
+ grammar = textwrap.dedent(
+ r'''grammar T3;
+ options {
+ language=Python;
+ output=template;
+ }
+ a : ID INT
+ -> {$ID.text=='a'}? template(int={$INT.text})
+ "A: <int>"
+ -> {$ID.text=='b'}? template(int={$INT.text})
+ "B: <int>"
+ -> template(int={$INT.text})
+ "C: <int>"
+ ;
+
+ ID : 'a'..'z'+;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "b 34"
+ )
+
+ self.failUnlessEqual("B: 34", found)
+
+
+ def testBacktrackingMode(self):
+ grammar = textwrap.dedent(
+ r'''grammar T4;
+ options {
+ language=Python;
+ output=template;
+ backtrack=true;
+ }
+ a : (ID INT)=> ID INT
+ -> template(id={$ID.text}, int={$INT.text})
+ "id=<id>, int=<int>"
+ ;
+
+ ID : 'a'..'z'+;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ '''
+ )
+
+ found = self.execParser(
+ grammar, 'a',
+ "abc 34"
+ )
+
+ self.failUnlessEqual("id=abc, int=34", found)
+
+
+ def testRewrite(self):
+ grammar = textwrap.dedent(
+ r'''grammar T5;
+ options {
+ language=Python;
+ output=template;
+ rewrite=true;
+ }
+
+ prog: stat+;
+
+ stat
+ : 'if' '(' expr ')' stat
+ | 'return' return_expr ';'
+ | '{' stat* '}'
+ | ID '=' expr ';'
+ ;
+
+ return_expr
+ : expr
+ -> template(t={$text}) <<boom(<t>)>>
+ ;
+
+ expr
+ : ID
+ | INT
+ ;
+
+ ID: 'a'..'z'+;
+ INT: '0'..'9'+;
+ WS: (' '|'\n')+ {$channel=HIDDEN;} ;
+ COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
+ '''
+ )
+
+ input = textwrap.dedent(
+ '''\
+ if ( foo ) {
+ b = /* bla */ 2;
+ return 1 /* foo */;
+ }
+
+ /* gnurz */
+ return 12;
+ '''
+ )
+
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.TokenRewriteStream(lexer)
+ parser = parserCls(tStream)
+ result = parser.prog()
+
+ found = tStream.toString()
+
+ expected = textwrap.dedent(
+ '''\
+ if ( foo ) {
+ b = /* bla */ 2;
+ return boom(1) /* foo */;
+ }
+
+ /* gnurz */
+ return boom(12);
+ '''
+ )
+
+ self.failUnlessEqual(expected, found)
+
+
+ def testTreeRewrite(self):
+ grammar = textwrap.dedent(
+ r'''grammar T6;
+ options {
+ language=Python;
+ output=AST;
+ }
+
+ tokens {
+ BLOCK;
+ ASSIGN;
+ }
+
+ prog: stat+;
+
+ stat
+ : IF '(' e=expr ')' s=stat
+ -> ^(IF $e $s)
+ | RETURN expr ';'
+ -> ^(RETURN expr)
+ | '{' stat* '}'
+ -> ^(BLOCK stat*)
+ | ID '=' expr ';'
+ -> ^(ASSIGN ID expr)
+ ;
+
+ expr
+ : ID
+ | INT
+ ;
+
+ IF: 'if';
+ RETURN: 'return';
+ ID: 'a'..'z'+;
+ INT: '0'..'9'+;
+ WS: (' '|'\n')+ {$channel=HIDDEN;} ;
+ COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
+ '''
+ )
+
+ treeGrammar = textwrap.dedent(
+ r'''tree grammar T6Walker;
+ options {
+ language=Python;
+ tokenVocab=T6;
+ ASTLabelType=CommonTree;
+ output=template;
+ rewrite=true;
+ }
+
+ prog: stat+;
+
+ stat
+ : ^(IF expr stat)
+ | ^(RETURN return_expr)
+ | ^(BLOCK stat*)
+ | ^(ASSIGN ID expr)
+ ;
+
+ return_expr
+ : expr
+ -> template(t={$text}) <<boom(<t>)>>
+ ;
+
+ expr
+ : ID
+ | INT
+ ;
+ '''
+ )
+
+ input = textwrap.dedent(
+ '''\
+ if ( foo ) {
+ b = /* bla */ 2;
+ return 1 /* foo */;
+ }
+
+ /* gnurz */
+ return 12;
+ '''
+ )
+
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.TokenRewriteStream(lexer)
+ parser = parserCls(tStream)
+ tree = parser.prog().tree
+ nodes = antlr3.tree.CommonTreeNodeStream(tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ walker.prog()
+
+ found = tStream.toString()
+
+ expected = textwrap.dedent(
+ '''\
+ if ( foo ) {
+ b = /* bla */ 2;
+ return boom(1) /* foo */;
+ }
+
+ /* gnurz */
+ return boom(12);
+ '''
+ )
+
+ self.failUnlessEqual(expected, found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t056lexer.py b/antlr-3.4/runtime/Python/tests/t056lexer.py
new file mode 100644
index 0000000..a53f92a
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t056lexer.py
@@ -0,0 +1,49 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import stringtemplate3
+import testbase
+import sys
+import os
+from StringIO import StringIO
+
+# FIXME: port other tests from TestLexer.java
+
+class T(testbase.ANTLRTest):
+ def execParser(self, grammar, grammarEntry, input):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ result = getattr(parser, grammarEntry)()
+ return result
+
+
+ def testRefToRuleDoesNotSetChannel(self):
+ # the ref to WS from rule A must not set A's channel to HIDDEN;
+ # $channel is local to a rule, just like $type.
+ grammar = textwrap.dedent(
+ r'''
+ grammar P;
+ options {
+ language=Python;
+ }
+ a returns [foo]: A EOF { $foo = '\%s, channel=\%d' \% ($A.text, $A.channel); } ;
+ A : '-' WS I ;
+ I : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(
+ grammar, 'a',
+ "- 34"
+ )
+
+ self.failUnlessEqual("- 34, channel=0", found)
+
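+ # For contrast, a minimal sketch (not part of the test) of how the
+ # whole A token could actually be hidden: the action has to run in
+ # rule A itself, e.g.
+ #
+ #   A : '-' WS I {$channel=HIDDEN;} ;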
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t057autoAST.py b/antlr-3.4/runtime/Python/tests/t057autoAST.py
new file mode 100644
index 0000000..e5c1d35
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t057autoAST.py
@@ -0,0 +1,1005 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class TestAutoAST(testbase.ANTLRTest):
+ def parserClass(self, base):
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._errors = []
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def emitErrorMessage(self, msg):
+ self._errors.append(msg)
+
+
+ return TParser
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def execParser(self, grammar, grammarEntry, input, expectErrors=False):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+
+ if not expectErrors:
+ self.assertEquals(len(parser._errors), 0, parser._errors)
+
+ result = ""
+
+ if r is not None:
+ if hasattr(r, 'result'):
+ result += r.result
+
+ if r.tree is not None:
+ result += r.tree.toStringTree()
+
+ if not expectErrors:
+ return result
+
+ else:
+ return result, parser._errors
+
+
+ def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ r = getattr(walker, treeEntry)()
+
+ if r is not None:
+ return r.tree.toStringTree()
+
+ return ""
+
+
+ def testTokenList(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;};
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("abc 34", found);
+
+
+ def testTokenListInSingleAltBlock(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : (ID INT) ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar,"a", "abc 34")
+ self.assertEquals("abc 34", found)
+
+
+ def testSimpleRootAtOuterLevel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID^ INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("(abc 34)", found)
+
+
+ def testSimpleRootAtOuterLevelReverse(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : INT ID^ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34 abc")
+ self.assertEquals("(abc 34)", found)
+
+
+ def testBang(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT! ID! INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34 dag 4532")
+ self.assertEquals("abc 4532", found)
+
+
+ def testOptionalThenRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ( ID INT )? ID^ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 1 b")
+ self.assertEquals("(b a 1)", found)
+
+
+ def testLabeledStringRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void'^ ID ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("(void foo ;)", found)
+
+
+ def testWildcard(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void'^ . ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("(void foo ;)", found)
+
+
+ def testWildcardRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void' .^ ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("(foo void ;)", found)
+
+
+ def testWildcardRootWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void' x=.^ ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("(foo void ;)", found)
+
+
+ def testWildcardRootWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void' x=.^ ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("(foo void ;)", found)
+
+
+ def testWildcardBangWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : v='void' x=.! ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void foo;")
+ self.assertEquals("void ;", found)
+
+
+ def testRootRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID^ INT^ ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 34 c")
+ self.assertEquals("(34 a c)", found)
+
+
+ def testRootRoot2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT^ ID^ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 34 c")
+ self.assertEquals("(c (34 a))", found)
+
+
+ def testRootThenRootInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID^ (INT '*'^ ID)+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 34 * b 9 * c")
+ self.assertEquals("(* (* (a 34) b 9) c)", found)
+
+
+ def testNestedSubrule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'void' (({pass}ID|INT) ID | 'null' ) ';' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void a b;")
+ self.assertEquals("void a b ;", found)
+
+
+ def testInvokeRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : type ID ;
+ type : {pass}'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a")
+ self.assertEquals("int a", found)
+
+
+ def testInvokeRuleAsRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : type^ ID ;
+ type : {pass}'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a")
+ self.assertEquals("(int a)", found)
+
+
+ def testInvokeRuleAsRootWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x=type^ ID ;
+ type : {pass}'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a")
+ self.assertEquals("(int a)", found)
+
+
+ def testInvokeRuleAsRootWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x+=type^ ID ;
+ type : {pass}'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a")
+ self.assertEquals("(int a)", found)
+
+
+ def testRuleRootInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID ('+'^ ID)* ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a+b+c+d")
+ self.assertEquals("(+ (+ (+ a b) c) d)", found)
+
+
+ def testRuleInvocationRuleRootInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID (op^ ID)* ;
+ op : {pass}'+' | '-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a+b+c-d")
+ self.assertEquals("(- (+ (+ a b) c) d)", found)
+
+
+ def testTailRecursion(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ s : a ;
+ a : atom ('exp'^ a)? ;
+ atom : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "s", "3 exp 4 exp 5")
+ self.assertEquals("(exp 3 (exp 4 5))", found)
+
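+ # Note the shape: the tail-recursive rule nests to the right,
+ # (exp 3 (exp 4 5)), whereas the loop form in testRuleRootInLoop nests
+ # to the left, (+ (+ a b) c); each '^' makes its token the new root of
+ # the tree built so far.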
+
+ def testSet(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID|INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testSetRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ('+' | '-')^ ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "+abc")
+ self.assertEquals("(+ abc)", found)
+
+
+ @testbase.broken(
+ "FAILS until antlr.g rebuilt in v3", testbase.GrammarCompileError)
+ def testSetRootWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x=('+' | '-')^ ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "+abc")
+ self.assertEquals("(+ abc)", found)
+
+
+ def testSetAsRuleRootInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID (('+'|'-')^ ID)* ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a+b-c")
+ self.assertEquals("(- (+ a b) c)", found)
+
+
+ def testNotSet(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ~ID '+' INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34+2")
+ self.assertEquals("34 + 2", found)
+
+
+ def testNotSetWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x=~ID '+' INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34+2")
+ self.assertEquals("34 + 2", found)
+
+
+ def testNotSetWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x=~ID '+' INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34+2")
+ self.assertEquals("34 + 2", found)
+
+
+ def testNotSetRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ~'+'^ INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34 55")
+ self.assertEquals("(34 55)", found)
+
+
+ def testNotSetRootWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ~'+'^ INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34 55")
+ self.assertEquals("(34 55)", found)
+
+
+ def testNotSetRootWithListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ~'+'^ INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "34 55")
+ self.assertEquals("(34 55)", found)
+
+
+ def testNotSetRuleRootInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : INT (~INT^ INT)* ;
+ blort : '+' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "3+4+5")
+ self.assertEquals("(+ (+ 3 4) 5)", found)
+
+
+ @testbase.broken("FIXME: What happened to the semicolon?", AssertionError)
+ def testTokenLabelReuse(self):
+ # check for compilation problem due to multiple defines
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result] : id=ID id=ID {$result = "2nd id="+$id.text+";";} ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("2nd id=b;a b", found)
+
+
+ def testTokenLabelReuse2(self):
+ # check for compilation problem due to multiple defines
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result]: id=ID id=ID^ {$result = "2nd id="+$id.text+',';} ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("2nd id=b,(b a)", found)
+
+
+ def testTokenListLabelReuse(self):
+ # check for compilation problem due to multiple defines
+ # make sure ids has both ID tokens
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result] : ids+=ID ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ expecting = "id list=[a,b],a b"
+ self.assertEquals(expecting, found)
+
+
+ def testTokenListLabelReuse2(self):
+ # check for compilation problem due to multiple defines
+ # make sure ids has both ID tokens
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result] : ids+=ID^ ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ expecting = "id list=[a,b],(a b)"
+ self.assertEquals(expecting, found)
+
+
+ def testTokenListLabelRuleRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : id+=ID^ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a", found)
+
+
+ def testTokenListLabelBang(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : id+=ID! ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("", found)
+
+
+ def testRuleListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result]: x+=b x+=b {
+ t=$x[1]
+ $result = "2nd x="+t.toStringTree()+',';
+ };
+ b : ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("2nd x=b,a b", found)
+
+
+ def testRuleListLabelRuleRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result] : ( x+=b^ )+ {
+ $result = "x="+$x[1].toStringTree()+',';
+ } ;
+ b : ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("x=(b a),(b a)", found)
+
+
+ def testRuleListLabelBang(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a returns [result] : x+=b! x+=b {
+ $result = "1st x="+$x[0].toStringTree()+',';
+ } ;
+ b : ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("1st x=a,b", found)
+
+
+ def testComplicatedMelange(self):
+ # check for compilation problem
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : A b=B b=B c+=C c+=C D {s = $D.text} ;
+ A : 'a' ;
+ B : 'b' ;
+ C : 'c' ;
+ D : 'd' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b b c c d")
+ self.assertEquals("a b b c c d", found)
+
+
+ def testReturnValueWithAST(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a returns [result] : ID b { $result = str($b.i) + '\n';} ;
+ b returns [i] : INT {$i=int($INT.text);} ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("34\nabc 34", found)
+
+
+ def testSetLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options { language=Python;output=AST; }
+ r : (INT|ID)+ ;
+ ID : 'a'..'z' + ;
+ INT : '0'..'9' +;
+ WS: (' ' | '\n' | '\\t')+ {$channel = HIDDEN;};
+ ''')
+
+ found = self.execParser(grammar, "r", "abc 34 d")
+ self.assertEquals("abc 34 d", found)
+
+
+ def testExtraTokenInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ decl : type^ ID '='! INT ';'! ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"],
+ errors)
+ self.assertEquals("(int x 1)", found) # tree gets correct x and 1 tokens
+
+
+ def testMissingIDInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ tokens {EXPR;}
+ decl : type^ ID '='! INT ';'! ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "int =1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 missing ID at u'='"], errors)
+ self.assertEquals("(int <missing ID> 1)", found) # tree gets invented ID token
+
+
+ def testMissingSetInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ tokens {EXPR;}
+ decl : type^ ID '='! INT ';'! ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "x=1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:0 mismatched input u'x' expecting set None"], errors)
+ self.assertEquals("(<error: x> x 1)", found) # tree gets invented ID token
+
+
+ def testMissingTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID INT ; // follow is EOF
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
+ self.assertEquals(["line 1:3 missing INT at '<EOF>'"], errors)
+ self.assertEquals("abc <missing INT>", found)
+
+
+ def testMissingTokenGivesErrorNodeInInvokedRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b ;
+ b : ID INT ; // follow should see EOF
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
+ self.assertEquals(["line 1:3 mismatched input '<EOF>' expecting INT"], errors)
+ self.assertEquals("<mismatched token: <EOF>, resync=abc>", found)
+
+
+ def testExtraTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b c ;
+ b : ID ;
+ c : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "abc ick 34",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"],
+ errors)
+ self.assertEquals("abc 34", found)
+
+
+ def testMissingFirstTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+ self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
+ self.assertEquals("<missing ID> 34", found)
+
+
+ def testMissingFirstTokenGivesErrorNode2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b c ;
+ b : ID ;
+ c : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+
+ # the parser finds an error at the first token, 34, and re-syncs.
+ # re-synchronizing does not consume a token, because 34 follows the
+ # ref to rule b (it is in the start set of c). The parser then
+ # matches 34 in c.
+ self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
+ self.assertEquals("<missing ID> 34", found)
+
+
+ def testNoViableAltGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b | c ;
+ b : ID ;
+ c : INT ;
+ ID : 'a'..'z'+ ;
+ S : '*' ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
+ self.assertEquals(["line 1:0 no viable alternative at input u'*'"],
+ errors)
+ self.assertEquals("<unexpected: [@0,0:0=u'*',<6>,1:0], resync=*>",
+ found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t058rewriteAST.py b/antlr-3.4/runtime/Python/tests/t058rewriteAST.py
new file mode 100644
index 0000000..15036f4
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t058rewriteAST.py
@@ -0,0 +1,1517 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class TestRewriteAST(testbase.ANTLRTest):
+ def parserClass(self, base):
+ class TParser(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._errors = []
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def emitErrorMessage(self, msg):
+ self._errors.append(msg)
+
+
+ return TParser
+
+
+ def lexerClass(self, base):
+ class TLexer(base):
+ def __init__(self, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+
+ self._output = ""
+
+
+ def capture(self, t):
+ self._output += t
+
+
+ def traceIn(self, ruleName, ruleIndex):
+ self.traces.append('>'+ruleName)
+
+
+ def traceOut(self, ruleName, ruleIndex):
+ self.traces.append('<'+ruleName)
+
+
+ def recover(self, input, re):
+ # no error recovery yet, just crash!
+ raise
+
+ return TLexer
+
+
+ def execParser(self, grammar, grammarEntry, input, expectErrors=False):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+
+ if not expectErrors:
+ self.assertEquals(len(parser._errors), 0, parser._errors)
+
+ result = ""
+
+ if r is not None:
+ if hasattr(r, 'result'):
+ result += r.result
+
+ if r.tree is not None:
+ result += r.tree.toStringTree()
+
+ if not expectErrors:
+ return result
+
+ else:
+ return result, parser._errors
+
+
+ def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+ lexerCls, parserCls = self.compileInlineGrammar(grammar)
+ walkerCls = self.compileInlineGrammar(treeGrammar)
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream)
+ r = getattr(parser, grammarEntry)()
+ nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+ nodes.setTokenStream(tStream)
+ walker = walkerCls(nodes)
+ r = getattr(walker, treeEntry)()
+
+ if r is not None:
+ return r.tree.toStringTree()
+
+ return ""
+
+
+ def testDelete(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("", found)
+
+
+ def testSingleToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testSingleTokenToNewNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> ID["x"];
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("x", found)
+
+
+ def testSingleTokenToNewNodeRoot(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> ^(ID["x"] INT);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("(x INT)", found)
+
+
+ def testSingleTokenToNewNode2(self):
+ # Allow creation of new nodes w/o args.
+ grammar = textwrap.dedent(
+ r'''
+ grammar TT;
+ options {language=Python;output=AST;}
+ a : ID -> ID[ ];
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("ID", found)
+
+
+ def testSingleCharLiteral(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'c' -> 'c';
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "c")
+ self.assertEquals("c", found)
+
+
+ def testSingleStringLiteral(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'ick' -> 'ick';
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "ick")
+ self.assertEquals("ick", found)
+
+
+ def testSingleRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : b -> b;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testReorderTokens(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> INT ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("34 abc", found)
+
+
+ def testReorderTokenAndRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : b INT -> INT b;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("34 abc", found)
+
+
+ def testTokenTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> ^(INT ID);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("(34 abc)", found)
+
+
+ def testTokenTreeAfterOtherStuff(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'void' ID INT -> 'void' ^(INT ID);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "void abc 34")
+ self.assertEquals("void (34 abc)", found)
+
+
+ def testNestedTokenTreeWithOuterLoop(self):
+ # verify that ID and INT both iterate over outer index variable
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {DUH;}
+ a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 1 b 2")
+ self.assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))", found)
+
+
+ def testOptionalSingleToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> ID? ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testClosureSingleToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID ID -> ID* ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testPositiveClosureSingleToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID ID -> ID+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptionalSingleRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : b -> b?;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testClosureSingleRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : b b -> b*;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testClosureOfLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x+=b x+=b -> $x*;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptionalLabelNoListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : (x=ID)? -> $x?;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a", found)
+
+
+ def testPositiveClosureSingleRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : b b -> b+;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testSinglePredicateT(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> {True}? ID -> ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("abc", found)
+
+
+ def testSinglePredicateF(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID -> {False}? ID -> ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc")
+ self.assertEquals("", found)
+
+
+ def testMultiplePredicate(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> {False}? ID
+ -> {True}? INT
+ ->
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 2")
+ self.assertEquals("2", found)
+
+
+ def testMultiplePredicateTrees(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID INT -> {False}? ^(ID INT)
+ -> {True}? ^(INT ID)
+ -> ID
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 2")
+ self.assertEquals("(2 a)", found)
+
+
+ def testSimpleTree(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : op INT -> ^(op INT);
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "-34")
+ self.assertEquals("(- 34)", found)
+
+
+ def testSimpleTree2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : op INT -> ^(INT op);
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "+ 34")
+ self.assertEquals("(34 +)", found)
+
+
+ def testNestedTrees(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "var a:int; b:float;")
+ self.assertEquals("(var (: a int) (: b float))", found)
+
+
+ def testImaginaryTokenCopy(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {VAR;}
+ a : ID (',' ID)*-> ^(VAR ID)+ ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a,b,c")
+ self.assertEquals("(VAR a) (VAR b) (VAR c)", found)
+
+
+ def testTokenUnreferencedOnLeftButDefined(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {VAR;}
+ a : b -> ID ;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("ID", found)
+
+
+ def testImaginaryTokenCopySetText(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {VAR;}
+ a : ID (',' ID)*-> ^(VAR["var"] ID)+ ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a,b,c")
+ self.assertEquals("(var a) (var b) (var c)", found)
+
+
+ def testImaginaryTokenNoCopyFromToken(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "{a b c}")
+ self.assertEquals("({ a b c)", found)
+
+
+ def testImaginaryTokenNoCopyFromTokenSetText(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : lc='{' ID+ '}' -> ^(BLOCK[$lc,"block"] ID+) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "{a b c}")
+ self.assertEquals("(block a b c)", found)
+
+
+ def testMixedRewriteAndAutoAST(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : b b^ ; // 2nd b matches only an INT; can make it root
+ b : ID INT -> INT ID
+ | INT
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 1 2")
+ self.assertEquals("(2 1 a)", found)
+
+
+ def testSubruleWithRewrite(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : b b ;
+ b : (ID INT -> INT ID | INT INT -> INT+ )
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a 1 2 3")
+ self.assertEquals("1 a 2 3", found)
+
+
+ def testSubruleWithRewrite2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {TYPE;}
+ a : b b ;
+ b : 'int'
+ ( ID -> ^(TYPE 'int' ID)
+ | ID '=' INT -> ^(TYPE 'int' ID INT)
+ )
+ ';'
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a; int b=3;")
+ self.assertEquals("(TYPE int a) (TYPE int b 3)", found)
+
+
+ def testNestedRewriteShutsOffAutoAST(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : b b ;
+ b : ID ( ID (last=ID -> $last)+ ) ';' // get last ID
+ | INT // should still get auto AST construction
+ ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b c d; 42")
+ self.assertEquals("d 42", found)
+
+
+ def testRewriteActions(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : atom -> ^({self.adaptor.create(INT,"9")} atom) ;
+ atom : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "3")
+ self.assertEquals("(9 3)", found)
+
+
+ def testRewriteActions2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : atom -> {self.adaptor.create(INT,"9")} atom ;
+ atom : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "3")
+ self.assertEquals("9 3", found)
+
+
+ def testRefToOldValue(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;
+ atom : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "3+4+5")
+ self.assertEquals("(+ (+ 3 4) 5)", found)
+
+
+ def testCopySemanticsForRules(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom)
+ atom : INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "3")
+ self.assertEquals("(3 3)", found)
+
+
+ def testCopySemanticsForRules2(self):
+ # copy type as a root for each invocation of (...)+ in rewrite
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : type ID (',' ID)* ';' -> ^(type ID)+ ;
+ type : 'int' ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a,b,c;")
+ self.assertEquals("(int a) (int b) (int c)", found)
+
+
+ def testCopySemanticsForRules3(self):
+ # copy type *and* modifier even though it's optional
+ # for each invocation of (...)+ in rewrite
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;
+ type : 'int' ;
+ modifier : 'public' ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "public int a,b,c;")
+ self.assertEquals("(int public a) (int public b) (int public c)", found)
+
+
+ def testCopySemanticsForRules3Double(self):
+ # copy type *and* modifier even though it's optional
+ # for each invocation of (...)+ in rewrite
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;
+ type : 'int' ;
+ modifier : 'public' ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "public int a,b,c;")
+ self.assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)", found)
+
+
+ def testCopySemanticsForRules4(self):
+ # copy type *and* modifier even though it's optional
+ # for each invocation of (...)+ in rewrite
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {MOD;}
+ a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;
+ type : 'int' ;
+ modifier : 'public' ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "public int a,b,c;")
+ self.assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)", found)
+
+
+ def testCopySemanticsLists(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {MOD;}
+ a : ID (',' ID)* ';' -> ID+ ID+ ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a,b,c;")
+ self.assertEquals("a b c a b c", found)
+
+
+ def testCopyRuleLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=b -> $x $x;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a a", found)
+
+
+ def testCopyRuleLabel2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=b -> ^($x $x);
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("(a a)", found)
+
+
+ def testQueueingOfTokens(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a,b,c;")
+ self.assertEquals("(int a b c)", found)
+
+
+ def testCopyOfTokens(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'int' ID ';' -> 'int' ID 'int' ID ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a;")
+ self.assertEquals("int a int a", found)
+
+
+ def testTokenCopyInLoop(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a,b,c;")
+ self.assertEquals("(int a) (int b) (int c)", found)
+
+
+ def testTokenCopyInLoopAgainstTwoOthers(self):
+ # must smear 'int' copies across as root of multiple trees
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "int a:1,b:2,c:3;")
+ self.assertEquals("(int a 1) (int b 2) (int c 3)", found)
+
+
+ def testListRefdOneAtATime(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID+ -> ID ID ID ; // works if 3 input IDs
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b c")
+ self.assertEquals("a b c", found)
+
+
+ def testSplitListWithLabels(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {VAR;}
+ a : first=ID others+=ID* -> $first VAR $others+ ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b c")
+ self.assertEquals("a VAR b c", found)
+
+
+ def testComplicatedMelange(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : A A b=B B b=B c+=C C c+=C D {s=$D.text} -> A+ B+ C+ D ;
+ type : 'int' | 'float' ;
+ A : 'a' ;
+ B : 'b' ;
+ C : 'c' ;
+ D : 'd' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a a b b b c c c d")
+ self.assertEquals("a a b b b c c c d", found)
+
+
+ def testRuleLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=b -> $x;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a", found)
+
+
+ def testAmbiguousRule(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID a -> a | INT ;
+ ID : 'a'..'z'+ ;
+ INT: '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar,
+ "a", "abc 34")
+ self.assertEquals("34", found)
+
+
+ def testRuleListLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x+=b x+=b -> $x+;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testRuleListLabel2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x+=b x+=b -> $x $x*;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptional(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=b (y=b)? -> $x $y?;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a", found)
+
+
+ def testOptional2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=ID (y=b)? -> $x $y?;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptional3(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x=ID (y=b)? -> ($x $y)?;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptional4(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x+=ID (y=b)? -> ($x $y)?;
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("a b", found)
+
+
+ def testOptional5(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : ID -> ID? ; // match an ID to optional ID
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a")
+ self.assertEquals("a", found)
+
+
+ def testArbitraryExprType(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : x+=b x+=b -> {CommonTree(None)};
+ b : ID ;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "a b")
+ self.assertEquals("", found)
+
+
+ def testSet(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a: (INT|ID)+ -> INT+ ID+ ;
+ INT: '0'..'9'+;
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "2 a 34 de")
+ self.assertEquals("2 34 a de", found)
+
+
+ def testSet2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a: (INT|ID) -> INT? ID? ;
+ INT: '0'..'9'+;
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "2")
+ self.assertEquals("2", found)
+
+
+ @testbase.broken("http://www.antlr.org:8888/browse/ANTLR-162",
+ antlr3.tree.RewriteEmptyStreamException)
+ def testSetWithLabel(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : x=(INT|ID) -> $x ;
+ INT: '0'..'9'+;
+ ID : 'a'..'z'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "2")
+ self.assertEquals("2", found)
+
+
+ def testRewriteAction(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens { FLOAT; }
+ r
+ : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text+".0"))}
+ ;
+ INT : '0'..'9'+;
+ WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+ ''')
+
+ found = self.execParser(grammar, "r", "25")
+ self.assertEquals("25.0", found)
+
+
+ def testOptionalSubruleWithoutRealElements(self):
+ # copy type *and* modifier even though it's optional
+ # for each invocation of (...)+ in rewrite
+ grammar = textwrap.dedent(
+ r"""
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {PARMS;}
+
+ modulo
+ : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?)
+ ;
+ parms : '#'|ID;
+ ID : ('a'..'z' | 'A'..'Z')+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ """)
+
+ found = self.execParser(grammar, "modulo", "modulo abc (x y #)")
+ self.assertEquals("(modulo abc (PARMS x y #))", found)
+
+
+ ## C A R D I N A L I T Y I S S U E S
+
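+    # These tests pin down rewrite behavior when stream sizes disagree:
+    # a template like (ID INT)+ needs compatible element counts,
+    # referencing more elements than were matched raises
+    # RewriteCardinalityException, and an empty stream used where an
+    # element is required raises RewriteEmptyStreamException (or
+    # RewriteEarlyExitException inside a + loop).
+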
+ def testCardinality(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ tokens {BLOCK;}
+ a : ID ID INT INT INT -> (ID INT)+;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ try:
+ self.execParser(grammar, "a", "a b 3 4 5")
+ self.fail()
+ except antlr3.tree.RewriteCardinalityException:
+ pass
+
+
+ def testCardinality2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID+ -> ID ID ID ; // only 2 input IDs
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ try:
+ self.execParser(grammar, "a", "a b")
+ self.fail()
+ except antlr3.tree.RewriteCardinalityException:
+ pass
+
+
+ def testCardinality3(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID? INT -> ID INT ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ try:
+ self.execParser(grammar, "a", "3")
+ self.fail()
+ except antlr3.tree.RewriteEmptyStreamException:
+ pass
+
+
+ def testLoopCardinality(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID? INT -> ID+ INT ;
+ op : '+'|'-' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ try:
+ self.execParser(grammar, "a", "3")
+ self.fail()
+ except antlr3.tree.RewriteEarlyExitException:
+ pass
+
+
+ def testWildcard(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {language=Python;output=AST;}
+ a : ID c=. -> $c;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found = self.execParser(grammar, "a", "abc 34")
+ self.assertEquals("34", found)
+
+
+ # E R R O R S
+
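+    # These tests check AST construction during error recovery: missing
+    # tokens appear as <missing X> nodes, failed matches as <error: ...>
+    # nodes, and extraneous tokens are dropped after resync.
+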
+ def testExtraTokenInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ tokens {EXPR;}
+ decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"],
+ errors)
+ self.assertEquals("(EXPR int x 1)", found) # tree gets correct x and 1 tokens
+
+
+ #@testbase.broken("FIXME", AssertionError)
+ def testMissingIDInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ tokens {EXPR;}
+ decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "int =1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 missing ID at u'='"], errors)
+ self.assertEquals("(EXPR int <missing ID> 1)", found) # tree gets invented ID token
+
+
+ def testMissingSetInSimpleDecl(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ tokens {EXPR;}
+ decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+ type : 'int' | 'float' ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "decl", "x=1;",
+ expectErrors=True)
+ self.assertEquals(["line 1:0 mismatched input u'x' expecting set None"],
+                          errors)
+ self.assertEquals("(EXPR <error: x> x 1)", found) # tree gets invented ID token
+
+
+ def testMissingTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID INT -> ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "abc",
+ expectErrors=True)
+ self.assertEquals(["line 1:3 missing INT at '<EOF>'"], errors)
+ # doesn't do in-line recovery for sets (yet?)
+ self.assertEquals("abc <missing INT>", found)
+
+
+ def testExtraTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b c -> b c;
+ b : ID -> ID ;
+ c : INT -> INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "abc ick 34",
+ expectErrors=True)
+ self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"],
+ errors)
+ self.assertEquals("abc 34", found)
+
+
+ #@testbase.broken("FIXME", AssertionError)
+ def testMissingFirstTokenGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : ID INT -> ID INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+ self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
+ self.assertEquals("<missing ID> 34", found)
+
+
+ #@testbase.broken("FIXME", AssertionError)
+ def testMissingFirstTokenGivesErrorNode2(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b c -> b c;
+ b : ID -> ID ;
+ c : INT -> INT ;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+ # finds an error at the first token, 34, and re-syncs.
+ # re-synchronizing does not consume a token because 34 follows
+ # ref to rule b (start of c). It then matches 34 in c.
+ self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
+ self.assertEquals("<missing ID> 34", found)
+
+
+ def testNoViableAltGivesErrorNode(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar foo;
+ options {language=Python;output=AST;}
+ a : b -> b | c -> c;
+ b : ID -> ID ;
+ c : INT -> INT ;
+ ID : 'a'..'z'+ ;
+ S : '*' ;
+ INT : '0'..'9'+;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
+        # finds no viable alternative at the first token, *; the parser
+        # reports the error, then resyncs, consuming the * before stopping
+        # at EOF, so an error node covering the resync region is returned.
+ self.assertEquals(["line 1:0 no viable alternative at input u'*'"],
+                          errors)
+ self.assertEquals("<unexpected: [@0,0:0=u'*',<6>,1:0], resync=*>",
+ found)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t059debug.py b/antlr-3.4/runtime/Python/tests/t059debug.py
new file mode 100644
index 0000000..1b620d1
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t059debug.py
@@ -0,0 +1,783 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import antlr3.debug
+import testbase
+import sys
+import threading
+import socket
+import errno
+import time
+
+class Debugger(threading.Thread):
+ def __init__(self, port):
+ super(Debugger, self).__init__()
+ self.events = []
+ self.success = False
+ self.port = port
+
+ def run(self):
+ # create listening socket
+ s = None
+ tstart = time.time()
+ while time.time() - tstart < 10:
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect(('127.0.0.1', self.port))
+ break
+ except socket.error, exc:
+ if exc.args[0] != errno.ECONNREFUSED:
+ raise
+ time.sleep(0.1)
+
+ if s is None:
+ self.events.append(['nosocket'])
+ return
+
+ s.setblocking(1)
+ s.settimeout(10.0)
+
+ output = s.makefile('w', 0)
+ input = s.makefile('r', 0)
+
+ try:
+ # handshake
+ l = input.readline().strip()
+ assert l == 'ANTLR 2'
+ l = input.readline().strip()
+ assert l.startswith('grammar "')
+
+ output.write('ACK\n')
+ output.flush()
+
+ while True:
+ event = input.readline().strip()
+ self.events.append(event.split('\t'))
+
+ output.write('ACK\n')
+ output.flush()
+
+ if event == 'terminate':
+ self.success = True
+ break
+
+ except socket.timeout:
+ self.events.append(['timeout'])
+ except socket.error, exc:
+ self.events.append(['socketerror', exc.args])
+
+ s.close()
+
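+# A sketch of the wire protocol the Debugger thread above speaks, as
+# implemented by run(): a two-line handshake, then one ACK per event
+# (event fields are tab-separated; the payloads below are illustrative):
+#
+#   parser   -> debugger:  ANTLR 2
+#   parser   -> debugger:  grammar "T.g ...
+#   debugger -> parser:    ACK
+#   parser   -> debugger:  enterRule <tab> T.g <tab> a
+#   debugger -> parser:    ACK
+#   ...                    (one ACK per event)
+#   parser   -> debugger:  terminate
+#   debugger -> parser:    ACK
+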
+
+class T(testbase.ANTLRTest):
+ def execParser(self, grammar, grammarEntry, input, listener,
+ parser_args={}):
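+        # When listener is None, a socket Debugger thread is started and
+        # the generated parser connects to it on the given port; otherwise
+        # the listener object is attached directly via dbg= and no socket
+        # is used.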
+ if listener is None:
+ port = 49100
+ debugger = Debugger(port)
+ debugger.start()
+ # TODO(pink): install alarm, so it doesn't hang forever in case of a bug
+
+ else:
+ port = None
+
+ try:
+ lexerCls, parserCls = self.compileInlineGrammar(
+ grammar, options='-debug')
+
+ cStream = antlr3.StringStream(input)
+ lexer = lexerCls(cStream)
+ tStream = antlr3.CommonTokenStream(lexer)
+ parser = parserCls(tStream, dbg=listener, port=port, **parser_args)
+ getattr(parser, grammarEntry)()
+
+ finally:
+ if listener is None:
+ debugger.join()
+ return debugger
+
+ def testBasicParser(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID EOF;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ listener = antlr3.debug.RecordDebugEventListener()
+
+ self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=listener)
+
+ # We only check that some LT events are present. How many is subject
+ # to change (at the time of writing there are two, which is one too
+ # many).
+ lt_events = [event for event in listener.events
+ if event.startswith("LT ")]
+ self.assertNotEqual(lt_events, [])
+
+ # For the rest, filter out LT events to get a reliable test.
+ expected = ["enterRule a",
+ "location 6:1",
+ "location 6:5",
+ "location 6:8",
+ "location 6:11",
+ "exitRule a"]
+ found = [event for event in listener.events
+ if not event.startswith("LT ")]
+ self.assertListEqual(found, expected)
+
+ def testSocketProxy(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID EOF;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['location', '6', '8'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['location', '6', '11'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+ def testRecognitionException(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID EOF;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a b",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['consumeHiddenToken', '1', '5', '99', '1', '1', '"'],
+ ['location', '6', '8'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+ ['LT', '2', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+ ['beginResync'],
+ ['consumeToken', '2', '4', '0', '1', '2', '"b'],
+ ['endResync'],
+ ['exception', 'UnwantedTokenException', '2', '1', '2'],
+ ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['location', '6', '11'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testSemPred(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : {True}? ID EOF;
+ ID : 'a'..'z'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['semanticPredicate', '1', 'True'],
+ ['location', '6', '13'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['location', '6', '16'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['location', '6', '19'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testPositiveClosureBlock(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID ( ID | INT )+ EOF;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a 1 b c 3",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+ ['location', '6', '8'],
+ ['enterSubRule', '1'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+ ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+ ['consumeToken', '4', '4', '0', '1', '4', '"b'],
+ ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+ ['consumeToken', '6', '4', '0', '1', '6', '"c'],
+ ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+ ['consumeToken', '8', '5', '0', '1', '8', '"3'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['exitDecision', '1'],
+ ['exitSubRule', '1'],
+ ['location', '6', '22'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['location', '6', '25'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testClosureBlock(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID ( ID | INT )* EOF;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a 1 b c 3",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+ ['location', '6', '8'],
+ ['enterSubRule', '1'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+ ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+ ['consumeToken', '4', '4', '0', '1', '4', '"b'],
+ ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+ ['consumeToken', '6', '4', '0', '1', '6', '"c'],
+ ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+ ['exitDecision', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '8'],
+ ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+ ['consumeToken', '8', '5', '0', '1', '8', '"3'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['exitDecision', '1'],
+ ['exitSubRule', '1'],
+ ['location', '6', '22'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '9', '"<EOF>'],
+ ['location', '6', '25'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testMismatchedSetException(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID ( ID | INT ) EOF;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['location', '6', '8'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['exception', 'MismatchedSetException', '1', '1', '1'],
+ ['exception', 'MismatchedSetException', '1', '1', '1'],
+ ['beginResync'],
+ ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+ ['endResync'],
+ ['location', '6', '24'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testBlock(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID ( b | c ) EOF;
+ b : ID;
+ c : INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a 1",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+ ['location', '6', '8'],
+ ['enterSubRule', '1'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['exitDecision', '1'],
+ ['enterAlt', '2'],
+ ['location', '6', '14'],
+ ['enterRule', 'T.g', 'c'],
+ ['location', '8', '1'],
+ ['enterAlt', '1'],
+ ['location', '8', '5'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+ ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+ ['location', '8', '8'],
+ ['exitRule', 'T.g', 'c'],
+ ['exitSubRule', '1'],
+ ['location', '6', '18'],
+ ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['location', '6', '21'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testNoViableAlt(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ID ( b | c ) EOF;
+ b : ID;
+ c : INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ BANG : '!' ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a !",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+ ['consumeHiddenToken', '1', '7', '99', '1', '1', '"'],
+ ['location', '6', '8'],
+ ['enterSubRule', '1'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+ ['exception', 'NoViableAltException', '2', '1', '2'],
+ ['exitDecision', '1'],
+ ['exitSubRule', '1'],
+ ['exception', 'NoViableAltException', '2', '1', '2'],
+ ['beginResync'],
+ ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+ ['consumeToken', '2', '4', '0', '1', '2', '"!'],
+ ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+ ['endResync'],
+ ['location', '6', '21'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testRuleBlock(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : b | c;
+ b : ID;
+ c : INT;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="1",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterDecision', '1', '0'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+ ['exitDecision', '1'],
+ ['enterAlt', '2'],
+ ['location', '6', '9'],
+ ['enterRule', 'T.g', 'c'],
+ ['location', '8', '1'],
+ ['enterAlt', '1'],
+ ['location', '8', '5'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+ ['consumeToken', '0', '5', '0', '1', '0', '"1'],
+ ['location', '8', '8'],
+ ['exitRule', 'T.g', 'c'],
+ ['location', '6', '10'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testRuleBlockSingleAlt(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : b;
+ b : ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['enterRule', 'T.g', 'b'],
+ ['location', '7', '1'],
+ ['enterAlt', '1'],
+ ['location', '7', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['location', '7', '7'],
+ ['exitRule', 'T.g', 'b'],
+ ['location', '6', '6'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testBlockSingleAlt(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ( b );
+ b : ID;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['enterAlt', '1'],
+ ['location', '6', '7'],
+ ['enterRule', 'T.g', 'b'],
+ ['location', '7', '1'],
+ ['enterAlt', '1'],
+ ['location', '7', '5'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+ ['location', '7', '7'],
+ ['exitRule', 'T.g', 'b'],
+ ['location', '6', '10'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testDFA(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ }
+ a : ( b | c ) EOF;
+ b : ID* INT;
+ c : ID+ BANG;
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ BANG : '!';
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ debugger = self.execParser(
+ grammar, 'a',
+ input="a!",
+ listener=None)
+
+ self.assertTrue(debugger.success)
+ expected = [['enterRule', 'T.g', 'a'],
+ ['location', '6', '1'],
+ ['enterAlt', '1'],
+ ['location', '6', '5'],
+ ['enterSubRule', '1'],
+ ['enterDecision', '1', '0'],
+ ['mark', '0'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+ ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+ ['consumeToken', '1', '4', '0', '1', '1', '"!'],
+ ['rewind', '0'],
+ ['exitDecision', '1'],
+ ['enterAlt', '2'],
+ ['location', '6', '11'],
+ ['enterRule', 'T.g', 'c'],
+ ['location', '8', '1'],
+ ['enterAlt', '1'],
+ ['location', '8', '5'],
+ ['enterSubRule', '3'],
+ ['enterDecision', '3', '0'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['exitDecision', '3'],
+ ['enterAlt', '1'],
+ ['location', '8', '5'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+ ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+ ['enterDecision', '3', '0'],
+ ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+ ['exitDecision', '3'],
+ ['exitSubRule', '3'],
+ ['location', '8', '9'],
+ ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+ ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+ ['consumeToken', '1', '4', '0', '1', '1', '"!'],
+ ['location', '8', '13'],
+ ['exitRule', 'T.g', 'c'],
+ ['exitSubRule', '1'],
+ ['location', '6', '15'],
+ ['LT', '1', '-1', '-1', '0', '1', '2', '"<EOF>'],
+ ['LT', '1', '-1', '-1', '0', '1', '2', '"<EOF>'],
+ ['consumeToken', '-1', '-1', '0', '1', '2', '"<EOF>'],
+ ['location', '6', '18'],
+ ['exitRule', 'T.g', 'a'],
+ ['terminate']]
+
+ self.assertListEqual(debugger.events, expected)
+
+
+ def testBasicAST(self):
+ grammar = textwrap.dedent(
+ r'''
+ grammar T;
+ options {
+ language=Python;
+ output=AST;
+ }
+ a : ( b | c ) EOF!;
+ b : ID* INT -> ^(INT ID*);
+ c : ID+ BANG -> ^(BANG ID+);
+ ID : 'a'..'z'+ ;
+ INT : '0'..'9'+ ;
+ BANG : '!';
+ WS : (' '|'\n') {$channel=HIDDEN;} ;
+ ''')
+
+ listener = antlr3.debug.RecordDebugEventListener()
+
+ self.execParser(
+ grammar, 'a',
+ input="a!",
+ listener=listener)
+
+ # don't check output for now (too dynamic), I'm satisfied if it
+ # doesn't crash
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t060leftrecursion.py b/antlr-3.4/runtime/Python/tests/t060leftrecursion.py
new file mode 100644
index 0000000..0c064b6
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/t060leftrecursion.py
@@ -0,0 +1,468 @@
+import unittest
+import re
+import textwrap
+import antlr3
+import testbase
+
+
+# Left-recursion resolution is not yet enabled in the tool.
+
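+# For reference, the resolution these tests exercise rewrites an
+# immediately left-recursive rule such as
+#
+#   e : e '*' e | e '+' e | ID ;
+#
+# into a non-left-recursive looping form, roughly
+#
+#   e : ID ( ('*' | '+') ID )* ;
+#
+# with precedence and associativity (e.g. <assoc=right>) encoded in the
+# generated decisions. This is a sketch of the standard transformation,
+# not the tool's exact output.
+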
+# class TestLeftRecursion(testbase.ANTLRTest):
+# def parserClass(self, base):
+# class TParser(base):
+# def __init__(self, *args, **kwargs):
+# base.__init__(self, *args, **kwargs)
+
+# self._output = ""
+
+
+# def capture(self, t):
+# self._output += str(t)
+
+
+# def recover(self, input, re):
+# # no error recovery yet, just crash!
+# raise
+
+# return TParser
+
+
+# def execParser(self, grammar, grammarEntry, input):
+# lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+# cStream = antlr3.StringStream(input)
+# lexer = lexerCls(cStream)
+# tStream = antlr3.CommonTokenStream(lexer)
+# parser = parserCls(tStream)
+# getattr(parser, grammarEntry)()
+# return parser._output
+
+
+# def runTests(self, grammar, tests, grammarEntry):
+# lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+# build_ast = re.search(r'output\s*=\s*AST', grammar)
+
+# for input, expecting in tests:
+# cStream = antlr3.StringStream(input)
+# lexer = lexerCls(cStream)
+# tStream = antlr3.CommonTokenStream(lexer)
+# parser = parserCls(tStream)
+# r = getattr(parser, grammarEntry)()
+# found = parser._output
+# if build_ast:
+# found += r.tree.toStringTree()
+
+# self.assertEquals(
+# expecting, found,
+# "%r != %r (for input %r)" % (expecting, found, input))
+
+
+# def testSimple(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# }
+# s : a { self.capture($a.text) } ;
+# a : a ID
+# | ID
+# ;
+# ID : 'a'..'z'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# found = self.execParser(grammar, 's', 'a b c')
+# expecting = "abc"
+# self.assertEquals(expecting, found)
+
+
+# def testSemPred(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# }
+# s : a { self.capture($a.text) } ;
+# a : a {True}? ID
+# | ID
+# ;
+# ID : 'a'..'z'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# found = self.execParser(grammar, "s", "a b c")
+# expecting = "abc"
+# self.assertEquals(expecting, found)
+
+# def testTernaryExpr(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# e : e '*'^ e
+# | e '+'^ e
+# | e '?'<assoc=right>^ e ':'! e
+# | e '='<assoc=right>^ e
+# | ID
+# ;
+# ID : 'a'..'z'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("a+b", "(+ a b)"),
+# ("a*b", "(* a b)"),
+# ("a?b:c", "(? a b c)"),
+# ("a=b=c", "(= a (= b c))"),
+# ("a?b+c:d", "(? a (+ b c) d)"),
+# ("a?b=c:d", "(? a (= b c) d)"),
+# ("a? b?c:d : e", "(? a (? b c d) e)"),
+# ("a?b: c?d:e", "(? a b (? c d e))"),
+# ]
+# self.runTests(grammar, tests, "e")
+
+
+# def testDeclarationsUsingASTOperators(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# declarator
+# : declarator '['^ e ']'!
+# | declarator '['^ ']'!
+# | declarator '('^ ')'!
+# | '*'^ declarator // binds less tight than suffixes
+# | '('! declarator ')'!
+# | ID
+# ;
+# e : INT ;
+# ID : 'a'..'z'+ ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("*a", "(* a)"),
+# ("**a", "(* (* a))"),
+# ("a[3]", "([ a 3)"),
+# ("b[]", "([ b)"),
+# ("(a)", "a"),
+# ("a[]()", "(( ([ a))"),
+# ("a[][]", "([ ([ a))"),
+# ("*a[]", "(* ([ a))"),
+# ("(*a)[]", "([ (* a))"),
+# ]
+# self.runTests(grammar, tests, "declarator")
+
+
+# def testDeclarationsUsingRewriteOperators(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# declarator
+# : declarator '[' e ']' -> ^('[' declarator e)
+# | declarator '[' ']' -> ^('[' declarator)
+# | declarator '(' ')' -> ^('(' declarator)
+# | '*' declarator -> ^('*' declarator) // binds less tight than suffixes
+# | '(' declarator ')' -> declarator
+# | ID -> ID
+# ;
+# e : INT ;
+# ID : 'a'..'z'+ ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("*a", "(* a)"),
+# ("**a", "(* (* a))"),
+# ("a[3]", "([ a 3)"),
+# ("b[]", "([ b)"),
+# ("(a)", "a"),
+# ("a[]()", "(( ([ a))"),
+# ("a[][]", "([ ([ a))"),
+# ("*a[]", "(* ([ a))"),
+# ("(*a)[]", "([ (* a))"),
+# ]
+# self.runTests(grammar, tests, "declarator")
+
+
+# def testExpressionsUsingASTOperators(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# e : e '.'^ ID
+# | e '.'^ 'this'
+# | '-'^ e
+# | e '*'^ e
+# | e ('+'^|'-'^) e
+# | INT
+# | ID
+# ;
+# ID : 'a'..'z'+ ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("1", "1"),
+# ("a+1", "(+ a 1)"),
+# ("a*1", "(* a 1)"),
+# ("a.b", "(. a b)"),
+# ("a.this", "(. a this)"),
+# ("a-b+c", "(+ (- a b) c)"),
+# ("a+b*c", "(+ a (* b c))"),
+# ("a.b+1", "(+ (. a b) 1)"),
+# ("-a", "(- a)"),
+# ("-a+b", "(+ (- a) b)"),
+# ("-a.b", "(- (. a b))"),
+# ]
+# self.runTests(grammar, tests, "e")
+
+
+# @testbase.broken(
+# "Grammar compilation returns errors", testbase.GrammarCompileError)
+# def testExpressionsUsingRewriteOperators(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# e : e '.' ID -> ^('.' e ID)
+# | e '.' 'this' -> ^('.' e 'this')
+# | '-' e -> ^('-' e)
+# | e '*' b=e -> ^('*' e $b)
+# | e (op='+'|op='-') b=e -> ^($op e $b)
+# | INT -> INT
+# | ID -> ID
+# ;
+# ID : 'a'..'z'+ ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("1", "1"),
+# ("a+1", "(+ a 1)"),
+# ("a*1", "(* a 1)"),
+# ("a.b", "(. a b)"),
+# ("a.this", "(. a this)"),
+# ("a+b*c", "(+ a (* b c))"),
+# ("a.b+1", "(+ (. a b) 1)"),
+# ("-a", "(- a)"),
+# ("-a+b", "(+ (- a) b)"),
+# ("-a.b", "(- (. a b))"),
+# ]
+# self.runTests(grammar, tests, "e")
+
+
+# def testExpressionAssociativity(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# e
+# : e '.'^ ID
+# | '-'^ e
+# | e '^'<assoc=right>^ e
+# | e '*'^ e
+# | e ('+'^|'-'^) e
+# | e ('='<assoc=right>^ |'+='<assoc=right>^) e
+# | INT
+# | ID
+# ;
+# ID : 'a'..'z'+ ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("1", "1"),
+# ("a+1", "(+ a 1)"),
+# ("a*1", "(* a 1)"),
+# ("a.b", "(. a b)"),
+# ("a-b+c", "(+ (- a b) c)"),
+# ("a+b*c", "(+ a (* b c))"),
+# ("a.b+1", "(+ (. a b) 1)"),
+# ("-a", "(- a)"),
+# ("-a+b", "(+ (- a) b)"),
+# ("-a.b", "(- (. a b))"),
+# ("a^b^c", "(^ a (^ b c))"),
+# ("a=b=c", "(= a (= b c))"),
+# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
+# ]
+# self.runTests(grammar, tests, "e")
+
+
+# def testJavaExpressions(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# expressionList
+# : e (','! e)*
+# ;
+# e : '('! e ')'!
+# | 'this'
+# | 'super'
+# | INT
+# | ID
+# | type '.'^ 'class'
+# | e '.'^ ID
+# | e '.'^ 'this'
+# | e '.'^ 'super' '('^ expressionList? ')'!
+# | e '.'^ 'new'^ ID '('! expressionList? ')'!
+# | 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+) // ugly; simplified
+# | e '['^ e ']'!
+# | '('^ type ')'! e
+# | e ('++'^ | '--'^)
+# | e '('^ expressionList? ')'!
+# | ('+'^|'-'^|'++'^|'--'^) e
+# | ('~'^|'!'^) e
+# | e ('*'^|'/'^|'%'^) e
+# | e ('+'^|'-'^) e
+# | e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e
+# | e ('<='^ | '>='^ | '>'^ | '<'^) e
+# | e 'instanceof'^ e
+# | e ('=='^ | '!='^) e
+# | e '&'^ e
+# | e '^'<assoc=right>^ e
+# | e '|'^ e
+# | e '&&'^ e
+# | e '||'^ e
+# | e '?' e ':' e
+# | e ('='<assoc=right>^
+# |'+='<assoc=right>^
+# |'-='<assoc=right>^
+# |'*='<assoc=right>^
+# |'/='<assoc=right>^
+# |'&='<assoc=right>^
+# |'|='<assoc=right>^
+# |'^='<assoc=right>^
+# |'>>='<assoc=right>^
+# |'>>>='<assoc=right>^
+# |'<<='<assoc=right>^
+# |'%='<assoc=right>^) e
+# ;
+# type: ID
+# | ID '['^ ']'!
+# | 'int'
+# | 'int' '['^ ']'!
+# ;
+# ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("a", "a"),
+# ("1", "1"),
+# ("a+1", "(+ a 1)"),
+# ("a*1", "(* a 1)"),
+# ("a.b", "(. a b)"),
+# ("a-b+c", "(+ (- a b) c)"),
+# ("a+b*c", "(+ a (* b c))"),
+# ("a.b+1", "(+ (. a b) 1)"),
+# ("-a", "(- a)"),
+# ("-a+b", "(+ (- a) b)"),
+# ("-a.b", "(- (. a b))"),
+# ("a^b^c", "(^ a (^ b c))"),
+# ("a=b=c", "(= a (= b c))"),
+# ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
+# ("a|b&c", "(| a (& b c))"),
+# ("(a|b)&c", "(& (| a b) c)"),
+# ("a > b", "(> a b)"),
+# ("a >> b", "(> a b)"), # text is from one token
+# ("a < b", "(< a b)"),
+# ("(T)x", "(( T x)"),
+# ("new A().b", "(. (new A () b)"),
+# ("(T)t.f()", "(( (( T (. t f)))"),
+# ("a.f(x)==T.c", "(== (( (. a f) x) (. T c))"),
+# ("a.f().g(x,1)", "(( (. (( (. a f)) g) x 1)"),
+# ("new T[((n-1) * x) + 1]", "(new T [ (+ (* (- n 1) x) 1))"),
+# ]
+# self.runTests(grammar, tests, "e")
+
+
+# def testReturnValueAndActions(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# }
+# s : e { self.capture($e.v) } ;
+# e returns [v, ignored]
+# : e '*' b=e {$v *= $b.v;}
+# | e '+' b=e {$v += $b.v;}
+# | INT {$v = int($INT.text);}
+# ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("4", "4"),
+# ("1+2", "3")
+# ]
+# self.runTests(grammar, tests, "s")
+
+
+# def testReturnValueAndActionsAndASTs(self):
+# grammar = textwrap.dedent(
+# r"""
+# grammar T;
+# options {
+# language=Python;
+# output=AST;
+# }
+# s : e { self.capture("v=\%s, " \% $e.v) } ;
+# e returns [v, ignored]
+# : e '*'^ b=e {$v *= $b.v;}
+# | e '+'^ b=e {$v += $b.v;}
+# | INT {$v = int($INT.text);}
+# ;
+# INT : '0'..'9'+ ;
+# WS : (' '|'\n') {self.skip()} ;
+# """)
+
+# tests = [
+# ("4", "v=4, 4"),
+# ("1+2", "v=3, (+ 1 2)"),
+# ]
+# self.runTests(grammar, tests, "s")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/testbase.py b/antlr-3.4/runtime/Python/tests/testbase.py
new file mode 100644
index 0000000..19c7fec
--- /dev/null
+++ b/antlr-3.4/runtime/Python/tests/testbase.py
@@ -0,0 +1,450 @@
+import unittest
+import imp
+import os
+import errno
+import sys
+import glob
+import re
+import tempfile
+import shutil
+import inspect
+import hashlib
+from distutils.errors import DistutilsFileError
+import antlr3
+
+def unlink(path):
+ try:
+ os.unlink(path)
+ except OSError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+
+class GrammarCompileError(Exception):
+ """Grammar failed to compile."""
+ pass
+
+
+# At least on MacOSX, the tempdir (/tmp) is a symlink. It is sometimes
+# dereferenced, sometimes not, which breaks the inspect.getmodule() function.
+testbasedir = os.path.join(
+ os.path.realpath(tempfile.gettempdir()),
+ 'antlr3-test')
+
+
+class BrokenTest(unittest.TestCase.failureException):
+ def __repr__(self):
+ name, reason = self.args
+ return '%s: %s: %s works now' % (
+            self.__class__.__name__, name, reason)
+
+
+def broken(reason, *exceptions):
+    '''Indicates a failing (or erroneous) test case that should eventually
+    succeed. If the test fails with an exception, list the exception type(s)
+    in the arguments.'''
+ def wrapper(test_method):
+ def replacement(*args, **kwargs):
+ try:
+ test_method(*args, **kwargs)
+ except exceptions or unittest.TestCase.failureException:
+ pass
+ else:
+ raise BrokenTest(test_method.__name__, reason)
+ replacement.__doc__ = test_method.__doc__
+ replacement.__name__ = 'XXX_' + test_method.__name__
+ replacement.todo = reason
+ return replacement
+ return wrapper
+
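+# Illustrative use of the decorator above (hypothetical test, not part of
+# this file): the test passes quietly for as long as it fails with one of
+# the listed exception types, and raises BrokenTest as soon as it starts
+# to succeed:
+#
+#   class SomeTest(unittest.TestCase):
+#       @broken("tree rewrites not supported yet", NotImplementedError)
+#       def testRewrite(self):
+#           ...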
+
+dependencyCache = {}
+compileErrorCache = {}
+
+# setup java CLASSPATH
+if 'CLASSPATH' not in os.environ:
+ cp = []
+
+ baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+ libDir = os.path.join(baseDir, 'lib')
+
+ jar = os.path.join(libDir, 'ST-4.0.1.jar')
+ if not os.path.isfile(jar):
+ raise DistutilsFileError(
+ "Missing file '%s'. Grap it from a distribution package."
+ % jar,
+ )
+ cp.append(jar)
+
+ jar = os.path.join(libDir, 'antlr-2.7.7.jar')
+ if not os.path.isfile(jar):
+ raise DistutilsFileError(
+ "Missing file '%s'. Grap it from a distribution package."
+ % jar,
+ )
+ cp.append(jar)
+
+ jar = os.path.join(libDir, 'junit-4.2.jar')
+ if not os.path.isfile(jar):
+ raise DistutilsFileError(
+ "Missing file '%s'. Grap it from a distribution package."
+ % jar,
+ )
+ cp.append(jar)
+
+ cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))
+
+ classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'
+
+else:
+ classpath = ''
+
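+# With CLASSPATH unset, the option built above resembles (paths are
+# illustrative):
+#   -cp "<base>/lib/ST-4.0.1.jar:<base>/lib/antlr-2.7.7.jar:<base>/lib/junit-4.2.jar:<base>/runtime/Python/build"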
+
+class ANTLRTest(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+ self.moduleName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
+ self.className = self.__class__.__name__
+ self._baseDir = None
+
+ self.lexerModule = None
+ self.parserModule = None
+
+ self.grammarName = None
+ self.grammarType = None
+
+
+ def assertListEqual(self, a, b):
+ if a == b:
+ return
+
+ import difflib
+ a = [str(l) + '\n' for l in a]
+ b = [str(l) + '\n' for l in b]
+
+ raise AssertionError(''.join(difflib.unified_diff(a, b)))
+
+
+ @property
+ def baseDir(self):
+ if self._baseDir is None:
+ testName = 'unknownTest'
+ for frame in inspect.stack():
+ code = frame[0].f_code
+ codeMod = inspect.getmodule(code)
+ if codeMod is None:
+ continue
+
+ # skip frames not in requested module
+ if codeMod is not sys.modules[self.__module__]:
+ continue
+
+ # skip some unwanted names
+ if code.co_name in ('nextToken', '<module>'):
+ continue
+
+ if code.co_name.startswith('test'):
+ testName = code.co_name
+ break
+
+ self._baseDir = os.path.join(
+ testbasedir,
+ self.moduleName, self.className, testName)
+ if not os.path.isdir(self._baseDir):
+ os.makedirs(self._baseDir)
+
+ return self._baseDir
+
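+    # The baseDir property above resolves, for example (illustrative), to
+    # /tmp/antlr3-test/t001lexer/t001lexer/testValid for a method testValid
+    # in class t001lexer of module t001lexer.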
+
+ def _invokeantlr(self, dir, file, options, javaOptions=''):
+ cmd = 'cd %s; java %s %s org.antlr.Tool -o . %s %s 2>&1' % (
+ dir, javaOptions, classpath, options, file
+ )
+ fp = os.popen(cmd)
+ output = ''
+ failed = False
+ for line in fp:
+ output += line
+
+ if line.startswith('error('):
+ failed = True
+
+ rc = fp.close()
+ if rc is not None:
+ failed = True
+
+ if failed:
+ raise GrammarCompileError(
+ "Failed to compile grammar '%s':\n%s\n\n" % (file, cmd)
+ + output
+ )
+
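+    # The command assembled above has the shape (values illustrative):
+    #   cd <baseDir>; java <javaOptions> -cp "..." org.antlr.Tool -o . <options> <grammar>.g 2>&1
+    # A run counts as failed if the exit status is non-zero or any output
+    # line starts with 'error('.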
+
+ def compileGrammar(self, grammarName=None, options='', javaOptions=''):
+ if grammarName is None:
+ grammarName = self.moduleName + '.g'
+
+ self._baseDir = os.path.join(
+ testbasedir,
+ self.moduleName)
+ if not os.path.isdir(self._baseDir):
+ os.makedirs(self._baseDir)
+
+ if self.grammarName is None:
+ self.grammarName = os.path.splitext(grammarName)[0]
+
+ grammarPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), grammarName)
+
+ # get type and name from first grammar line
+ grammar = open(grammarPath, 'r').read()
+ m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
+ assert m is not None, grammar
+ self.grammarType = m.group(2)
+ if self.grammarType is None:
+ self.grammarType = 'combined'
+
+        assert self.grammarType in ('lexer', 'parser', 'tree', 'combined'), self.grammarType
+
+ # don't try to rebuild grammar, if it already failed
+ if grammarName in compileErrorCache:
+ return
+
+ try:
+ # # get dependencies from antlr
+ # if grammarName in dependencyCache:
+ # dependencies = dependencyCache[grammarName]
+
+ # else:
+ # dependencies = []
+ # cmd = ('cd %s; java %s %s org.antlr.Tool -o . -depend %s 2>&1'
+ # % (self.baseDir, javaOptions, classpath, grammarPath))
+
+ # output = ""
+ # failed = False
+
+ # fp = os.popen(cmd)
+ # for line in fp:
+ # output += line
+
+ # if line.startswith('error('):
+ # failed = True
+ # elif ':' in line:
+ # a, b = line.strip().split(':', 1)
+ # dependencies.append(
+ # (os.path.join(self.baseDir, a.strip()),
+ # [os.path.join(self.baseDir, b.strip())])
+ # )
+
+ # rc = fp.close()
+ # if rc is not None:
+ # failed = True
+
+ # if failed:
+ # raise GrammarCompileError(
+ # "antlr -depend failed with code %s on grammar '%s':\n\n"
+ # % (rc, grammarName)
+ # + cmd
+ # + "\n"
+ # + output
+ # )
+
+ # # add dependencies to my .stg files
+ # templateDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'tool', 'src', 'main', 'resources', 'org', 'antlr', 'codegen', 'templates', 'Python'))
+ # templates = glob.glob(os.path.join(templateDir, '*.stg'))
+
+ # for dst, src in dependencies:
+ # src.extend(templates)
+
+ # dependencyCache[grammarName] = dependencies
+
+ # rebuild = False
+ # for dest, sources in dependencies:
+ # if not os.path.isfile(dest):
+ # rebuild = True
+ # break
+
+ # for source in sources:
+ # if os.path.getmtime(source) > os.path.getmtime(dest):
+ # rebuild = True
+ # break
+
+
+ # if rebuild:
+ # self._invokeantlr(self.baseDir, grammarPath, options, javaOptions)
+
+ self._invokeantlr(self.baseDir, grammarPath, options, javaOptions)
+
+ except:
+ # mark grammar as broken
+ compileErrorCache[grammarName] = True
+ raise
+
+
+ def lexerClass(self, base):
+ """Optionally build a subclass of generated lexer class"""
+
+ return base
+
+
+ def parserClass(self, base):
+ """Optionally build a subclass of generated parser class"""
+
+ return base
+
+
+ def walkerClass(self, base):
+ """Optionally build a subclass of generated walker class"""
+
+ return base
+
+
+ def __load_module(self, name):
+ modFile, modPathname, modDescription \
+ = imp.find_module(name, [self.baseDir])
+
+ return imp.load_module(
+ name, modFile, modPathname, modDescription
+ )
+
+
+ def getLexer(self, *args, **kwargs):
+ """Build lexer instance. Arguments are passed to lexer.__init__()."""
+
+ if self.grammarType == 'lexer':
+ self.lexerModule = self.__load_module(self.grammarName)
+ cls = getattr(self.lexerModule, self.grammarName)
+ else:
+ self.lexerModule = self.__load_module(self.grammarName + 'Lexer')
+ cls = getattr(self.lexerModule, self.grammarName + 'Lexer')
+
+ cls = self.lexerClass(cls)
+
+ lexer = cls(*args, **kwargs)
+
+ return lexer
+
+
+ def getParser(self, *args, **kwargs):
+ """Build parser instance. Arguments are passed to parser.__init__()."""
+
+ if self.grammarType == 'parser':
+            self.parserModule = self.__load_module(self.grammarName)
+            cls = getattr(self.parserModule, self.grammarName)
+ else:
+ self.parserModule = self.__load_module(self.grammarName + 'Parser')
+ cls = getattr(self.parserModule, self.grammarName + 'Parser')
+ cls = self.parserClass(cls)
+
+ parser = cls(*args, **kwargs)
+
+ return parser
+
+
+ def getWalker(self, *args, **kwargs):
+ """Build walker instance. Arguments are passed to walker.__init__()."""
+
+ self.walkerModule = self.__load_module(self.grammarName + 'Walker')
+ cls = getattr(self.walkerModule, self.grammarName + 'Walker')
+ cls = self.walkerClass(cls)
+
+ walker = cls(*args, **kwargs)
+
+ return walker
+
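+    # Typical use of getWalker above (illustrative; assumes a prior parser
+    # run r and its token stream tokens): feed the resulting AST into the
+    # tree walker:
+    #   nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+    #   nodes.setTokenStream(tokens)
+    #   walker = self.getWalker(nodes)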
+
+ def writeInlineGrammar(self, grammar):
+        # Create a unique ID for this test and embed it in the grammar name,
+ # to avoid class name reuse. This kinda sucks. Need to find a way so
+ # tests can use the same grammar name without messing up the namespace.
+ # Well, first I should figure out what the exact problem is...
+ id = hashlib.md5(self.baseDir).hexdigest()[-8:]
+ grammar = grammar.replace('$TP', 'TP' + id)
+ grammar = grammar.replace('$T', 'T' + id)
+
+ # get type and name from first grammar line
+ m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
+ assert m is not None, grammar
+ grammarType = m.group(2)
+ if grammarType is None:
+ grammarType = 'combined'
+ grammarName = m.group(3)
+
+ assert grammarType in ('lexer', 'parser', 'tree', 'combined'), grammarType
+
+ grammarPath = os.path.join(self.baseDir, grammarName + '.g')
+
+ # dump temp grammar file
+ fp = open(grammarPath, 'w')
+ fp.write(grammar)
+ fp.close()
+
+ return grammarName, grammarPath, grammarType
+
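+    # Illustrative example for the method above: a source containing
+    # "grammar $T; ..." is rewritten with a unique suffix and dumped to e.g.
+    # <baseDir>/Tdeadbeef.g (containing "grammar Tdeadbeef; ..."), and
+    # ('Tdeadbeef', '<baseDir>/Tdeadbeef.g', 'combined') is returned.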
+
+ def writeFile(self, name, contents):
+ path = os.path.join(self.baseDir, name)
+
+ fp = open(path, 'w')
+ fp.write(contents)
+ fp.close()
+
+ return path
+
+
+ def compileInlineGrammar(self, grammar, options='', javaOptions='',
+ returnModule=False):
+ # write grammar file
+ grammarName, grammarPath, grammarType = self.writeInlineGrammar(grammar)
+
+ # compile it
+ self._invokeantlr(
+ os.path.dirname(grammarPath),
+ os.path.basename(grammarPath),
+ options,
+ javaOptions
+ )
+
+ if grammarType == 'combined':
+ lexerMod = self.__load_module(grammarName + 'Lexer')
+ parserMod = self.__load_module(grammarName + 'Parser')
+ if returnModule:
+ return lexerMod, parserMod
+
+ lexerCls = getattr(lexerMod, grammarName + 'Lexer')
+ lexerCls = self.lexerClass(lexerCls)
+ parserCls = getattr(parserMod, grammarName + 'Parser')
+ parserCls = self.parserClass(parserCls)
+
+ return lexerCls, parserCls
+
+ if grammarType == 'lexer':
+ lexerMod = self.__load_module(grammarName)
+ if returnModule:
+ return lexerMod
+
+ lexerCls = getattr(lexerMod, grammarName)
+ lexerCls = self.lexerClass(lexerCls)
+
+ return lexerCls
+
+ if grammarType == 'parser':
+ parserMod = self.__load_module(grammarName)
+ if returnModule:
+ return parserMod
+
+ parserCls = getattr(parserMod, grammarName)
+ parserCls = self.parserClass(parserCls)
+
+ return parserCls
+
+ if grammarType == 'tree':
+ walkerMod = self.__load_module(grammarName)
+ if returnModule:
+ return walkerMod
+
+ walkerCls = getattr(walkerMod, grammarName)
+ walkerCls = self.walkerClass(walkerCls)
+
+ return walkerCls
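+
+
+# Illustrative use of compileInlineGrammar from a test case (hypothetical
+# grammar, not part of this file; assumes textwrap is imported):
+#
+#   lexerCls, parserCls = self.compileInlineGrammar(
+#       textwrap.dedent(r'''
+#           grammar $T;
+#           options { language=Python; }
+#           r : 'x' ;
+#           WS : ' ' {self.skip()} ;
+#           '''))
+#   lexer = lexerCls(antlr3.StringStream('x'))
+#   parser = parserCls(antlr3.CommonTokenStream(lexer))
+#   parser.r()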