python-antlr3-3.5.2/0000755000175000017500000000000012653072315012751 5ustar zigozigopython-antlr3-3.5.2/setup.py0000644000175000017500000002170212653072152014464 0ustar zigozigo# bootstrapping setuptools import ez_setup ez_setup.use_setuptools() import os import sys import textwrap from distutils.errors import * from distutils.command.clean import clean as _clean from distutils.cmd import Command from setuptools import setup from distutils import log from distutils.core import setup class clean(_clean): """Also cleanup local temp files.""" def run(self): _clean.run(self) import fnmatch # kill temporary files patterns = [ # generic tempfiles '*~', '*.bak', '*.pyc', # tempfiles generated by ANTLR runs 't[0-9]*Lexer.py', 't[0-9]*Parser.py', '*.tokens', '*__.g', ] for path in ('antlr3', 'unittests', 'tests'): path = os.path.join(os.path.dirname(__file__), path) if os.path.isdir(path): for root, dirs, files in os.walk(path, topdown=True): graveyard = [] for pat in patterns: graveyard.extend(fnmatch.filter(files, pat)) for name in graveyard: filePath = os.path.join(root, name) try: log.info("removing '%s'", filePath) os.unlink(filePath) except OSError, exc: log.warn( "Failed to delete '%s': %s", filePath, exc ) class TestError(DistutilsError): pass # grml.. the class name appears in the --help output: # ... # Options for 'CmdUnitTest' command # ... # so I have to use a rather ugly name... class unittest(Command): """Run unit tests for package""" description = "run unit tests for package" user_options = [ ('xml-output=', None, "Directory for JUnit compatible XML files."), ] boolean_options = [] def initialize_options(self): self.xml_output = None def finalize_options(self): pass def run(self): testDir = os.path.join(os.path.dirname(__file__), 'unittests') if not os.path.isdir(testDir): raise DistutilsFileError( "There is not 'unittests' directory. 
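
# Illustrative sketch: any module named unittests/test*.py is picked up
# by the glob/imp discovery loop of this command; a minimal module it
# would load could look like this (hypothetical file
# unittests/testexample.py):
#
#   import unittest
#
#   class TestExample(unittest.TestCase):
#       def testSomething(self):
#           self.assertEqual(1 + 1, 2)
#
# loadTestsFromModule() then adds TestExample's test methods to the suite.
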
Did you fetch the " "development version?", ) import glob import imp import unittest import traceback import StringIO suite = unittest.TestSuite() loadFailures = [] # collect tests from all unittests/test*.py files testFiles = [] for testPath in glob.glob(os.path.join(testDir, 'test*.py')): testFiles.append(testPath) testFiles.sort() for testPath in testFiles: testID = os.path.basename(testPath)[:-3] try: modFile, modPathname, modDescription \ = imp.find_module(testID, [testDir]) testMod = imp.load_module( testID, modFile, modPathname, modDescription ) suite.addTests( unittest.defaultTestLoader.loadTestsFromModule(testMod) ) except Exception: buf = StringIO.StringIO() traceback.print_exc(file=buf) loadFailures.append( (os.path.basename(testPath), buf.getvalue()) ) if self.xml_output: import xmlrunner runner = xmlrunner.XMLTestRunner( stream=open(os.path.join(self.xml_output, 'unittest.xml'), 'w')) else: runner = unittest.TextTestRunner(verbosity=2) result = runner.run(suite) for testName, error in loadFailures: sys.stderr.write('\n' + '='*70 + '\n') sys.stderr.write( "Failed to load test module %s\n" % testName ) sys.stderr.write(error) sys.stderr.write('\n') if not result.wasSuccessful() or loadFailures: raise TestError( "Unit test suite failed!", ) class functest(Command): """Run functional tests for package""" description = "run functional tests for package" user_options = [ ('testcase=', None, "testcase to run [default: run all]"), ('antlr-version=', None, "ANTLR version to use [default: HEAD (in ../../build)]"), ('antlr-jar=', None, "Explicit path to an antlr jar (overrides --antlr-version)"), ('xml-output=', None, "Directory for JUnit compatible XML files."), ] boolean_options = [] def initialize_options(self): self.testcase = None self.antlr_version = 'HEAD' self.antlr_jar = None self.xml_output = None def finalize_options(self): pass def run(self): import glob import imp import unittest import traceback import StringIO testDir = os.path.join(os.path.dirname(__file__), 'tests') if not os.path.isdir(testDir): raise DistutilsFileError( "There is not 'tests' directory. 
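
# Illustrative standalone use of the bundled xmlrunner module (see
# xmlrunner.py in this package), mirroring the --xml-output branch of
# the unittest command above:
#
#   import unittest
#   import xmlrunner
#
#   class TestSmoke(unittest.TestCase):
#       def testTruth(self):
#           self.assertTrue(True)
#
#   suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestSmoke)
#   runner = xmlrunner.XMLTestRunner(stream=open('unittest.xml', 'w'))
#   runner.run(suite)
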
Did you fetch the " "development version?", ) # make sure, relative imports from testcases work sys.path.insert(0, testDir) rootDir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) if self.antlr_jar is not None: classpath = [self.antlr_jar] elif self.antlr_version == 'HEAD': classpath = [ os.path.join(rootDir, 'tool', 'target', 'classes'), os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes') ] else: classpath = [ os.path.join(rootDir, 'archive', 'antlr-%s.jar' % self.antlr_version) ] classpath.extend([ os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'), os.path.join(rootDir, 'lib', 'stringtemplate-3.2.1.jar'), os.path.join(rootDir, 'lib', 'ST-4.0.2.jar'), os.path.join(rootDir, 'lib', 'junit-4.2.jar') ]) os.environ['CLASSPATH'] = ':'.join(classpath) os.environ['ANTLRVERSION'] = self.antlr_version suite = unittest.TestSuite() loadFailures = [] # collect tests from all tests/t*.py files testFiles = [] test_glob = 't[0-9][0-9][0-9]*.py' for testPath in glob.glob(os.path.join(testDir, test_glob)): if testPath.endswith('Lexer.py') or testPath.endswith('Parser.py'): continue # if a single testcase has been selected, filter out all other # tests if (self.testcase is not None and not os.path.basename(testPath)[:-3].startswith(self.testcase)): continue testFiles.append(testPath) testFiles.sort() for testPath in testFiles: testID = os.path.basename(testPath)[:-3] try: modFile, modPathname, modDescription \ = imp.find_module(testID, [testDir]) testMod = imp.load_module( testID, modFile, modPathname, modDescription) suite.addTests( unittest.defaultTestLoader.loadTestsFromModule(testMod)) except Exception: buf = StringIO.StringIO() traceback.print_exc(file=buf) loadFailures.append( (os.path.basename(testPath), buf.getvalue())) if self.xml_output: import xmlrunner runner = xmlrunner.XMLTestRunner( stream=open(os.path.join(self.xml_output, 'functest.xml'), 'w')) else: runner = unittest.TextTestRunner(verbosity=2) result = runner.run(suite) for testName, error in loadFailures: sys.stderr.write('\n' + '='*70 + '\n') sys.stderr.write( "Failed to load test module %s\n" % testName ) sys.stderr.write(error) sys.stderr.write('\n') if not result.wasSuccessful() or loadFailures: raise TestError( "Functional test suite failed!", ) setup(name='antlr_python_runtime', version='3.4', packages=['antlr3'], author="Benjamin Niemann", author_email="pink@odahoda.de", url="http://www.antlr.org/", download_url="http://www.antlr.org/download.html", license="BSD", description="Runtime package for ANTLR3", long_description=textwrap.dedent('''\ This is the runtime package for ANTLR3, which is required to use parsers generated by ANTLR3. '''), cmdclass={'unittest': unittest, 'functest': functest, 'clean': clean }, ) python-antlr3-3.5.2/pylintrc0000644000175000017500000002222212653072152014537 0ustar zigozigo# lint Python modules using external checkers. # # This is the main checker controling the other ones and the reports # generation. It is itself both a raw checker and an astng checker in order # to: # * handle message activation / deactivation at the module level # * handle some basic but necessary stats'data (number of classes, methods...) # [MASTER] # Specify a configuration file. #rcfile= # Profiled execution. profile=no # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # Set the cache size for astng objects. 
cache-size=500 # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= [COMMANDS] # Display a help message for the given message id and exit. The value may be a # comma separated list of message ids. #help-msg= [MESSAGES CONTROL] # Enable only checker(s) with the given id(s). This option conflict with the # disable-checker option #enable-checker= # Enable all checker(s) except those with the given id(s). This option conflict # with the disable-checker option #disable-checker= # Enable all messages in the listed categories. #enable-msg-cat= # Disable all messages in the listed categories. #disable-msg-cat= # Enable the message(s) with the given id(s). #enable-msg= # Disable the message(s) with the given id(s). # W0622: Redefining built-in '...' # C0103: Invalid name # R0904: Too many public methods # R0201: Method could be a function # C0302: Too many lines in module # R0902: Too many instance attributes # R0913: Too many arguments # R0912: Too many branches # R0903: To few public methods # C0111: Missing docstring # W0403: Relative import # W0401: Wildcard import # W0142: */** magic disable-msg=W0622, C0103, R0904, R0201, C0302, R0902, R0913, R0912, R0903, C0111, W0403, W0401, W0142 [REPORTS] # set the output format. Available formats are text, parseable, colorized and # html output-format=text # Include message's id in output include-ids=yes # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells wether to display a full report or only the messages reports=yes # Python expression which should return a note less than 10 (10 is the highest # note).You have access to the variables errors warning, statement which # respectivly contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (R0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Add a comment according to your evaluation note. This is used by the global # evaluation report (R0004). comment=no # Enable the report(s) with the given id(s). #enable-report= # Disable the report(s) with the given id(s). #disable-report= # try to find bugs in the code using type inference # [TYPECHECK] # Tells wether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # When zope mode is activated, consider the acquired-members option to ignore # access to some undefined attributes. zope=no # List of members which are usually get through zope's acquisition mecanism and # so shouldn't trigger E0201 when accessed (need zope=yes to be considered). acquired-members=REQUEST,acl_users,aq_parent # checks for # * unused variables / imports # * undefined variables # * redefinition of variable from builtins or from an outer scope # * use of variable before assigment # [VARIABLES] # Tells wether we should check for unused import in __init__ files. init-import=no # A regular expression matching names used for dummy variables (i.e. not used). dummy-variables-rgx=_|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. 
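
# Worked example for the evaluation expression above: 2 errors, 10
# warnings, 3 refactor and 25 convention messages across 1000 statements
# score 10.0 - ((5*2 + 10 + 3 + 25) / 1000.0 * 10) = 10.0 - 0.48 = 9.52.
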
additional-builtins= # checks for : # * doc strings # * modules / classes / functions / methods / arguments / variables name # * number of arguments, local variables, branchs, returns and statements in # functions, methods # * required module attributes # * dangerous default values as arguments # * redefinition of function / method / class # * uses of the global statement # [BASIC] # Required attributes for module, separated by a comma required-attributes= # Regular expression which should only match functions or classes name which do # not require a docstring no-docstring-rgx=__.*__ # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Regular expression which should only match correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct instance attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # List of builtins function names that should not be used, separated by a comma bad-functions=map,filter,apply,input # checks for sign of poor/misdesign: # * number of methods, attributes, local variables... # * size, complexity of functions, methods # [DESIGN] # Maximum number of arguments for function / method max-args=5 # Maximum number of locals for function / method body max-locals=15 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body max-branchs=12 # Maximum number of statements in function / method body max-statements=50 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 # checks for : # * methods without self as first argument # * overridden methods signature # * access only to existant members via self # * attributes not defined in the __init__ method # * supported interfaces implementation # * unreachable code # [CLASSES] # List of interface methods to ignore, separated by a comma. This is used for # instance to not check methods defines in Zope's Interface base class. ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by # List of method names used to declare (i.e. assign) instance attributes. 
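
# Illustration of method-rgx against this package's own API: a camelCase
# name such as "fillBuffer" does not match [a-z_][a-z0-9_]{2,30}$ (the
# upper-case B falls outside the character class), while "fill_buffer"
# would; this is why C0103 (invalid name) is disabled in
# [MESSAGES CONTROL] above rather than renaming the public methods.
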
defining-attr-methods=__init__,__new__,setUp # checks for # * external modules dependencies # * relative / wildcard imports # * cyclic imports # * uses of deprecated modules # [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,string,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report R0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report R0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report R0402 must # not be disabled) int-import-graph= # checks for similarities and duplicated code. This computation may be # memory / CPU intensive, so you should disable it if you experiments some # problems. # [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # checks for : # * unauthorized constructions # * strict indentation # * line length # * use of <> instead of != # [FORMAT] # Maximum number of characters on a single line. max-line-length=80 # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # checks for: # * warning notes in the code like FIXME, XXX # * PEP 263: source code with non ascii character but no encoding declaration # [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO python-antlr3-3.5.2/hudson-build.sh0000755000175000017500000000360412653072152015707 0ustar zigozigo#!/bin/bash ANTLR_JOB=${1:-ANTLR_Tool} ST_VERSION=3.1 ANTLR2_VERSION=2.7.7 # find the antlr.jar from the upstream project JAR=$(ls $WORKSPACE/../../$ANTLR_JOB/lastSuccessful/org.antlr\$antlr/archive/org.antlr/antlr/*/antlr-*-jar-with-dependencies.jar) echo "antlr.jar=$JAR" if [ ! -f "$JAR" ]; then echo "Could not find antlr.jar" exit 1 fi echo "************************************************************************" echo "Setting up dependencies" echo rm -fr $WORKSPACE/tmp mkdir -p $WORKSPACE/tmp cd $WORKSPACE # stringtemplate3 if [ ! -f stringtemplate3-$ST_VERSION.tar.gz ]; then wget http://pypi.python.org/packages/source/s/stringtemplate3/stringtemplate3-$ST_VERSION.tar.gz fi (cd tmp; tar xzf ../stringtemplate3-$ST_VERSION.tar.gz) (cd tmp/stringtemplate3-$ST_VERSION; python setup.py install --install-lib=$WORKSPACE) # antlr2 if [ ! 
-f antlr-$ANTLR2_VERSION.tar.gz ]; then wget http://www.antlr2.org/download/antlr-$ANTLR2_VERSION.tar.gz fi (cd tmp; tar xzf ../antlr-$ANTLR2_VERSION.tar.gz) (cd tmp/antlr-$ANTLR2_VERSION/lib/python; python setup.py install --install-lib=$WORKSPACE) export CLASSPATH=$JAR echo "************************************************************************" echo "Running the testsuite" echo cd $WORKSPACE rm -fr testout/ mkdir -p testout/ python setup.py unittest --xml-output=testout/ python setup.py functest --xml-output=testout/ --antlr-jar="$JAR" echo "************************************************************************" echo "Running pylint" echo cd $WORKSPACE pylint --rcfile=pylintrc --output-format=parseable --include-ids=yes antlr3 | tee pylint-report.txt echo "************************************************************************" echo "Building dist files" echo cd $WORKSPACE rm -f dist/* cp -f $JAR dist/ python setup.py sdist --formats=gztar,zip for PYTHON in /usr/bin/python2.?; do $PYTHON setup.py bdist_egg done python-antlr3-3.5.2/mkdoxy.sh0000755000175000017500000000063612653072152014627 0ustar zigozigo#!/bin/bash if [ -e doxygen.sh ]; then . doxygen.sh fi rm -fr build/doc mkdir -p build/doc/antlr3 for f in __init__ exceptions constants dfa tokens streams recognizers; do sed -e '/begin\[licence\]/,/end\[licence\]/d' antlr3/$f.py \ >>build/doc/antlr3.py done touch build/doc/antlr3/__init__.py cp -f antlr3/tree.py build/doc/antlr3 cp -f antlr3/treewizard.py build/doc/antlr3 doxygen doxyfile python-antlr3-3.5.2/MANIFEST.in0000644000175000017500000000004512653072152014505 0ustar zigozigoinclude LICENSE AUTHORS ez_setup.py python-antlr3-3.5.2/xmlrunner.py0000644000175000017500000003037512653072152015364 0ustar zigozigo""" XML Test Runner for PyUnit """ # Written by Sebastian Rittau and placed in # the Public Domain. With contributions by Paolo Borelli. __revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $" import os.path import re import sys import time import traceback import unittest from StringIO import StringIO from xml.sax.saxutils import escape from StringIO import StringIO class _TestInfo(object): """Information about a particular test. Used by _XMLTestResult. """ def __init__(self, test, time): (self._class, self._method) = test.id().rsplit(".", 1) self._time = time self._error = None self._failure = None @staticmethod def create_success(test, time): """Create a _TestInfo instance for a successful test.""" return _TestInfo(test, time) @staticmethod def create_failure(test, time, failure): """Create a _TestInfo instance for a failed test.""" info = _TestInfo(test, time) info._failure = failure return info @staticmethod def create_error(test, time, error): """Create a _TestInfo instance for an erroneous test.""" info = _TestInfo(test, time) info._error = error return info def print_report(self, stream): """Print information about this test case in XML format to the supplied stream. 
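
# Illustrative: xml.sax.saxutils.escape (imported above, used by
# _print_error() below) keeps captured failure text well-formed inside
# the XML report:
#
#   from xml.sax.saxutils import escape
#   escape('assert a < b & b <= c')   # -> 'assert a &lt; b &amp; b &lt;= c'
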
""" stream.write(' ' % \ { "class": self._class, "method": self._method, "time": self._time, }) if self._failure != None: self._print_error(stream, 'failure', self._failure) if self._error != None: self._print_error(stream, 'error', self._error) stream.write('\n') def _print_error(self, stream, tagname, error): """Print information from a failure or error to the supplied stream.""" text = escape(str(error[1])) stream.write('\n') stream.write(' <%s type="%s">%s\n' \ % (tagname, str(error[0]), text)) tb_stream = StringIO() traceback.print_tb(error[2], None, tb_stream) stream.write(escape(tb_stream.getvalue())) stream.write(' \n' % tagname) stream.write(' ') class _XMLTestResult(unittest.TestResult): """A test result class that stores result as XML. Used by XMLTestRunner. """ def __init__(self, classname): unittest.TestResult.__init__(self) self._test_name = classname self._start_time = None self._tests = [] self._error = None self._failure = None def startTest(self, test): unittest.TestResult.startTest(self, test) self._error = None self._failure = None self._start_time = time.time() def stopTest(self, test): time_taken = time.time() - self._start_time unittest.TestResult.stopTest(self, test) if self._error: info = _TestInfo.create_error(test, time_taken, self._error) elif self._failure: info = _TestInfo.create_failure(test, time_taken, self._failure) else: info = _TestInfo.create_success(test, time_taken) self._tests.append(info) def addError(self, test, err): unittest.TestResult.addError(self, test, err) self._error = err def addFailure(self, test, err): unittest.TestResult.addFailure(self, test, err) self._failure = err def print_report(self, stream, time_taken, out, err): """Prints the XML report to the supplied stream. The time the tests took to perform as well as the captured standard output and standard error streams must be passed in.a """ stream.write('\n' % \ { "n": self._test_name, "t": self.testsRun, "time": time_taken, }) for info in self._tests: info.print_report(stream) stream.write(' \n' % out) stream.write(' \n' % err) stream.write('\n') class XMLTestRunner(object): """A test runner that stores results in XML format compatible with JUnit. XMLTestRunner(stream=None) -> XML test runner The XML file is written to the supplied stream. If stream is None, the results are stored in a file called TEST-..xml in the current working directory (if not overridden with the path property), where and are the module and class name of the test class. """ def __init__(self, stream=None): self._stream = stream self._path = "." def run(self, test): """Run the given test case or test suite.""" class_ = test.__class__ classname = class_.__module__ + "." 
+ class_.__name__ if self._stream == None: filename = "TEST-%s.xml" % classname stream = file(os.path.join(self._path, filename), "w") stream.write('\n') else: stream = self._stream result = _XMLTestResult(classname) start_time = time.time() # TODO: Python 2.5: Use the with statement old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = StringIO() sys.stderr = StringIO() try: test(result) try: out_s = sys.stdout.getvalue() except AttributeError: out_s = "" try: err_s = sys.stderr.getvalue() except AttributeError: err_s = "" finally: sys.stdout = old_stdout sys.stderr = old_stderr time_taken = time.time() - start_time result.print_report(stream, time_taken, out_s, err_s) if self._stream == None: stream.close() return result def _set_path(self, path): self._path = path path = property(lambda self: self._path, _set_path, None, """The path where the XML files are stored. This property is ignored when the XML file is written to a file stream.""") class XMLTestRunnerTest(unittest.TestCase): def setUp(self): self._stream = StringIO() def _try_test_run(self, test_class, expected): """Run the test suite against the supplied test class and compare the XML result against the expected XML string. Fail if the expected string doesn't match the actual string. All time attribute in the expected string should have the value "0.000". All error and failure messages are reduced to "Foobar". """ runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(test_class)) got = self._stream.getvalue() # Replace all time="X.YYY" attributes by time="0.000" to enable a # simple string comparison. got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got) # Likewise, replace all failure and error messages by a simple "Foobar" # string. got = re.sub(r'(?s).*?', r'Foobar', got) got = re.sub(r'(?s).*?', r'Foobar', got) self.assertEqual(expected, got) def test_no_tests(self): """Regression test: Check whether a test run without any tests matches a previous run. """ class TestTest(unittest.TestCase): pass self._try_test_run(TestTest, """ """) def test_success(self): """Regression test: Check whether a test run with a successful test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): pass self._try_test_run(TestTest, """ """) def test_failure(self): """Regression test: Check whether a test run with a failing test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): self.assert_(False) self._try_test_run(TestTest, """ Foobar """) def test_error(self): """Regression test: Check whether a test run with a erroneous test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): raise IndexError() self._try_test_run(TestTest, """ Foobar """) def test_stdout_capture(self): """Regression test: Check whether a test run with output to stdout matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): print "Test" self._try_test_run(TestTest, """ """) def test_stderr_capture(self): """Regression test: Check whether a test run with output to stderr matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): print >>sys.stderr, "Test" self._try_test_run(TestTest, """ """) class NullStream(object): """A file-like object that discards everything written to it.""" def write(self, buffer): pass def test_unittests_changing_stdout(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stdout, but don't change it back properly. 
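
# Normalizing volatile attributes before comparing against a golden
# string, as _try_test_run() does above (illustrative, standalone):
#
#   import re
#   got = '<testsuite errors="0" tests="1" time="0.734">'
#   re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
#   # -> '<testsuite errors="0" tests="1" time="0.000">'
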
""" class TestTest(unittest.TestCase): def test_foo(self): sys.stdout = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) def test_unittests_changing_stderr(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stderr, but don't change it back properly. """ class TestTest(unittest.TestCase): def test_foo(self): sys.stderr = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) class XMLTestProgram(unittest.TestProgram): def runTests(self): if self.testRunner is None: self.testRunner = XMLTestRunner() unittest.TestProgram.runTests(self) main = XMLTestProgram if __name__ == "__main__": main(module=None) python-antlr3-3.5.2/antlr3/0000755000175000017500000000000012653072152014153 5ustar zigozigopython-antlr3-3.5.2/antlr3/dfa.py0000644000175000017500000001671712653072152015273 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licensc] from antlr3.constants import EOF from antlr3.exceptions import NoViableAltException, BacktrackingFailed class DFA(object): """@brief A DFA implemented as a set of transition tables. Any state that has a semantic predicate edge is special; those states are generated with if-then-else structures in a specialStateTransition() which is generated by cyclicDFA template. """ def __init__( self, recognizer, decisionNumber, eot, eof, min, max, accept, special, transition ): ## Which recognizer encloses this DFA? Needed to check backtracking self.recognizer = recognizer self.decisionNumber = decisionNumber self.eot = eot self.eof = eof self.min = min self.max = max self.accept = accept self.special = special self.transition = transition def predict(self, input): """ From the input stream, predict what alternative will succeed using this DFA (representing the covering regular approximation to the underlying CFL). Return an alternative number 1..n. Throw an exception upon error. 
""" mark = input.mark() s = 0 # we always start at s0 try: for _ in xrange(50000): #print "***Current state = %d" % s specialState = self.special[s] if specialState >= 0: #print "is special" s = self.specialStateTransition(specialState, input) if s == -1: self.noViableAlt(s, input) return 0 input.consume() continue if self.accept[s] >= 1: #print "accept state for alt %d" % self.accept[s] return self.accept[s] # look for a normal char transition c = input.LA(1) #print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF') #print "range = %d..%d" % (self.min[s], self.max[s]) if c >= self.min[s] and c <= self.max[s]: # move to next state snext = self.transition[s][c-self.min[s]] #print "in range, next state = %d" % snext if snext < 0: #print "not a normal transition" # was in range but not a normal transition # must check EOT, which is like the else clause. # eot[s]>=0 indicates that an EOT edge goes to another # state. if self.eot[s] >= 0: # EOT Transition to accept state? #print "EOT trans to accept state %d" % self.eot[s] s = self.eot[s] input.consume() # TODO: I had this as return accept[eot[s]] # which assumed here that the EOT edge always # went to an accept...faster to do this, but # what about predicated edges coming from EOT # target? continue #print "no viable alt" self.noViableAlt(s, input) return 0 s = snext input.consume() continue if self.eot[s] >= 0: #print "EOT to %d" % self.eot[s] s = self.eot[s] input.consume() continue # EOF Transition to accept state? if c == EOF and self.eof[s] >= 0: #print "EOF Transition to accept state %d" \ # % self.accept[self.eof[s]] return self.accept[self.eof[s]] # not in range and not EOF/EOT, must be invalid symbol self.noViableAlt(s, input) return 0 else: raise RuntimeError("DFA bang!") finally: input.rewind(mark) def noViableAlt(self, s, input): if self.recognizer._state.backtracking > 0: raise BacktrackingFailed nvae = NoViableAltException( self.getDescription(), self.decisionNumber, s, input ) self.error(nvae) raise nvae def error(self, nvae): """A hook for debugging interface""" pass def specialStateTransition(self, s, input): return -1 def getDescription(self): return "n/a" ## def specialTransition(self, state, symbol): ## return 0 def unpack(cls, string): """@brief Unpack the runlength encoded table data. Terence implemented packed table initializers, because Java has a size restriction on .class files and the lookup tables can grow pretty large. The generated JavaLexer.java of the Java.g example would be about 15MB with uncompressed array initializers. Python does not have any size restrictions, but the compilation of such large source files seems to be pretty memory hungry. The memory consumption of the python process grew to >1.5GB when importing a 15MB lexer, eating all my swap space and I was to impacient to see, if it could finish at all. With packed initializers that are unpacked at import time of the lexer module, everything works like a charm. """ ret = [] for i in range(len(string) / 2): (n, v) = ord(string[i*2]), ord(string[i*2+1]) # Is there a bitwise operation to do this? if v == 0xFFFF: v = -1 ret += [v] * n return ret unpack = classmethod(unpack) python-antlr3-3.5.2/antlr3/main.py0000644000175000017500000002146112653072152015455 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. 
Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] import sys import optparse import antlr3 class _Main(object): def __init__(self): self.stdin = sys.stdin self.stdout = sys.stdout self.stderr = sys.stderr def parseOptions(self, argv): optParser = optparse.OptionParser() optParser.add_option( "--encoding", action="store", type="string", dest="encoding" ) optParser.add_option( "--input", action="store", type="string", dest="input" ) optParser.add_option( "--interactive", "-i", action="store_true", dest="interactive" ) optParser.add_option( "--no-output", action="store_true", dest="no_output" ) optParser.add_option( "--profile", action="store_true", dest="profile" ) optParser.add_option( "--hotshot", action="store_true", dest="hotshot" ) optParser.add_option( "--port", type="int", dest="port", default=None ) optParser.add_option( "--debug-socket", action='store_true', dest="debug_socket", default=None ) self.setupOptions(optParser) return optParser.parse_args(argv[1:]) def setupOptions(self, optParser): pass def execute(self, argv): options, args = self.parseOptions(argv) self.setUp(options) if options.interactive: while True: try: input = raw_input(">>> ") except (EOFError, KeyboardInterrupt): self.stdout.write("\nBye.\n") break inStream = antlr3.ANTLRStringStream(input) self.parseStream(options, inStream) else: if options.input is not None: inStream = antlr3.ANTLRStringStream(options.input) elif len(args) == 1 and args[0] != '-': inStream = antlr3.ANTLRFileStream( args[0], encoding=options.encoding ) else: inStream = antlr3.ANTLRInputStream( self.stdin, encoding=options.encoding ) if options.profile: try: import cProfile as profile except ImportError: import profile profile.runctx( 'self.parseStream(options, inStream)', globals(), locals(), 'profile.dat' ) import pstats stats = pstats.Stats('profile.dat') stats.strip_dirs() stats.sort_stats('time') stats.print_stats(100) elif options.hotshot: import hotshot profiler = hotshot.Profile('hotshot.dat') profiler.runctx( 'self.parseStream(options, inStream)', globals(), locals() ) else: self.parseStream(options, inStream) def setUp(self, options): pass def parseStream(self, options, inStream): raise NotImplementedError def write(self, options, text): if not options.no_output: self.stdout.write(text) def writeln(self, options, text): self.write(options, text + '\n') class LexerMain(_Main): 
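
# The --profile branch above is the stock cProfile/pstats recipe; an
# illustrative standalone version, with parse() standing in for the
# actual parse call:
#
#   import cProfile, pstats
#
#   cProfile.runctx('parse()', globals(), locals(), 'profile.dat')
#   stats = pstats.Stats('profile.dat')
#   stats.strip_dirs()
#   stats.sort_stats('time')    # hottest functions first
#   stats.print_stats(100)
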
def __init__(self, lexerClass): _Main.__init__(self) self.lexerClass = lexerClass def parseStream(self, options, inStream): lexer = self.lexerClass(inStream) for token in lexer: self.writeln(options, str(token)) class ParserMain(_Main): def __init__(self, lexerClassName, parserClass): _Main.__init__(self) self.lexerClassName = lexerClassName self.lexerClass = None self.parserClass = parserClass def setupOptions(self, optParser): optParser.add_option( "--lexer", action="store", type="string", dest="lexerClass", default=self.lexerClassName ) optParser.add_option( "--rule", action="store", type="string", dest="parserRule" ) def setUp(self, options): lexerMod = __import__(options.lexerClass) self.lexerClass = getattr(lexerMod, options.lexerClass) def parseStream(self, options, inStream): kwargs = {} if options.port is not None: kwargs['port'] = options.port if options.debug_socket is not None: kwargs['debug_socket'] = sys.stderr lexer = self.lexerClass(inStream) tokenStream = antlr3.CommonTokenStream(lexer) parser = self.parserClass(tokenStream, **kwargs) result = getattr(parser, options.parserRule)() if result is not None: if hasattr(result, 'tree') and result.tree is not None: self.writeln(options, result.tree.toStringTree()) else: self.writeln(options, repr(result)) class WalkerMain(_Main): def __init__(self, walkerClass): _Main.__init__(self) self.lexerClass = None self.parserClass = None self.walkerClass = walkerClass def setupOptions(self, optParser): optParser.add_option( "--lexer", action="store", type="string", dest="lexerClass", default=None ) optParser.add_option( "--parser", action="store", type="string", dest="parserClass", default=None ) optParser.add_option( "--parser-rule", action="store", type="string", dest="parserRule", default=None ) optParser.add_option( "--rule", action="store", type="string", dest="walkerRule" ) def setUp(self, options): lexerMod = __import__(options.lexerClass) self.lexerClass = getattr(lexerMod, options.lexerClass) parserMod = __import__(options.parserClass) self.parserClass = getattr(parserMod, options.parserClass) def parseStream(self, options, inStream): lexer = self.lexerClass(inStream) tokenStream = antlr3.CommonTokenStream(lexer) parser = self.parserClass(tokenStream) result = getattr(parser, options.parserRule)() if result is not None: assert hasattr(result, 'tree'), "Parser did not return an AST" nodeStream = antlr3.tree.CommonTreeNodeStream(result.tree) nodeStream.setTokenStream(tokenStream) walker = self.walkerClass(nodeStream) result = getattr(walker, options.walkerRule)() if result is not None: if hasattr(result, 'tree'): self.writeln(options, result.tree.toStringTree()) else: self.writeln(options, repr(result)) python-antlr3-3.5.2/antlr3/streams.py0000644000175000017500000013030012653072152016200 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. 
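
# Illustrative driver for a hypothetical grammar "T.g": the generated
# TLexer.py/TParser.py modules are assumptions here, not files shipped
# with this package.
#
#   import sys
#   from TParser import TParser
#   from antlr3.main import ParserMain
#
#   main = ParserMain('TLexer', TParser)
#   main.execute(sys.argv)   # e.g. prog --rule prog input.txt
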
The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] import codecs from StringIO import StringIO from antlr3.constants import DEFAULT_CHANNEL, EOF from antlr3.tokens import Token, CommonToken ############################################################################ # # basic interfaces # IntStream # +- CharStream # \- TokenStream # # subclasses must implemented all methods # ############################################################################ class IntStream(object): """ @brief Base interface for streams of integer values. A simple stream of integers used when all I care about is the char or token type sequence (such as interpretation). """ def consume(self): raise NotImplementedError def LA(self, i): """Get int at current input pointer + i ahead where i=1 is next int. Negative indexes are allowed. LA(-1) is previous token (token just matched). LA(-i) where i is before first token should yield -1, invalid char / EOF. """ raise NotImplementedError def mark(self): """ Tell the stream to start buffering if it hasn't already. Return current input position, index(), or some other marker so that when passed to rewind() you get back to the same spot. rewind(mark()) should not affect the input cursor. The Lexer track line/col info as well as input index so its markers are not pure input indexes. Same for tree node streams. """ raise NotImplementedError def index(self): """ Return the current input symbol index 0..n where n indicates the last symbol has been read. The index is the symbol about to be read not the most recently read symbol. """ raise NotImplementedError def rewind(self, marker=None): """ Reset the stream so that next call to index would return marker. The marker will usually be index() but it doesn't have to be. It's just a marker to indicate what state the stream was in. This is essentially calling release() and seek(). If there are markers created after this marker argument, this routine must unroll them like a stack. Assume the state the stream was in when this marker was created. If marker is None: Rewind to the input position of the last marker. Used currently only after a cyclic DFA and just before starting a sem/syn predicate to get the input position back to the start of the decision. Do not "pop" the marker off the state. mark(i) and rewind(i) should balance still. It is like invoking rewind(last marker) but it should not "pop" the marker off. It's like seek(last marker's input position). """ raise NotImplementedError def release(self, marker=None): """ You may want to commit to a backtrack but don't want to force the stream to keep bookkeeping objects around for a marker that is no longer necessary. 
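
# Illustrative mark/rewind round trip, using the concrete
# ANTLRStringStream defined later in this module:
#
#   stream = ANTLRStringStream(u'abc')
#   stream.consume()        # LA(1) is now ord('b')
#   m = stream.mark()
#   stream.consume()        # LA(1) is now ord('c')
#   stream.rewind(m)        # back where we marked: LA(1) == ord('b')
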
This will have the same behavior as rewind() except it releases resources without the backward seek. This must throw away resources for all markers back to the marker argument. So if you're nested 5 levels of mark(), and then release(2) you have to release resources for depths 2..5. """ raise NotImplementedError def seek(self, index): """ Set the input cursor to the position indicated by index. This is normally used to seek ahead in the input stream. No buffering is required to do this unless you know your stream will use seek to move backwards such as when backtracking. This is different from rewind in its multi-directional requirement and in that its argument is strictly an input cursor (index). For char streams, seeking forward must update the stream state such as line number. For seeking backwards, you will be presumably backtracking using the mark/rewind mechanism that restores state and so this method does not need to update state when seeking backwards. Currently, this method is only used for efficient backtracking using memoization, but in the future it may be used for incremental parsing. The index is 0..n-1. A seek to position i means that LA(1) will return the ith symbol. So, seeking to 0 means LA(1) will return the first element in the stream. """ raise NotImplementedError def size(self): """ Only makes sense for streams that buffer everything up probably, but might be useful to display the entire stream or for testing. This value includes a single EOF. """ raise NotImplementedError def getSourceName(self): """ Where are you getting symbols from? Normally, implementations will pass the buck all the way to the lexer who can ask its input stream for the file name or whatever. """ raise NotImplementedError class CharStream(IntStream): """ @brief A source of characters for an ANTLR lexer. This is an abstract class that must be implemented by a subclass. """ # pylint does not realize that this is an interface, too #pylint: disable-msg=W0223 EOF = -1 def substring(self, start, stop): """ For infinite streams, you don't need this; primarily I'm providing a useful interface for action code. Just make sure actions don't use this on streams that don't support it. """ raise NotImplementedError def LT(self, i): """ Get the ith character of lookahead. This is the same usually as LA(i). This will be used for labels in the generated lexer code. I'd prefer to return a char here type-wise, but it's probably better to be 32-bit clean and be consistent with LA. """ raise NotImplementedError def getLine(self): """ANTLR tracks the line information automatically""" raise NotImplementedError def setLine(self, line): """ Because this stream can rewind, we need to be able to reset the line """ raise NotImplementedError def getCharPositionInLine(self): """ The index of the character relative to the beginning of the line 0..n-1 """ raise NotImplementedError def setCharPositionInLine(self, pos): raise NotImplementedError class TokenStream(IntStream): """ @brief A stream of tokens accessing tokens from a TokenSource This is an abstract class that must be implemented by a subclass. """ # pylint does not realize that this is an interface, too #pylint: disable-msg=W0223 def LT(self, k): """ Get Token at current input pointer + i ahead where i=1 is next Token. i<0 indicates tokens in the past. So -1 is previous token and -2 is two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken. Return null for LT(0) and any index that results in an absolute address that is negative. 
""" raise NotImplementedError def range(self): """ How far ahead has the stream been asked to look? The return value is a valid index from 0..n-1. """ raise NotImplementedError def get(self, i): """ Get a token at an absolute index i; 0..n-1. This is really only needed for profiling and debugging and token stream rewriting. If you don't want to buffer up tokens, then this method makes no sense for you. Naturally you can't use the rewrite stream feature. I believe DebugTokenStream can easily be altered to not use this method, removing the dependency. """ raise NotImplementedError def getTokenSource(self): """ Where is this stream pulling tokens from? This is not the name, but the object that provides Token objects. """ raise NotImplementedError def toString(self, start=None, stop=None): """ Return the text of all tokens from start to stop, inclusive. If the stream does not buffer all the tokens then it can just return "" or null; Users should not access $ruleLabel.text in an action of course in that case. Because the user is not required to use a token with an index stored in it, we must provide a means for two token objects themselves to indicate the start/end location. Most often this will just delegate to the other toString(int,int). This is also parallel with the TreeNodeStream.toString(Object,Object). """ raise NotImplementedError ############################################################################ # # character streams for use in lexers # CharStream # \- ANTLRStringStream # ############################################################################ class ANTLRStringStream(CharStream): """ @brief CharStream that pull data from a unicode string. A pretty quick CharStream that pulls all data from an array directly. Every method call counts in the lexer. """ def __init__(self, data): """ @param data This should be a unicode string holding the data you want to parse. If you pass in a byte string, the Lexer will choke on non-ascii data. """ CharStream.__init__(self) # The data being scanned self.strdata = unicode(data) self.data = [ord(c) for c in self.strdata] # How many characters are actually in the buffer self.n = len(data) # 0..n-1 index into string of next char self.p = 0 # line number 1..n within the input self.line = 1 # The index of the character relative to the beginning of the # line 0..n-1 self.charPositionInLine = 0 # A list of CharStreamState objects that tracks the stream state # values line, charPositionInLine, and p that can change as you # move through the input stream. Indexed from 0..markDepth-1. self._markers = [ ] self.lastMarker = None self.markDepth = 0 # What is name or source of this char stream? self.name = None def reset(self): """ Reset the stream so that it's in the same state it was when the object was created *except* the data array is not touched. 
""" self.p = 0 self.line = 1 self.charPositionInLine = 0 self._markers = [ ] def consume(self): try: if self.data[self.p] == 10: # \n self.line += 1 self.charPositionInLine = 0 else: self.charPositionInLine += 1 self.p += 1 except IndexError: # happend when we reached EOF and self.data[self.p] fails # just do nothing pass def LA(self, i): if i == 0: return 0 # undefined if i < 0: i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] try: return self.data[self.p+i-1] except IndexError: return EOF def LT(self, i): if i == 0: return 0 # undefined if i < 0: i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1] try: return self.strdata[self.p+i-1] except IndexError: return EOF def index(self): """ Return the current input symbol index 0..n where n indicates the last symbol has been read. The index is the index of char to be returned from LA(1). """ return self.p def size(self): return self.n def mark(self): state = (self.p, self.line, self.charPositionInLine) try: self._markers[self.markDepth] = state except IndexError: self._markers.append(state) self.markDepth += 1 self.lastMarker = self.markDepth return self.lastMarker def rewind(self, marker=None): if marker is None: marker = self.lastMarker p, line, charPositionInLine = self._markers[marker-1] self.seek(p) self.line = line self.charPositionInLine = charPositionInLine self.release(marker) def release(self, marker=None): if marker is None: marker = self.lastMarker self.markDepth = marker-1 def seek(self, index): """ consume() ahead until p==index; can't just set p=index as we must update line and charPositionInLine. """ if index <= self.p: self.p = index # just jump; don't update stream state (line, ...) return # seek forward, consume until p hits index while self.p < index: self.consume() def substring(self, start, stop): return self.strdata[start:stop+1] def getLine(self): """Using setter/getter methods is deprecated. Use o.line instead.""" return self.line def getCharPositionInLine(self): """ Using setter/getter methods is deprecated. Use o.charPositionInLine instead. """ return self.charPositionInLine def setLine(self, line): """Using setter/getter methods is deprecated. Use o.line instead.""" self.line = line def setCharPositionInLine(self, pos): """ Using setter/getter methods is deprecated. Use o.charPositionInLine instead. """ self.charPositionInLine = pos def getSourceName(self): return self.name class ANTLRFileStream(ANTLRStringStream): """ @brief CharStream that opens a file to read the data. This is a char buffer stream that is loaded from a file all at once when you construct the object. """ def __init__(self, fileName, encoding=None): """ @param fileName The path to the file to be opened. The file will be opened with mode 'rb'. @param encoding If you set the optional encoding argument, then the data will be decoded on the fly. """ self.fileName = fileName fp = codecs.open(fileName, 'rb', encoding) try: data = fp.read() finally: fp.close() ANTLRStringStream.__init__(self, data) def getSourceName(self): """Deprecated, access o.fileName directly.""" return self.fileName class ANTLRInputStream(ANTLRStringStream): """ @brief CharStream that reads data from a file-like object. This is a char buffer stream that is loaded from a file like object all at once when you construct the object. All input is consumed from the file, but it is not closed. """ def __init__(self, file, encoding=None): """ @param file A file-like object holding your input. Only the read() method must be implemented. 
@param encoding If you set the optional encoding argument, then the data will be decoded on the fly. """ if encoding is not None: # wrap input in a decoding reader reader = codecs.lookup(encoding)[2] file = reader(file) data = file.read() ANTLRStringStream.__init__(self, data) # I guess the ANTLR prefix exists only to avoid a name clash with some Java # mumbojumbo. A plain "StringStream" looks better to me, which should be # the preferred name in Python. StringStream = ANTLRStringStream FileStream = ANTLRFileStream InputStream = ANTLRInputStream ############################################################################ # # Token streams # TokenStream # +- CommonTokenStream # \- TokenRewriteStream # ############################################################################ class CommonTokenStream(TokenStream): """ @brief The most common stream of tokens The most common stream of tokens is one where every token is buffered up and tokens are prefiltered for a certain channel (the parser will only see these tokens and cannot change the filter channel number during the parse). """ def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): """ @param tokenSource A TokenSource instance (usually a Lexer) to pull the tokens from. @param channel Skip tokens on any channel but this one; this is how we skip whitespace... """ TokenStream.__init__(self) self.tokenSource = tokenSource # Record every single token pulled from the source so we can reproduce # chunks of it later. self.tokens = [] # Map to override some Tokens' channel numbers self.channelOverrideMap = {} # Set; discard any tokens with this type self.discardSet = set() # Skip tokens on any channel but this one; this is how we skip # whitespace... self.channel = channel # By default, track all incoming tokens self.discardOffChannelTokens = False # The index into the tokens list of the current token (next token # to consume). p==-1 indicates that the tokens list is empty self.p = -1 # Remember last marked position self.lastMarker = None # how deep have we gone? self._range = -1 def makeEOFToken(self): return self.tokenSource.makeEOFToken() def setTokenSource(self, tokenSource): """Reset this token stream by setting its token source.""" self.tokenSource = tokenSource self.tokens = [] self.p = -1 self.channel = DEFAULT_CHANNEL def reset(self): self.p = 0 self.lastMarker = None def fillBuffer(self): """ Load all tokens from the token source and put in tokens. This is done upon first LT request because you might want to set some token type / channel overrides before filling buffer. """ index = 0 t = self.tokenSource.nextToken() while t is not None and t.type != EOF: discard = False if self.discardSet is not None and t.type in self.discardSet: discard = True elif self.discardOffChannelTokens and t.channel != self.channel: discard = True # is there a channel override for token type? try: overrideChannel = self.channelOverrideMap[t.type] except KeyError: # no override for this type pass else: if overrideChannel == self.channel: t.channel = overrideChannel else: discard = True if not discard: t.index = index self.tokens.append(t) index += 1 t = self.tokenSource.nextToken() # leave p pointing at first token on channel self.p = 0 self.p = self.skipOffTokenChannels(self.p) def consume(self): """ Move the input pointer to the next incoming token. The stream must become active with LT(1) available. consume() simply moves the input pointer so that LT(1) points at the next input symbol. Consume at least one token. 
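
# Illustrative wiring, with MyLexer a placeholder for a generated lexer
# class:
#
#   lexer = MyLexer(ANTLRStringStream(u'a b c'))
#   tokens = CommonTokenStream(lexer)
#   tokens.LT(1)   # first lookahead triggers fillBuffer(); tokens on
#                  # channels other than self.channel are skipped over
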
        Walk past any token not on the channel the parser is listening to.
        """

        if self.p < len(self.tokens):
            self.p += 1

            self.p = self.skipOffTokenChannels(self.p)  # leave p on valid token

    def skipOffTokenChannels(self, i):
        """
        Given a starting index, return the index of the first on-channel
        token.
        """

        try:
            while self.tokens[i].channel != self.channel:
                i += 1
        except IndexError:
            # hit the end of token stream
            pass

        return i

    def skipOffTokenChannelsReverse(self, i):
        while i >= 0 and self.tokens[i].channel != self.channel:
            i -= 1

        return i

    def setTokenTypeChannel(self, ttype, channel):
        """
        A simple filter mechanism whereby you can tell this token stream
        to force all tokens of type ttype to be on channel. For example,
        when interpreting, we cannot exec actions so we need to tell
        the stream to force all WS and NEWLINE to be a different, ignored
        channel.
        """

        self.channelOverrideMap[ttype] = channel

    def discardTokenType(self, ttype):
        self.discardSet.add(ttype)

    def getTokens(self, start=None, stop=None, types=None):
        """
        Given a start and stop index, return a list of all tokens in
        the token type set. Return None if no tokens were found. This
        method looks at both on and off channel tokens.
        """

        if self.p == -1:
            self.fillBuffer()

        if stop is None or stop > len(self.tokens):
            stop = len(self.tokens)

        if start is None or start < 0:
            start = 0

        if start > stop:
            return None

        if isinstance(types, (int, long)):
            # called with a single type, wrap into set
            types = set([types])

        filteredTokens = [
            token for token in self.tokens[start:stop]
            if types is None or token.type in types
            ]

        if len(filteredTokens) == 0:
            return None

        return filteredTokens

    def LT(self, k):
        """
        Get the kth token from the current position 1..n where k=1 is the
        first symbol of lookahead.
        """

        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if k < 0:
            return self.LB(-k)

        i = self.p
        n = 1
        # find k good tokens
        while n < k:
            # skip off-channel tokens
            i = self.skipOffTokenChannels(i+1)  # leave p on valid token
            n += 1

        if i > self._range:
            self._range = i

        try:
            return self.tokens[i]
        except IndexError:
            return self.makeEOFToken()

    def LB(self, k):
        """Look backwards k on-channel tokens"""

        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if self.p - k < 0:
            return None

        i = self.p
        n = 1
        # find k good tokens looking backwards
        while n <= k:
            # skip off-channel tokens
            i = self.skipOffTokenChannelsReverse(i-1)  # leave p on valid token
            n += 1

        if i < 0:
            return None

        return self.tokens[i]

    def get(self, i):
        """
        Return absolute token i; ignore which channel the tokens are on;
        that is, count all tokens not just on-channel tokens.
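
        A rough sketch of how this differs from LT() (a lexer that puts
        whitespace on a hidden channel is assumed):

            tokens = CommonTokenStream(lexer)
            tokens.fillBuffer()    # get() does not fill the buffer itself
            tokens.get(1)          # may return a hidden-channel WS token
            tokens.LT(2)           # on-channel lookahead only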
""" return self.tokens[i] def slice(self, start, stop): if self.p == -1: self.fillBuffer() if start < 0 or stop < 0: return None return self.tokens[start:stop+1] def LA(self, i): return self.LT(i).type def mark(self): self.lastMarker = self.index() return self.lastMarker def release(self, marker=None): # no resources to release pass def size(self): return len(self.tokens) def range(self): return self._range def index(self): return self.p def rewind(self, marker=None): if marker is None: marker = self.lastMarker self.seek(marker) def seek(self, index): self.p = index def getTokenSource(self): return self.tokenSource def getSourceName(self): return self.tokenSource.getSourceName() def toString(self, start=None, stop=None): if self.p == -1: self.fillBuffer() if start is None: start = 0 elif not isinstance(start, int): start = start.index if stop is None: stop = len(self.tokens) - 1 elif not isinstance(stop, int): stop = stop.index if stop >= len(self.tokens): stop = len(self.tokens) - 1 return ''.join([t.text for t in self.tokens[start:stop+1]]) class RewriteOperation(object): """@brief Internal helper class.""" def __init__(self, stream, index, text): self.stream = stream # What index into rewrites List are we? self.instructionIndex = None # Token buffer index. self.index = index self.text = text def execute(self, buf): """Execute the rewrite operation by possibly adding to the buffer. Return the index of the next token to operate on. """ return self.index def toString(self): opName = self.__class__.__name__ return '<%s@%d:"%s">' % ( opName, self.index, self.text) __str__ = toString __repr__ = toString class InsertBeforeOp(RewriteOperation): """@brief Internal helper class.""" def execute(self, buf): buf.write(self.text) if self.stream.tokens[self.index].type != EOF: buf.write(self.stream.tokens[self.index].text) return self.index + 1 class ReplaceOp(RewriteOperation): """ @brief Internal helper class. I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp instructions. """ def __init__(self, stream, first, last, text): RewriteOperation.__init__(self, stream, first, text) self.lastIndex = last def execute(self, buf): if self.text is not None: buf.write(self.text) return self.lastIndex + 1 def toString(self): if self.text is None: return '' % (self.index, self.lastIndex) return '' % ( self.index, self.lastIndex, self.text) __str__ = toString __repr__ = toString class TokenRewriteStream(CommonTokenStream): """@brief CommonTokenStream that can be modified. Useful for dumping out the input stream after doing some augmentation or other manipulations. You can insert stuff, replace, and delete chunks. Note that the operations are done lazily--only if you convert the buffer to a String. This is very efficient because you are not moving data around all the time. As the buffer of tokens is converted to strings, the toString() method(s) check to see if there is an operation at the current index. If so, the operation is done and then normal String rendering continues on the buffer. This is like having multiple Turing machine instruction streams (programs) operating on a single input tape. :) Since the operations are done lazily at toString-time, operations do not screw up the token index values. That is, an insert operation at token index i does not change the index values for tokens i+1..n-1. Because operations never actually alter the buffer, you may always get the original token stream back without undoing anything. 
Since the instructions are queued up, you can easily simulate transactions and roll back any changes if there is an error just by removing instructions. For example, CharStream input = new ANTLRFileStream("input"); TLexer lex = new TLexer(input); TokenRewriteStream tokens = new TokenRewriteStream(lex); T parser = new T(tokens); parser.startRule(); Then in the rules, you can execute Token t,u; ... input.insertAfter(t, "text to put after t");} input.insertAfter(u, "text after u");} System.out.println(tokens.toString()); Actually, you have to cast the 'input' to a TokenRewriteStream. :( You can also have multiple "instruction streams" and get multiple rewrites from a single pass over the input. Just name the instruction streams and use that name again when printing the buffer. This could be useful for generating a C file and also its header file--all from the same buffer: tokens.insertAfter("pass1", t, "text to put after t");} tokens.insertAfter("pass2", u, "text after u");} System.out.println(tokens.toString("pass1")); System.out.println(tokens.toString("pass2")); If you don't use named rewrite streams, a "default" stream is used as the first example shows. """ DEFAULT_PROGRAM_NAME = "default" MIN_TOKEN_INDEX = 0 def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL): CommonTokenStream.__init__(self, tokenSource, channel) # You may have multiple, named streams of rewrite operations. # I'm calling these things "programs." # Maps String (name) -> rewrite (List) self.programs = {} self.programs[self.DEFAULT_PROGRAM_NAME] = [] # Map String (program name) -> Integer index self.lastRewriteTokenIndexes = {} def rollback(self, *args): """ Rollback the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED! 
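
        A hypothetical use, recording the instruction count up front so
        that edits made afterwards can be dropped again:

            mark = len(stream.getProgram("default"))
            stream.insertBefore(0, "/* header */ ")
            # ... the edit turns out to be a mistake ...
            stream.rollback("default", mark)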
""" if len(args) == 2: programName = args[0] instructionIndex = args[1] elif len(args) == 1: programName = self.DEFAULT_PROGRAM_NAME instructionIndex = args[0] else: raise TypeError("Invalid arguments") p = self.programs.get(programName, None) if p is not None: self.programs[programName] = ( p[self.MIN_TOKEN_INDEX:instructionIndex]) def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME): """Reset the program so that no instructions exist""" self.rollback(programName, self.MIN_TOKEN_INDEX) def insertAfter(self, *args): if len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME index = args[0] text = args[1] elif len(args) == 3: programName = args[0] index = args[1] text = args[2] else: raise TypeError("Invalid arguments") if isinstance(index, Token): # index is a Token, grap the stream index from it index = index.index # to insert after, just insert before next index (even if past end) self.insertBefore(programName, index+1, text) def insertBefore(self, *args): if len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME index = args[0] text = args[1] elif len(args) == 3: programName = args[0] index = args[1] text = args[2] else: raise TypeError("Invalid arguments") if isinstance(index, Token): # index is a Token, grap the stream index from it index = index.index op = InsertBeforeOp(self, index, text) rewrites = self.getProgram(programName) op.instructionIndex = len(rewrites) rewrites.append(op) def replace(self, *args): if len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME first = args[0] last = args[0] text = args[1] elif len(args) == 3: programName = self.DEFAULT_PROGRAM_NAME first = args[0] last = args[1] text = args[2] elif len(args) == 4: programName = args[0] first = args[1] last = args[2] text = args[3] else: raise TypeError("Invalid arguments") if isinstance(first, Token): # first is a Token, grap the stream index from it first = first.index if isinstance(last, Token): # last is a Token, grap the stream index from it last = last.index if first > last or first < 0 or last < 0 or last >= len(self.tokens): raise ValueError( "replace: range invalid: %d..%d (size=%d)" % (first, last, len(self.tokens))) op = ReplaceOp(self, first, last, text) rewrites = self.getProgram(programName) op.instructionIndex = len(rewrites) rewrites.append(op) def delete(self, *args): self.replace(*(list(args) + [None])) def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME): return self.lastRewriteTokenIndexes.get(programName, -1) def setLastRewriteTokenIndex(self, programName, i): self.lastRewriteTokenIndexes[programName] = i def getProgram(self, name): p = self.programs.get(name, None) if p is None: p = self.initializeProgram(name) return p def initializeProgram(self, name): p = [] self.programs[name] = p return p def toOriginalString(self, start=None, end=None): if self.p == -1: self.fillBuffer() if start is None: start = self.MIN_TOKEN_INDEX if end is None: end = self.size() - 1 buf = StringIO() i = start while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens): if self.get(i).type != EOF: buf.write(self.get(i).text) i += 1 return buf.getvalue() def toString(self, *args): if self.p == -1: self.fillBuffer() if len(args) == 0: programName = self.DEFAULT_PROGRAM_NAME start = self.MIN_TOKEN_INDEX end = self.size() - 1 elif len(args) == 1: programName = args[0] start = self.MIN_TOKEN_INDEX end = self.size() - 1 elif len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME start = args[0] end = args[1] if start is None: start = self.MIN_TOKEN_INDEX elif not isinstance(start, int): 
            start = start.index

        if end is None:
            end = len(self.tokens) - 1
        elif not isinstance(end, int):
            end = end.index

        # ensure start/end are in range
        if end >= len(self.tokens):
            end = len(self.tokens) - 1

        if start < 0:
            start = 0

        rewrites = self.programs.get(programName)
        if rewrites is None or len(rewrites) == 0:
            # no instructions to execute
            return self.toOriginalString(start, end)

        buf = StringIO()

        # First, optimize instruction stream
        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)

        # Walk buffer, executing instructions and emitting tokens
        i = start
        while i <= end and i < len(self.tokens):
            op = indexToOp.get(i)
            # remove so any left have index size-1
            try:
                del indexToOp[i]
            except KeyError:
                pass

            t = self.tokens[i]
            if op is None:
                # no operation at that index, just dump token
                if t.type != EOF:
                    buf.write(t.text)
                i += 1  # move to next token

            else:
                i = op.execute(buf)  # execute operation and skip

        # include stuff after end if it's last index in buffer
        # So, if they did an insertAfter(lastValidIndex, "foo"), include
        # foo if end==lastValidIndex.
        if end == len(self.tokens) - 1:
            # Scan any remaining operations after last token
            # should be included (they will be inserts).
            for i in sorted(indexToOp.keys()):
                op = indexToOp[i]
                if op.index >= len(self.tokens)-1:
                    buf.write(op.text)

        return buf.getvalue()

    __str__ = toString

    def reduceToSingleOperationPerIndex(self, rewrites):
        """
        We need to combine operations and report invalid operations (like
        overlapping replaces that are not completely nested). Inserts to
        the same index need to be combined, etc...

        Here are the cases:

        I.i.u I.j.v                         leave alone, nonoverlapping
        I.i.u I.i.v                         combine: Iivu

        R.i-j.u R.x-y.v | i-j in x-y        delete first R
        R.i-j.u R.i-j.v                     delete first R
        R.i-j.u R.x-y.v | x-y in i-j        ERROR
        R.i-j.u R.x-y.v | boundaries overlap  ERROR

        Delete special case of replace (text==null):
        D.i-j.u D.x-y.v | boundaries overlap  combine to
                                              max(min)..max(right)

        I.i.u R.x-y.v | i in (x+1)-y        delete I (since insert before
                                            we're not deleting i)
        I.i.u R.x-y.v | i not in (x+1)-y    leave alone, nonoverlapping

        R.x-y.v I.i.u | i in x-y            ERROR
        R.x-y.v I.x.u                       R.x-y.uv (combine, delete I)
        R.x-y.v I.i.u | i not in x-y        leave alone, nonoverlapping

        I.i.u = insert u before op @ index i
        R.x-y.u = replace x-y indexed tokens with u

        First we need to examine replaces. For any replace op:

          1. wipe out any insertions before op within that range.
          2. Drop any replace op before that is contained completely
             within that range.
          3. Throw exception upon boundary overlap with any previous
             replace.

        Then we can deal with inserts:

          1. for any inserts to same index, combine even if not adjacent.
          2. for any prior replace with same left boundary, combine this
             insert with replace and delete this replace.
          3. throw exception if index in same range as previous replace

        Don't actually delete; make op null in list. Easier to walk list.
        Later we can throw as we add to index -> op map.

        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
        inserted stuff would be before the replace range. But, if you add
        tokens in front of a method body '{' and then delete the method
        body, I think the stuff before the '{' you added should disappear
        too.

        Return a map from token index to operation.
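
        For example (hypothetical indices): insertBefore(2, "x") followed
        by replace(2, 2, "y") collapses into the single operation
        ReplaceOp(2, 2, "xy"), so index 2 maps to exactly one operation.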
""" # WALK REPLACES for i, rop in enumerate(rewrites): if rop is None: continue if not isinstance(rop, ReplaceOp): continue # Wipe prior inserts within range for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i): if iop.index == rop.index: # E.g., insert before 2, delete 2..2; update replace # text to include insert before, kill insert rewrites[iop.instructionIndex] = None rop.text = self.catOpText(iop.text, rop.text) elif iop.index > rop.index and iop.index <= rop.lastIndex: # delete insert as it's a no-op. rewrites[j] = None # Drop any prior replaces contained within for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i): if (prevRop.index >= rop.index and prevRop.lastIndex <= rop.lastIndex): # delete replace as it's a no-op. rewrites[j] = None continue # throw exception unless disjoint or identical disjoint = (prevRop.lastIndex < rop.index or prevRop.index > rop.lastIndex) same = (prevRop.index == rop.index and prevRop.lastIndex == rop.lastIndex) # Delete special case of replace (text==null): # D.i-j.u D.x-y.v| boundaries overlapcombine to # max(min)..max(right) if prevRop.text is None and rop.text is None and not disjoint: # kill first delete rewrites[prevRop.instructionIndex] = None rop.index = min(prevRop.index, rop.index) rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex) elif not disjoint and not same: raise ValueError( "replace op boundaries of %s overlap with previous %s" % (rop, prevRop)) # WALK INSERTS for i, iop in enumerate(rewrites): if iop is None: continue if not isinstance(iop, InsertBeforeOp): continue # combine current insert with prior if any at same index for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i): if prevIop.index == iop.index: # combine objects # convert to strings...we're in process of toString'ing # whole token buffer so no lazy eval issue with any # templates iop.text = self.catOpText(iop.text, prevIop.text) # delete redundant prior insert rewrites[j] = None # look for replaces where iop.index is in range; error for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i): if iop.index == rop.index: rop.text = self.catOpText(iop.text, rop.text) # delete current insert rewrites[i] = None continue if iop.index >= rop.index and iop.index <= rop.lastIndex: raise ValueError( "insert op %s within boundaries of previous %s" % (iop, rop)) m = {} for i, op in enumerate(rewrites): if op is None: # ignore deleted ops continue assert op.index not in m, "should only be one op per index" m[op.index] = op return m def catOpText(self, a, b): x = "" y = "" if a is not None: x = a if b is not None: y = b return x + y def getKindOfOps(self, rewrites, kind, before=None): """Get all operations before an index of a particular kind.""" if before is None: before = len(rewrites) elif before > len(rewrites): before = len(rewrites) for i, op in enumerate(rewrites[:before]): if op is None: # ignore deleted continue if op.__class__ == kind: yield i, op def toDebugString(self, start=None, end=None): if start is None: start = self.MIN_TOKEN_INDEX if end is None: end = self.size() - 1 buf = StringIO() i = start while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens): buf.write(self.get(i)) i += 1 return buf.getvalue() python-antlr3-3.5.2/antlr3/dottreegen.py0000644000175000017500000001550312653072152016671 0ustar zigozigo""" @package antlr3.dottreegenerator @brief ANTLR3 runtime package, tree module This module contains all support classes for AST construction and tree parsers. 
""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] # lot's of docstrings are missing, don't complain for now... # pylint: disable-msg=C0111 from antlr3.tree import CommonTreeAdaptor import stringtemplate3 class DOTTreeGenerator(object): """ A utility class to generate DOT diagrams (graphviz) from arbitrary trees. You can pass in your own templates and can pass in any kind of tree or use Tree interface method. 
""" _treeST = stringtemplate3.StringTemplate( template=( "digraph {\n" + " ordering=out;\n" + " ranksep=.4;\n" + " node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" + " width=.25, height=.25];\n" + " edge [arrowsize=.5]\n" + " $nodes$\n" + " $edges$\n" + "}\n") ) _nodeST = stringtemplate3.StringTemplate( template="$name$ [label=\"$text$\"];\n" ) _edgeST = stringtemplate3.StringTemplate( template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n" ) def __init__(self): ## Track node to number mapping so we can get proper node name back self.nodeToNumberMap = {} ## Track node number so we can get unique node names self.nodeNumber = 0 def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST): if adaptor is None: adaptor = CommonTreeAdaptor() treeST = treeST.getInstanceOf() self.nodeNumber = 0 self.toDOTDefineNodes(tree, adaptor, treeST) self.nodeNumber = 0 self.toDOTDefineEdges(tree, adaptor, treeST, edgeST) return treeST def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None): if knownNodes is None: knownNodes = set() if tree is None: return n = adaptor.getChildCount(tree) if n == 0: # must have already dumped as child from previous # invocation; do nothing return # define parent node number = self.getNodeNumber(tree) if number not in knownNodes: parentNodeST = self.getNodeST(adaptor, tree) treeST.setAttribute("nodes", parentNodeST) knownNodes.add(number) # for each child, do a " [label=text]" node def for i in range(n): child = adaptor.getChild(tree, i) number = self.getNodeNumber(child) if number not in knownNodes: nodeST = self.getNodeST(adaptor, child) treeST.setAttribute("nodes", nodeST) knownNodes.add(number) self.toDOTDefineNodes(child, adaptor, treeST, knownNodes) def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST): if tree is None: return n = adaptor.getChildCount(tree) if n == 0: # must have already dumped as child from previous # invocation; do nothing return parentName = "n%d" % self.getNodeNumber(tree) # for each child, do a parent -> child edge using unique node names parentText = adaptor.getText(tree) for i in range(n): child = adaptor.getChild(tree, i) childText = adaptor.getText(child) childName = "n%d" % self.getNodeNumber(child) edgeST = edgeST.getInstanceOf() edgeST.setAttribute("parent", parentName) edgeST.setAttribute("child", childName) edgeST.setAttribute("parentText", parentText) edgeST.setAttribute("childText", childText) treeST.setAttribute("edges", edgeST) self.toDOTDefineEdges(child, adaptor, treeST, edgeST) def getNodeST(self, adaptor, t): text = adaptor.getText(t) nodeST = self._nodeST.getInstanceOf() uniqueName = "n%d" % self.getNodeNumber(t) nodeST.setAttribute("name", uniqueName) if text is not None: text = text.replace('"', r'\"') nodeST.setAttribute("text", text) return nodeST def getNodeNumber(self, t): try: return self.nodeToNumberMap[t] except KeyError: self.nodeToNumberMap[t] = self.nodeNumber self.nodeNumber += 1 return self.nodeNumber - 1 def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST): """ Generate DOT (graphviz) for a whole tree not just a node. For example, 3+4*5 should generate: digraph { node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier", width=.4, height=.2]; edge [arrowsize=.7] "+"->3 "+"->"*" "*"->4 "*"->5 } Return the ST not a string in case people want to alter. Takes a Tree interface object. 
Example of invokation: import antlr3 import antlr3.extras input = antlr3.ANTLRInputStream(sys.stdin) lex = TLexer(input) tokens = antlr3.CommonTokenStream(lex) parser = TParser(tokens) tree = parser.e().tree print tree.toStringTree() st = antlr3.extras.toDOT(t) print st """ gen = DOTTreeGenerator() return gen.toDOT(tree, adaptor, treeST, edgeST) python-antlr3-3.5.2/antlr3/tokens.py0000644000175000017500000002726712653072152016046 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE ############################################################################ # # basic token interface # ############################################################################ class Token(object): """@brief Abstract token baseclass.""" def getText(self): """@brief Get the text of the token. Using setter/getter methods is deprecated. Use o.text instead. """ raise NotImplementedError def setText(self, text): """@brief Set the text of the token. Using setter/getter methods is deprecated. Use o.text instead. """ raise NotImplementedError def getType(self): """@brief Get the type of the token. Using setter/getter methods is deprecated. Use o.type instead.""" raise NotImplementedError def setType(self, ttype): """@brief Get the type of the token. Using setter/getter methods is deprecated. Use o.type instead.""" raise NotImplementedError def getLine(self): """@brief Get the line number on which this token was matched Lines are numbered 1..n Using setter/getter methods is deprecated. Use o.line instead.""" raise NotImplementedError def setLine(self, line): """@brief Set the line number on which this token was matched Using setter/getter methods is deprecated. Use o.line instead.""" raise NotImplementedError def getCharPositionInLine(self): """@brief Get the column of the tokens first character, Columns are numbered 0..n-1 Using setter/getter methods is deprecated. 
        Use o.charPositionInLine instead."""

        raise NotImplementedError

    def setCharPositionInLine(self, pos):
        """@brief Set the column of the token's first character.

        Using setter/getter methods is deprecated.
        Use o.charPositionInLine instead."""

        raise NotImplementedError

    def getChannel(self):
        """@brief Get the channel of the token

        Using setter/getter methods is deprecated. Use o.channel instead."""

        raise NotImplementedError

    def setChannel(self, channel):
        """@brief Set the channel of the token

        Using setter/getter methods is deprecated. Use o.channel instead."""

        raise NotImplementedError

    def getTokenIndex(self):
        """@brief Get the index in the input stream.

        An index from 0..n-1 of the token object in the input stream.
        This must be valid in order to use the ANTLRWorks debugger.

        Using setter/getter methods is deprecated. Use o.index instead."""

        raise NotImplementedError

    def setTokenIndex(self, index):
        """@brief Set the index in the input stream.

        Using setter/getter methods is deprecated. Use o.index instead."""

        raise NotImplementedError

    def getInputStream(self):
        """@brief From what character stream was this token created.

        You don't have to implement but it's nice to know where a Token
        comes from if you have include files etc... on the input."""

        raise NotImplementedError

    def setInputStream(self, input):
        """@brief From what character stream was this token created.

        You don't have to implement but it's nice to know where a Token
        comes from if you have include files etc... on the input."""

        raise NotImplementedError


############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################

class CommonToken(Token):
    """@brief Basic token implementation.

    This implementation does not copy the text from the input stream upon
    creation, but keeps start/stop pointers into the stream to avoid
    unnecessary copy operations.
    """

    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
                 input=None, start=None, stop=None, oldToken=None):
        Token.__init__(self)

        if oldToken is not None:
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel
            self.index = oldToken.index
            self._text = oldToken._text
            self.input = oldToken.input
            if isinstance(oldToken, CommonToken):
                self.start = oldToken.start
                self.stop = oldToken.stop

        else:
            self.type = type
            self.input = input
            self.charPositionInLine = -1  # set to invalid position
            self.line = 0
            self.channel = channel

            # What token number is this from 0..n-1 tokens;
            # < 0 implies invalid index
            self.index = -1

            # We need to be able to change the text once in a while. If
            # this is non-null, then getText should return this. Note that
            # start/stop are not affected by changing this.
            self._text = text

            # The char position into the input buffer where this token starts
            self.start = start

            # The char position into the input buffer where this token stops
            # This is the index of the last char, *not* the index after it!
            self.stop = stop

    def getText(self):
        if self._text is not None:
            return self._text

        if self.input is None:
            return None

        if self.start < self.input.size() and self.stop < self.input.size():
            return self.input.substring(self.start, self.stop)

        return '<EOF>'

    def setText(self, text):
        """
        Override the text for this token. getText() will return this text
        rather than pulling from the buffer. Note that this does not mean
        that start/stop indexes are not valid.
        It means that the input was converted to a new string in the token
        object.
        """

        self._text = text

    text = property(getText, setText)

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getTypeName(self):
        return str(self.type)

    typeName = property(lambda s: s.getTypeName())

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        return self.input

    def setInputStream(self, input):
        self.input = input

    def __str__(self):
        if self.type == EOF:
            return "<EOF>"

        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)

        txt = self.text
        if txt is not None:
            txt = txt.replace("\n", "\\\\n")
            txt = txt.replace("\r", "\\\\r")
            txt = txt.replace("\t", "\\\\t")
        else:
            txt = "<no text>"

        return "[@%d,%d:%d=%r,<%s>%s,%d:%d]" % (
            self.index,
            self.start, self.stop,
            txt,
            self.typeName, channelStr,
            self.line, self.charPositionInLine
            )


class ClassicToken(Token):
    """@brief Alternative token implementation.

    A Token object like we'd use in ANTLR 2.x; has an actual string created
    and associated with this object. These objects are needed for imaginary
    tree nodes that have payload objects. We need to create a Token object
    that has a string; the tree node will point at this token. CommonToken
    has indexes into a char stream and hence cannot be used to introduce
    new strings.
    """

    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
                 oldToken=None):
        Token.__init__(self)

        if oldToken is not None:
            self.text = oldToken.text
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel

        self.text = text
        self.type = type
        self.line = None
        self.charPositionInLine = None
        self.channel = channel
        self.index = None

    def getText(self):
        return self.text

    def setText(self, text):
        self.text = text

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        return None

    def setInputStream(self, input):
        pass

    def toString(self):
        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)

        txt = self.text
        if txt is None:
            txt = "<no text>"

        return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
                                          txt,
                                          self.type, channelStr,
                                          self.line, self.charPositionInLine
                                          )

    __str__ = toString
    __repr__ = toString


INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)

# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
python-antlr3-3.5.2/antlr3/extras.py0000644000175000017500000000357112653072152016041 0ustar zigozigo"""
@package antlr3.extras
@brief ANTLR3 runtime package, extras module

This module contains all support classes for AST construction and tree
parsers.
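
A short sketch of the re-exported helpers (`tree` is assumed to be an AST
from some parser run; toDOT needs the optional stringtemplate3 package):

    from antlr3.extras import toDOT, TreeWizard

    st = toDOT(tree)
    print st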
""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] # lot's of docstrings are missing, don't complain for now... # pylint: disable-msg=C0111 from treewizard import TreeWizard try: from antlr3.dottreegen import toDOT except ImportError, exc: def toDOT(*args, **kwargs): raise exc python-antlr3-3.5.2/antlr3/debug.py0000644000175000017500000010165112653072152015617 0ustar zigozigo# begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2009 Terence Parr # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # end[licence] import socket from antlr3 import Parser, TokenStream, RecognitionException, Token from antlr3.tree import CommonTreeAdaptor, TreeAdaptor, Tree class DebugParser(Parser): def __init__(self, stream, state=None, dbg=None, *args, **kwargs): # wrap token stream in DebugTokenStream (unless user already did so). if not isinstance(stream, DebugTokenStream): stream = DebugTokenStream(stream, dbg) super(DebugParser, self).__init__(stream, state, *args, **kwargs) # Who to notify when events in the parser occur. self._dbg = None self.setDebugListener(dbg) def setDebugListener(self, dbg): """Provide a new debug event listener for this parser. Notify the input stream too that it should send events to this listener. """ if hasattr(self.input, 'dbg'): self.input.dbg = dbg self._dbg = dbg def getDebugListener(self): return self._dbg dbg = property(getDebugListener, setDebugListener) def beginResync(self): self._dbg.beginResync() def endResync(self): self._dbg.endResync() def beginBacktrack(self, level): self._dbg.beginBacktrack(level) def endBacktrack(self, level, successful): self._dbg.endBacktrack(level,successful) def reportError(self, exc): Parser.reportError(self, exc) if isinstance(exc, RecognitionException): self._dbg.recognitionException(exc) class DebugTokenStream(TokenStream): def __init__(self, input, dbg=None): self.input = input self.initialStreamState = True # Track the last mark() call result value for use in rewind(). self.lastMarker = None self._dbg = None self.setDebugListener(dbg) # force TokenStream to get at least first valid token # so we know if there are any hidden tokens first in the stream self.input.LT(1) def getDebugListener(self): return self._dbg def setDebugListener(self, dbg): self._dbg = dbg dbg = property(getDebugListener, setDebugListener) def consume(self): if self.initialStreamState: self.consumeInitialHiddenTokens() a = self.input.index() t = self.input.LT(1) self.input.consume() b = self.input.index() self._dbg.consumeToken(t) if b > a+1: # then we consumed more than one token; must be off channel tokens for idx in range(a+1, b): self._dbg.consumeHiddenToken(self.input.get(idx)); def consumeInitialHiddenTokens(self): """consume all initial off-channel tokens""" firstOnChannelTokenIndex = self.input.index() for idx in range(firstOnChannelTokenIndex): self._dbg.consumeHiddenToken(self.input.get(idx)) self.initialStreamState = False def LT(self, i): if self.initialStreamState: self.consumeInitialHiddenTokens() t = self.input.LT(i) self._dbg.LT(i, t) return t def LA(self, i): if self.initialStreamState: self.consumeInitialHiddenTokens() t = self.input.LT(i) self._dbg.LT(i, t) return t.type def get(self, i): return self.input.get(i) def index(self): return self.input.index() def mark(self): self.lastMarker = self.input.mark() self._dbg.mark(self.lastMarker) return self.lastMarker def rewind(self, marker=None): self._dbg.rewind(marker) self.input.rewind(marker) def release(self, marker): pass def seek(self, index): # TODO: implement seek in dbg interface # self._dbg.seek(index); self.input.seek(index) def size(self): return self.input.size() def getTokenSource(self): return self.input.getTokenSource() def getSourceName(self): return self.getTokenSource().getSourceName() def toString(self, start=None, stop=None): return self.input.toString(start, stop) class DebugTreeAdaptor(TreeAdaptor): """A TreeAdaptor proxy that fires debugging events to a DebugEventListener delegate and uses the TreeAdaptor delegate to do the actual work. 
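
    A typical wiring sketch (the listener and the generated parser are
    assumed to exist already):

        adaptor = DebugTreeAdaptor(dbg=listener, adaptor=CommonTreeAdaptor())
        # hand this adaptor to the generated parser in place of its
        # default CommonTreeAdaptor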
    All AST events are triggered by this adaptor; no code gen changes
    are needed in generated rules. Debugging events are triggered *after*
    invoking tree adaptor routines.

    Trees created with actions in rewrite actions like
    "-> ^(ADD {foo} {bar})" cannot be tracked as they might not use the
    adaptor to create foo, bar. The debug listener has to deal with tree
    node IDs for which it did not see a createNode event. A single node
    is sufficient even if it represents a whole tree.
    """

    def __init__(self, dbg, adaptor):
        self.dbg = dbg
        self.adaptor = adaptor

    def createWithPayload(self, payload):
        if payload.getTokenIndex() < 0:
            # could be token conjured up during error recovery
            return self.createFromType(payload.getType(), payload.getText())

        node = self.adaptor.createWithPayload(payload)
        self.dbg.createNode(node, payload)
        return node

    def createFromToken(self, tokenType, fromToken, text=None):
        node = self.adaptor.createFromToken(tokenType, fromToken, text)
        self.dbg.createNode(node)
        return node

    def createFromType(self, tokenType, text):
        node = self.adaptor.createFromType(tokenType, text)
        self.dbg.createNode(node)
        return node

    def errorNode(self, input, start, stop, exc):
        node = self.adaptor.errorNode(input, start, stop, exc)
        if node is not None:
            self.dbg.errorNode(node)

        return node

    def dupTree(self, tree):
        t = self.adaptor.dupTree(tree)

        # walk the tree and emit create and add child events
        # to simulate what dupTree has done. dupTree does not call this debug
        # adapter so I must simulate.
        self.simulateTreeConstruction(t)

        return t

    def simulateTreeConstruction(self, t):
        """^(A B C): emit create A, create B, add child, ..."""

        self.dbg.createNode(t)
        for i in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, i)
            self.simulateTreeConstruction(child)
            self.dbg.addChild(t, child)

    def dupNode(self, treeNode):
        d = self.adaptor.dupNode(treeNode)
        self.dbg.createNode(d)
        return d

    def nil(self):
        node = self.adaptor.nil()
        self.dbg.nilNode(node)
        return node

    def isNil(self, tree):
        return self.adaptor.isNil(tree)

    def addChild(self, t, child):
        if isinstance(child, Token):
            n = self.createWithPayload(child)
            self.addChild(t, n)

        else:
            if t is None or child is None:
                return

            self.adaptor.addChild(t, child)
            self.dbg.addChild(t, child)

    def becomeRoot(self, newRoot, oldRoot):
        if isinstance(newRoot, Token):
            n = self.createWithPayload(newRoot)
            self.adaptor.becomeRoot(n, oldRoot)

        else:
            n = self.adaptor.becomeRoot(newRoot, oldRoot)

        self.dbg.becomeRoot(newRoot, oldRoot)
        return n

    def rulePostProcessing(self, root):
        return self.adaptor.rulePostProcessing(root)

    def getType(self, t):
        return self.adaptor.getType(t)

    def setType(self, t, type):
        self.adaptor.setType(t, type)

    def getText(self, t):
        return self.adaptor.getText(t)

    def setText(self, t, text):
        self.adaptor.setText(t, text)

    def getToken(self, t):
        return self.adaptor.getToken(t)

    def setTokenBoundaries(self, t, startToken, stopToken):
        self.adaptor.setTokenBoundaries(t, startToken, stopToken)
        if t is not None and startToken is not None and stopToken is not None:
            self.dbg.setTokenBoundaries(
                t, startToken.getTokenIndex(),
                stopToken.getTokenIndex())

    def getTokenStartIndex(self, t):
        return self.adaptor.getTokenStartIndex(t)

    def getTokenStopIndex(self, t):
        return self.adaptor.getTokenStopIndex(t)

    def getChild(self, t, i):
        return self.adaptor.getChild(t, i)

    def setChild(self, t, i, child):
        self.adaptor.setChild(t, i, child)

    def deleteChild(self, t, i):
        return self.adaptor.deleteChild(t, i)

    def getChildCount(self, t):
        return self.adaptor.getChildCount(t)

    def getUniqueID(self, node):
return self.adaptor.getUniqueID(node) def getParent(self, t): return self.adaptor.getParent(t) def getChildIndex(self, t): return self.adaptor.getChildIndex(t) def setParent(self, t, parent): self.adaptor.setParent(t, parent) def setChildIndex(self, t, index): self.adaptor.setChildIndex(t, index) def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): self.adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t) ## support def getDebugListener(self): return dbg def setDebugListener(self, dbg): self.dbg = dbg def getTreeAdaptor(self): return self.adaptor class DebugEventListener(object): """All debugging events that a recognizer can trigger. I did not create a separate AST debugging interface as it would create lots of extra classes and DebugParser has a dbg var defined, which makes it hard to change to ASTDebugEventListener. I looked hard at this issue and it is easier to understand as one monolithic event interface for all possible events. Hopefully, adding ST debugging stuff won't be bad. Leave for future. 4/26/2006. """ # Moved to version 2 for v3.1: added grammar name to enter/exit Rule PROTOCOL_VERSION = "2" def enterRule(self, grammarFileName, ruleName): """The parser has just entered a rule. No decision has been made about which alt is predicted. This is fired AFTER init actions have been executed. Attributes are defined and available etc... The grammarFileName allows composite grammars to jump around among multiple grammar files. """ pass def enterAlt(self, alt): """Because rules can have lots of alternatives, it is very useful to know which alt you are entering. This is 1..n for n alts. """ pass def exitRule(self, grammarFileName, ruleName): """This is the last thing executed before leaving a rule. It is executed even if an exception is thrown. This is triggered after error reporting and recovery have occurred (unless the exception is not caught in this rule). This implies an "exitAlt" event. The grammarFileName allows composite grammars to jump around among multiple grammar files. """ pass def enterSubRule(self, decisionNumber): """Track entry into any (...) subrule other EBNF construct""" pass def exitSubRule(self, decisionNumber): pass def enterDecision(self, decisionNumber, couldBacktrack): """Every decision, fixed k or arbitrary, has an enter/exit event so that a GUI can easily track what LT/consume events are associated with prediction. You will see a single enter/exit subrule but multiple enter/exit decision events, one for each loop iteration. """ pass def exitDecision(self, decisionNumber): pass def consumeToken(self, t): """An input token was consumed; matched by any kind of element. Trigger after the token was matched by things like match(), matchAny(). """ pass def consumeHiddenToken(self, t): """An off-channel input token was consumed. Trigger after the token was matched by things like match(), matchAny(). (unless of course the hidden token is first stuff in the input stream). """ pass def LT(self, i, t): """Somebody (anybody) looked ahead. Note that this actually gets triggered by both LA and LT calls. The debugger will want to know which Token object was examined. Like consumeToken, this indicates what token was seen at that depth. A remote debugger cannot look ahead into a file it doesn't have so LT events must pass the token even if the info is redundant. """ pass def mark(self, marker): """The parser is going to look arbitrarily ahead; mark this location, the token stream's marker is sent in case you need it. 
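
        Listeners usually just record the marker; a sketch (record() is a
        hypothetical helper):

            def mark(self, marker):
                self.record("mark %d" % marker)

            def rewind(self, marker=None):
                self.record("rewind %s" % marker)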
""" pass def rewind(self, marker=None): """After an arbitrairly long lookahead as with a cyclic DFA (or with any backtrack), this informs the debugger that stream should be rewound to the position associated with marker. """ pass def beginBacktrack(self, level): pass def endBacktrack(self, level, successful): pass def location(self, line, pos): """To watch a parser move through the grammar, the parser needs to inform the debugger what line/charPos it is passing in the grammar. For now, this does not know how to switch from one grammar to the other and back for island grammars etc... This should also allow breakpoints because the debugger can stop the parser whenever it hits this line/pos. """ pass def recognitionException(self, e): """A recognition exception occurred such as NoViableAltException. I made this a generic event so that I can alter the exception hierachy later without having to alter all the debug objects. Upon error, the stack of enter rule/subrule must be properly unwound. If no viable alt occurs it is within an enter/exit decision, which also must be rewound. Even the rewind for each mark must be unwount. In the Java target this is pretty easy using try/finally, if a bit ugly in the generated code. The rewind is generated in DFA.predict() actually so no code needs to be generated for that. For languages w/o this "finally" feature (C++?), the target implementor will have to build an event stack or something. Across a socket for remote debugging, only the RecognitionException data fields are transmitted. The token object or whatever that caused the problem was the last object referenced by LT. The immediately preceding LT event should hold the unexpected Token or char. Here is a sample event trace for grammar: b : C ({;}A|B) // {;} is there to prevent A|B becoming a set | D ; The sequence for this rule (with no viable alt in the subrule) for input 'c c' (there are 3 tokens) is: commence LT(1) enterRule b location 7 1 enter decision 3 LT(1) exit decision 3 enterAlt1 location 7 5 LT(1) consumeToken [c/<4>,1:0] location 7 7 enterSubRule 2 enter decision 2 LT(1) LT(1) recognitionException NoViableAltException 2 1 2 exit decision 2 exitSubRule 2 beginResync LT(1) consumeToken [c/<4>,1:1] LT(1) endResync LT(-1) exitRule b terminate """ pass def beginResync(self): """Indicates the recognizer is about to consume tokens to resynchronize the parser. Any consume events from here until the recovered event are not part of the parse--they are dead tokens. """ pass def endResync(self): """Indicates that the recognizer has finished consuming tokens in order to resychronize. There may be multiple beginResync/endResync pairs before the recognizer comes out of errorRecovery mode (in which multiple errors are suppressed). This will be useful in a gui where you want to probably grey out tokens that are consumed but not matched to anything in grammar. Anything between a beginResync/endResync pair was tossed out by the parser. """ pass def semanticPredicate(self, result, predicate): """A semantic predicate was evaluate with this result and action text""" pass def commence(self): """Announce that parsing has begun. Not technically useful except for sending events over a socket. A GUI for example will launch a thread to connect and communicate with a remote parser. The thread will want to notify the GUI when a connection is made. ANTLR parsers trigger this upon entry to the first rule (the ruleLevel is used to figure this out). """ pass def terminate(self): """Parsing is over; successfully or not. 
Mostly useful for telling remote debugging listeners that it's time to quit. When the rule invocation level goes to zero at the end of a rule, we are done parsing. """ pass ## T r e e P a r s i n g def consumeNode(self, t): """Input for a tree parser is an AST, but we know nothing for sure about a node except its type and text (obtained from the adaptor). This is the analog of the consumeToken method. Again, the ID is the hashCode usually of the node so it only works if hashCode is not implemented. If the type is UP or DOWN, then the ID is not really meaningful as it's fixed--there is just one UP node and one DOWN navigation node. """ pass def LT(self, i, t): """The tree parser lookedahead. If the type is UP or DOWN, then the ID is not really meaningful as it's fixed--there is just one UP node and one DOWN navigation node. """ pass ## A S T E v e n t s def nilNode(self, t): """A nil was created (even nil nodes have a unique ID... they are not "null" per se). As of 4/28/2006, this seems to be uniquely triggered when starting a new subtree such as when entering a subrule in automatic mode and when building a tree in rewrite mode. If you are receiving this event over a socket via RemoteDebugEventSocketListener then only t.ID is set. """ pass def errorNode(self, t): """Upon syntax error, recognizers bracket the error with an error node if they are building ASTs. """ pass def createNode(self, node, token=None): """Announce a new node built from token elements such as type etc... If you are receiving this event over a socket via RemoteDebugEventSocketListener then only t.ID, type, text are set. """ pass def becomeRoot(self, newRoot, oldRoot): """Make a node the new root of an existing root. Note: the newRootID parameter is possibly different than the TreeAdaptor.becomeRoot() newRoot parameter. In our case, it will always be the result of calling TreeAdaptor.becomeRoot() and not root_n or whatever. The listener should assume that this event occurs only when the current subrule (or rule) subtree is being reset to newRootID. If you are receiving this event over a socket via RemoteDebugEventSocketListener then only IDs are set. @see antlr3.tree.TreeAdaptor.becomeRoot() """ pass def addChild(self, root, child): """Make childID a child of rootID. If you are receiving this event over a socket via RemoteDebugEventSocketListener then only IDs are set. @see antlr3.tree.TreeAdaptor.addChild() """ pass def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): """Set the token start/stop token index for a subtree root or node. If you are receiving this event over a socket via RemoteDebugEventSocketListener then only t.ID is set. """ pass class BlankDebugEventListener(DebugEventListener): """A blank listener that does nothing; useful for real classes so they don't have to have lots of blank methods and are less sensitive to updates to debug interface. Note: this class is identical to DebugEventListener and exists purely for compatibility with Java. """ pass class TraceDebugEventListener(DebugEventListener): """A listener that simply records text representations of the events. Useful for debugging the debugging facility ;) Subclasses can override the record() method (which defaults to printing to stdout) to record the events in a different way. 
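
    For example (a sketch; MyParser stands in for a parser generated with
    the -debug option):

        listener = TraceDebugEventListener()
        parser = MyParser(tokens, dbg=listener)
        parser.entryRule()    # events are printed as the parse proceeds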
""" def __init__(self, adaptor=None): super(TraceDebugEventListener, self).__init__() if adaptor is None: adaptor = CommonTreeAdaptor() self.adaptor = adaptor def record(self, event): sys.stdout.write(event + '\n') def enterRule(self, grammarFileName, ruleName): self.record("enterRule "+ruleName) def exitRule(self, grammarFileName, ruleName): self.record("exitRule "+ruleName) def enterSubRule(self, decisionNumber): self.record("enterSubRule") def exitSubRule(self, decisionNumber): self.record("exitSubRule") def location(self, line, pos): self.record("location %s:%s" % (line, pos)) ## Tree parsing stuff def consumeNode(self, t): self.record("consumeNode %s %s %s" % ( self.adaptor.getUniqueID(t), self.adaptor.getText(t), self.adaptor.getType(t))) def LT(self, i, t): self.record("LT %s %s %s %s" % ( i, self.adaptor.getUniqueID(t), self.adaptor.getText(t), self.adaptor.getType(t))) ## AST stuff def nilNode(self, t): self.record("nilNode %s" % self.adaptor.getUniqueID(t)) def createNode(self, t, token=None): if token is None: self.record("create %s: %s, %s" % ( self.adaptor.getUniqueID(t), self.adaptor.getText(t), self.adaptor.getType(t))) else: self.record("create %s: %s" % ( self.adaptor.getUniqueID(t), token.getTokenIndex())) def becomeRoot(self, newRoot, oldRoot): self.record("becomeRoot %s, %s" % ( self.adaptor.getUniqueID(newRoot), self.adaptor.getUniqueID(oldRoot))) def addChild(self, root, child): self.record("addChild %s, %s" % ( self.adaptor.getUniqueID(root), self.adaptor.getUniqueID(child))) def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): self.record("setTokenBoundaries %s, %s, %s" % ( self.adaptor.getUniqueID(t), tokenStartIndex, tokenStopIndex)) class RecordDebugEventListener(TraceDebugEventListener): """A listener that records events as strings in an array.""" def __init__(self, adaptor=None): super(RecordDebugEventListener, self).__init__(adaptor) self.events = [] def record(self, event): self.events.append(event) class DebugEventSocketProxy(DebugEventListener): """A proxy debug event listener that forwards events over a socket to a debugger (or any other listener) using a simple text-based protocol; one event per line. ANTLRWorks listens on server socket with a RemoteDebugEventSocketListener instance. These two objects must therefore be kept in sync. New events must be handled on both sides of socket. """ DEFAULT_DEBUGGER_PORT = 49100 def __init__(self, recognizer, adaptor=None, port=None, debug=None): super(DebugEventSocketProxy, self).__init__() self.grammarFileName = recognizer.getGrammarFileName() # Almost certainly the recognizer will have adaptor set, but # we don't know how to cast it (Parser or TreeParser) to get # the adaptor field. Must be set with a constructor. 
# :(
        self.adaptor = adaptor

        self.port = port or self.DEFAULT_DEBUGGER_PORT
        self.debug = debug
        self.socket = None
        self.connection = None
        self.input = None
        self.output = None


    def log(self, msg):
        if self.debug is not None:
            self.debug.write(msg + '\n')


    def handshake(self):
        if self.socket is None:
            # create listening socket
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.bind(('', self.port))
            self.socket.listen(1)
            self.log("Waiting for incoming connection on port %d" % self.port)

            # wait for an incoming connection
            self.connection, addr = self.socket.accept()
            self.log("Accepted connection from %s:%d" % addr)

            self.connection.setblocking(1)
            self.connection.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)

            # FIXME(pink): wrap into utf8 encoding stream
            self.output = self.connection.makefile('w', 0)
            self.input = self.connection.makefile('r', 0)

            self.write("ANTLR %s" % self.PROTOCOL_VERSION)
            self.write("grammar \"%s" % self.grammarFileName)
            self.ack()


    def write(self, msg):
        self.log("> %s" % msg)
        self.output.write("%s\n" % msg)
        self.output.flush()


    def ack(self):
        t = self.input.readline()
        self.log("< %s" % t.rstrip())


    def transmit(self, event):
        self.write(event)
        self.ack()


    def commence(self):
        # don't bother sending event; listener will trigger upon connection
        pass


    def terminate(self):
        self.transmit("terminate")
        self.output.close()
        self.input.close()
        self.connection.close()
        self.socket.close()


    def enterRule(self, grammarFileName, ruleName):
        self.transmit("enterRule\t%s\t%s" % (grammarFileName, ruleName))


    def enterAlt(self, alt):
        self.transmit("enterAlt\t%d" % alt)


    def exitRule(self, grammarFileName, ruleName):
        self.transmit("exitRule\t%s\t%s" % (grammarFileName, ruleName))


    def enterSubRule(self, decisionNumber):
        self.transmit("enterSubRule\t%d" % decisionNumber)


    def exitSubRule(self, decisionNumber):
        self.transmit("exitSubRule\t%d" % decisionNumber)


    def enterDecision(self, decisionNumber, couldBacktrack):
        self.transmit(
            "enterDecision\t%d\t%d" % (decisionNumber, couldBacktrack))


    def exitDecision(self, decisionNumber):
        self.transmit("exitDecision\t%d" % decisionNumber)


    def consumeToken(self, t):
        self.transmit("consumeToken\t%s" % self.serializeToken(t))


    def consumeHiddenToken(self, t):
        self.transmit("consumeHiddenToken\t%s" % self.serializeToken(t))


    def LT(self, i, o):
        if isinstance(o, Tree):
            return self.LT_tree(i, o)
        return self.LT_token(i, o)


    def LT_token(self, i, t):
        if t is not None:
            self.transmit("LT\t%d\t%s" % (i, self.serializeToken(t)))


    def mark(self, i):
        self.transmit("mark\t%d" % i)


    def rewind(self, i=None):
        if i is not None:
            self.transmit("rewind\t%d" % i)
        else:
            self.transmit("rewind")


    def beginBacktrack(self, level):
        self.transmit("beginBacktrack\t%d" % level)


    def endBacktrack(self, level, successful):
        self.transmit("endBacktrack\t%d\t%s" % (
            level, ['0', '1'][bool(successful)]))


    def location(self, line, pos):
        self.transmit("location\t%d\t%d" % (line, pos))


    def recognitionException(self, exc):
        self.transmit('\t'.join([
            "exception",
            exc.__class__.__name__,
            str(int(exc.index)),
            str(int(exc.line)),
            str(int(exc.charPositionInLine))]))


    def beginResync(self):
        self.transmit("beginResync")


    def endResync(self):
        self.transmit("endResync")


    def semanticPredicate(self, result, predicate):
        self.transmit('\t'.join([
            "semanticPredicate",
            str(int(result)),
            self.escapeNewlines(predicate)]))


    ## A S T  P a r s i n g  E v e n t s

    def consumeNode(self, t):
        # A Python translation of the commented-out Java that previously
        # sat here behind a FIXME(31) stub: build the event line from
        # string parts and send it over the socket.
        buf = ["consumeNode"]
        self.serializeNode(buf, t)
        self.transmit(''.join(buf))
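    # Illustrative note (not part of the runtime API): with the event
    # methods above, a single "consumeToken" event for a token 'x' of
    # type 4 on channel 0, line 3, column 1, at token index 5 travels
    # over the socket as one tab-separated line (all values made up):
    #
    #   consumeToken\t5\t4\t0\t3\t1\t"x
    #
    # and the proxy then blocks in ack() until the remote listener
    # answers with an acknowledgement line.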
buf.append("consumeNode"); # serializeNode(buf, t); # transmit(buf.toString()); def LT_tree(self, i, t): FIXME(34) # int ID = adaptor.getUniqueID(t); # String text = adaptor.getText(t); # int type = adaptor.getType(t); # StringBuffer buf = new StringBuffer(50); # buf.append("LN\t"); // lookahead node; distinguish from LT in protocol # buf.append(i); # serializeNode(buf, t); # transmit(buf.toString()); def serializeNode(self, buf, t): FIXME(33) # int ID = adaptor.getUniqueID(t); # String text = adaptor.getText(t); # int type = adaptor.getType(t); # buf.append("\t"); # buf.append(ID); # buf.append("\t"); # buf.append(type); # Token token = adaptor.getToken(t); # int line = -1; # int pos = -1; # if ( token!=null ) { # line = token.getLine(); # pos = token.getCharPositionInLine(); # } # buf.append("\t"); # buf.append(line); # buf.append("\t"); # buf.append(pos); # int tokenIndex = adaptor.getTokenStartIndex(t); # buf.append("\t"); # buf.append(tokenIndex); # serializeText(buf, text); ## A S T E v e n t s def nilNode(self, t): self.transmit("nilNode\t%d" % self.adaptor.getUniqueID(t)) def errorNode(self, t): self.transmit("errorNode\t%d\t%d\t\"%s" % ( self.adaptor.getUniqueID(t), Token.INVALID_TOKEN_TYPE, self.escapeNewlines(t.toString()))) def createNode(self, node, token=None): if token is not None: self.transmit("createNode\t%d\t%d" % ( self.adaptor.getUniqueID(node), token.getTokenIndex())) else: self.transmit("createNodeFromTokenElements\t%d\t%d\t\"%s" % ( self.adaptor.getUniqueID(node), self.adaptor.getType(node), self.adaptor.getText(node))) def becomeRoot(self, newRoot, oldRoot): self.transmit("becomeRoot\t%d\t%d" % ( self.adaptor.getUniqueID(newRoot), self.adaptor.getUniqueID(oldRoot))) def addChild(self, root, child): self.transmit("addChild\t%d\t%d" % ( self.adaptor.getUniqueID(root), self.adaptor.getUniqueID(child))) def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex): self.transmit("setTokenBoundaries\t%d\t%d\t%d" % ( self.adaptor.getUniqueID(t), tokenStartIndex, tokenStopIndex)) ## support def setTreeAdaptor(self, adaptor): self.adaptor = adaptor def getTreeAdaptor(self): return self.adaptor def serializeToken(self, t): buf = [str(int(t.getTokenIndex())), str(int(t.getType())), str(int(t.getChannel())), str(int(t.getLine() or 0)), str(int(t.getCharPositionInLine() or 0)), '\"' + self.escapeNewlines(t.getText())] return '\t'.join(buf) def escapeNewlines(self, txt): if txt is None: return '' txt = txt.replace("%","%25") # escape all escape char ;) txt = txt.replace("\n","%0A") # escape \n txt = txt.replace("\r","%0D") # escape \r return txt python-antlr3-3.5.2/antlr3/compat.py0000644000175000017500000000336012653072152016012 0ustar zigozigo"""Compatibility stuff""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] try: set = set frozenset = frozenset except NameError: from sets import Set as set, ImmutableSet as frozenset try: reversed = reversed except NameError: def reversed(l): l = l[:] l.reverse() return l python-antlr3-3.5.2/antlr3/tree.py0000644000175000017500000023724112653072152015475 0ustar zigozigo""" @package antlr3.tree @brief ANTLR3 runtime package, tree module This module contains all support classes for AST construction and tree parsers. """ # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] # lot's of docstrings are missing, don't complain for now... # pylint: disable-msg=C0111 import re from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE from antlr3.recognizers import BaseRecognizer, RuleReturnScope from antlr3.streams import IntStream from antlr3.tokens import CommonToken, Token, INVALID_TOKEN from antlr3.exceptions import MismatchedTreeNodeException, \ MissingTokenException, UnwantedTokenException, MismatchedTokenException, \ NoViableAltException ############################################################################ # # tree related exceptions # ############################################################################ class RewriteCardinalityException(RuntimeError): """ @brief Base class for all exceptions thrown during AST rewrite construction. 
This signifies a case where the cardinality of two or more elements in a subrule are different: (ID INT)+ where |ID|!=|INT| """ def __init__(self, elementDescription): RuntimeError.__init__(self, elementDescription) self.elementDescription = elementDescription def getMessage(self): return self.elementDescription class RewriteEarlyExitException(RewriteCardinalityException): """@brief No elements within a (...)+ in a rewrite rule""" def __init__(self, elementDescription=None): RewriteCardinalityException.__init__(self, elementDescription) class RewriteEmptyStreamException(RewriteCardinalityException): """ @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream """ pass ############################################################################ # # basic Tree and TreeAdaptor interfaces # ############################################################################ class Tree(object): """ @brief Abstract baseclass for tree nodes. What does a tree look like? ANTLR has a number of support classes such as CommonTreeNodeStream that work on these kinds of trees. You don't have to make your trees implement this interface, but if you do, you'll be able to use more support code. NOTE: When constructing trees, ANTLR can build any kind of tree; it can even use Token objects as trees if you add a child list to your tokens. This is a tree node without any payload; just navigation and factory stuff. """ def getChild(self, i): raise NotImplementedError def getChildCount(self): raise NotImplementedError def getParent(self): """Tree tracks parent and child index now > 3.0""" raise NotImplementedError def setParent(self, t): """Tree tracks parent and child index now > 3.0""" raise NotImplementedError def hasAncestor(self, ttype): """Walk upwards looking for ancestor with this token type.""" raise NotImplementedError def getAncestor(self, ttype): """Walk upwards and get first ancestor with this token type.""" raise NotImplementedError def getAncestors(self): """Return a list of all ancestors of this node. The first node of list is the root and the last is the parent of this node. """ raise NotImplementedError def getChildIndex(self): """This node is what child index? 0..n-1""" raise NotImplementedError def setChildIndex(self, index): """This node is what child index? 0..n-1""" raise NotImplementedError def freshenParentAndChildIndexes(self): """Set the parent and child index values for all children""" raise NotImplementedError def addChild(self, t): """ Add t as a child to this node. If t is null, do nothing. If t is nil, add all children of t to this' children. """ raise NotImplementedError def setChild(self, i, t): """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" raise NotImplementedError def deleteChild(self, i): raise NotImplementedError def replaceChildren(self, startChildIndex, stopChildIndex, t): """ Delete children from start to stop and replace with t even if t is a list (nil-root tree). num of children can increase or decrease. For huge child lists, inserting children can force walking rest of children to set their childindex; could be slow. """ raise NotImplementedError def isNil(self): """ Indicates the node is a nil node but may still have children, meaning the tree is a flat list. """ raise NotImplementedError def getTokenStartIndex(self): """ What is the smallest token index (indexing from 0) for this node and its children? 
""" raise NotImplementedError def setTokenStartIndex(self, index): raise NotImplementedError def getTokenStopIndex(self): """ What is the largest token index (indexing from 0) for this node and its children? """ raise NotImplementedError def setTokenStopIndex(self, index): raise NotImplementedError def dupNode(self): raise NotImplementedError def getType(self): """Return a token type; needed for tree parsing.""" raise NotImplementedError def getText(self): raise NotImplementedError def getLine(self): """ In case we don't have a token payload, what is the line for errors? """ raise NotImplementedError def getCharPositionInLine(self): raise NotImplementedError def toStringTree(self): raise NotImplementedError def toString(self): raise NotImplementedError class TreeAdaptor(object): """ @brief Abstract baseclass for tree adaptors. How to create and navigate trees. Rather than have a separate factory and adaptor, I've merged them. Makes sense to encapsulate. This takes the place of the tree construction code generated in the generated code in 2.x and the ASTFactory. I do not need to know the type of a tree at all so they are all generic Objects. This may increase the amount of typecasting needed. :( """ # C o n s t r u c t i o n def createWithPayload(self, payload): """ Create a tree node from Token object; for CommonTree type trees, then the token just becomes the payload. This is the most common create call. Override if you want another kind of node to be built. """ raise NotImplementedError def dupNode(self, treeNode): """Duplicate a single tree node. Override if you want another kind of node to be built.""" raise NotImplementedError def dupTree(self, tree): """Duplicate tree recursively, using dupNode() for each node""" raise NotImplementedError def nil(self): """ Return a nil node (an empty but non-null node) that can hold a list of element as the children. If you want a flat tree (a list) use "t=adaptor.nil(); t.addChild(x); t.addChild(y);" """ raise NotImplementedError def errorNode(self, input, start, stop, exc): """ Return a tree node representing an error. This node records the tokens consumed during error recovery. The start token indicates the input symbol at which the error was detected. The stop token indicates the last symbol consumed during recovery. You must specify the input stream so that the erroneous text can be packaged up in the error node. The exception could be useful to some applications; default implementation stores ptr to it in the CommonErrorNode. This only makes sense during token parsing, not tree parsing. Tree parsing should happen only when parsing and tree construction succeed. """ raise NotImplementedError def isNil(self, tree): """Is tree considered a nil node used to make lists of child nodes?""" raise NotImplementedError def addChild(self, t, child): """ Add a child to the tree t. If child is a flat tree (a list), make all in list children of t. Warning: if t has no children, but child does and child isNil then you can decide it is ok to move children to t via t.children = child.children; i.e., without copying the array. Just make sure that this is consistent with have the user will build ASTs. Do nothing if t or child is null. """ raise NotImplementedError def becomeRoot(self, newRoot, oldRoot): """ If oldRoot is a nil root, just copy or move the children to newRoot. If not a nil root, make oldRoot a child of newRoot. 
old=^(nil a b c), new=r yields ^(r a b c) old=^(a b c), new=r yields ^(r ^(a b c)) If newRoot is a nil-rooted single child tree, use the single child as the new root node. old=^(nil a b c), new=^(nil r) yields ^(r a b c) old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) If oldRoot was null, it's ok, just return newRoot (even if isNil). old=null, new=r yields r old=null, new=^(nil r) yields ^(nil r) Return newRoot. Throw an exception if newRoot is not a simple node or nil root with a single child node--it must be a root node. If newRoot is ^(nil x) return x as newRoot. Be advised that it's ok for newRoot to point at oldRoot's children; i.e., you don't have to copy the list. We are constructing these nodes so we should have this control for efficiency. """ raise NotImplementedError def rulePostProcessing(self, root): """ Given the root of the subtree created for this rule, post process it to do any simplifications or whatever you want. A required behavior is to convert ^(nil singleSubtree) to singleSubtree as the setting of start/stop indexes relies on a single non-nil root for non-flat trees. Flat trees such as for lists like "idlist : ID+ ;" are left alone unless there is only one ID. For a list, the start/stop indexes are set in the nil node. This method is executed after all rule tree construction and right before setTokenBoundaries(). """ raise NotImplementedError def getUniqueID(self, node): """For identifying trees. How to identify nodes so we can say "add node to a prior node"? Even becomeRoot is an issue. Use System.identityHashCode(node) usually. """ raise NotImplementedError # R e w r i t e R u l e s def createFromToken(self, tokenType, fromToken, text=None): """ Create a new node derived from a token, with a new token type and (optionally) new text. This is invoked from an imaginary node ref on right side of a rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"]. This should invoke createToken(Token). """ raise NotImplementedError def createFromType(self, tokenType, text): """Create a new node derived from a token, with a new token type. This is invoked from an imaginary node ref on right side of a rewrite rule as IMAG["IMAG"]. This should invoke createToken(int,String). """ raise NotImplementedError # C o n t e n t def getType(self, t): """For tree parsing, I need to know the token type of a node""" raise NotImplementedError def setType(self, t, type): """Node constructors can set the type of a node""" raise NotImplementedError def getText(self, t): raise NotImplementedError def setText(self, t, text): """Node constructors can set the text of a node""" raise NotImplementedError def getToken(self, t): """Return the token object from which this node was created. Currently used only for printing an error message. The error display routine in BaseRecognizer needs to display where the input the error occurred. If your tree of limitation does not store information that can lead you to the token, you can create a token filled with the appropriate information and pass that back. See BaseRecognizer.getErrorMessage(). """ raise NotImplementedError def setTokenBoundaries(self, t, startToken, stopToken): """ Where are the bounds in the input token stream for this node and all children? Each rule that creates AST nodes will call this method right before returning. Flat trees (i.e., lists) will still usually have a nil root node just to hold the children list. That node would contain the start/stop indexes then. 
""" raise NotImplementedError def getTokenStartIndex(self, t): """ Get the token start index for this subtree; return -1 if no such index """ raise NotImplementedError def getTokenStopIndex(self, t): """ Get the token stop index for this subtree; return -1 if no such index """ raise NotImplementedError # N a v i g a t i o n / T r e e P a r s i n g def getChild(self, t, i): """Get a child 0..n-1 node""" raise NotImplementedError def setChild(self, t, i, child): """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" raise NotImplementedError def deleteChild(self, t, i): """Remove ith child and shift children down from right.""" raise NotImplementedError def getChildCount(self, t): """How many children? If 0, then this is a leaf node""" raise NotImplementedError def getParent(self, t): """ Who is the parent node of this node; if null, implies node is root. If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def setParent(self, t, parent): """ Who is the parent node of this node; if null, implies node is root. If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def getChildIndex(self, t): """ What index is this node in the child list? Range: 0..n-1 If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def setChildIndex(self, t, index): """ What index is this node in the child list? Range: 0..n-1 If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): """ Replace from start to stop child index of parent with t, which might be a list. Number of children may be different after this call. If parent is null, don't do anything; must be at root of overall tree. Can't replace whatever points to the parent externally. Do nothing. """ raise NotImplementedError # Misc def create(self, *args): """ Deprecated, use createWithPayload, createFromToken or createFromType. This method only exists to mimic the Java interface of TreeAdaptor. 
""" if len(args) == 1 and isinstance(args[0], Token): # Object create(Token payload); ## warnings.warn( ## "Using create() is deprecated, use createWithPayload()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createWithPayload(args[0]) if (len(args) == 2 and isinstance(args[0], (int, long)) and isinstance(args[1], Token) ): # Object create(int tokenType, Token fromToken); ## warnings.warn( ## "Using create() is deprecated, use createFromToken()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromToken(args[0], args[1]) if (len(args) == 3 and isinstance(args[0], (int, long)) and isinstance(args[1], Token) and isinstance(args[2], basestring) ): # Object create(int tokenType, Token fromToken, String text); ## warnings.warn( ## "Using create() is deprecated, use createFromToken()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromToken(args[0], args[1], args[2]) if (len(args) == 2 and isinstance(args[0], (int, long)) and isinstance(args[1], basestring) ): # Object create(int tokenType, String text); ## warnings.warn( ## "Using create() is deprecated, use createFromType()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromType(args[0], args[1]) raise TypeError( "No create method with this signature found: %s" % (', '.join(type(v).__name__ for v in args)) ) ############################################################################ # # base implementation of Tree and TreeAdaptor # # Tree # \- BaseTree # # TreeAdaptor # \- BaseTreeAdaptor # ############################################################################ class BaseTree(Tree): """ @brief A generic tree implementation with no payload. You must subclass to actually have any user data. ANTLR v3 uses a list of children approach instead of the child-sibling approach in v2. A flat tree (a list) is an empty node whose children represent the list. An empty, but non-null node is called "nil". """ # BaseTree is abstract, no need to complain about not implemented abstract # methods # pylint: disable-msg=W0223 def __init__(self, node=None): """ Create a new node from an existing node does nothing for BaseTree as there are no fields other than the children list, which cannot be copied as the children are not considered part of this node. """ Tree.__init__(self) self.children = [] self.parent = None self.childIndex = 0 def getChild(self, i): try: return self.children[i] except IndexError: return None def getChildren(self): """@brief Get the children internal List Note that if you directly mess with the list, do so at your own risk. """ # FIXME: mark as deprecated return self.children def getFirstChildWithType(self, treeType): for child in self.children: if child.getType() == treeType: return child return None def getChildCount(self): return len(self.children) def addChild(self, childTree): """Add t as child of this node. Warning: if t has no children, but child does and child isNil then this routine moves children to t via t.children = child.children; i.e., without copying the array. """ # this implementation is much simpler and probably less efficient # than the mumbo-jumbo that Ter did for the Java runtime. 
if childTree is None: return if childTree.isNil(): # t is an empty node possibly with children if self.children is childTree.children: raise ValueError("attempt to add child list to itself") # fix parent pointer and childIndex for new children for idx, child in enumerate(childTree.children): child.parent = self child.childIndex = len(self.children) + idx self.children += childTree.children else: # child is not nil (don't care about children) self.children.append(childTree) childTree.parent = self childTree.childIndex = len(self.children) - 1 def addChildren(self, children): """Add all elements of kids list as children of this node""" self.children += children def setChild(self, i, t): if t is None: return if t.isNil(): raise ValueError("Can't set single child to a list") self.children[i] = t t.parent = self t.childIndex = i def deleteChild(self, i): killed = self.children[i] del self.children[i] # walk rest and decrement their child indexes for idx, child in enumerate(self.children[i:]): child.childIndex = i + idx return killed def replaceChildren(self, startChildIndex, stopChildIndex, newTree): """ Delete children from start to stop and replace with t even if t is a list (nil-root tree). num of children can increase or decrease. For huge child lists, inserting children can force walking rest of children to set their childindex; could be slow. """ if (startChildIndex >= len(self.children) or stopChildIndex >= len(self.children) ): raise IndexError("indexes invalid") replacingHowMany = stopChildIndex - startChildIndex + 1 # normalize to a list of children to add: newChildren if newTree.isNil(): newChildren = newTree.children else: newChildren = [newTree] replacingWithHowMany = len(newChildren) delta = replacingHowMany - replacingWithHowMany if delta == 0: # if same number of nodes, do direct replace for idx, child in enumerate(newChildren): self.children[idx + startChildIndex] = child child.parent = self child.childIndex = idx + startChildIndex else: # length of children changes... # ...delete replaced segment... del self.children[startChildIndex:stopChildIndex+1] # ...insert new segment... self.children[startChildIndex:startChildIndex] = newChildren # ...and fix indeces self.freshenParentAndChildIndexes(startChildIndex) def isNil(self): return False def freshenParentAndChildIndexes(self, offset=0): for idx, child in enumerate(self.children[offset:]): child.childIndex = idx + offset child.parent = self def sanityCheckParentAndChildIndexes(self, parent=None, i=-1): if parent != self.parent: raise ValueError( "parents don't match; expected %r found %r" % (parent, self.parent) ) if i != self.childIndex: raise ValueError( "child indexes don't match; expected %d found %d" % (i, self.childIndex) ) for idx, child in enumerate(self.children): child.sanityCheckParentAndChildIndexes(self, idx) def getChildIndex(self): """BaseTree doesn't track child indexes.""" return 0 def setChildIndex(self, index): """BaseTree doesn't track child indexes.""" pass def getParent(self): """BaseTree doesn't track parent pointers.""" return None def setParent(self, t): """BaseTree doesn't track parent pointers.""" pass def hasAncestor(self, ttype): """Walk upwards looking for ancestor with this token type.""" return self.getAncestor(ttype) is not None def getAncestor(self, ttype): """Walk upwards and get first ancestor with this token type.""" t = self.getParent() while t is not None: if t.getType() == ttype: return t t = t.getParent() return None def getAncestors(self): """Return a list of all ancestors of this node. 
The first node of the list is the root and the last is the parent
        of this node.
        """

        if self.getParent() is None:
            return None

        ancestors = []
        t = self.getParent()
        while t is not None:
            ancestors.insert(0, t)  # insert at start
            t = t.getParent()

        return ancestors


    def toStringTree(self):
        """Print out a whole tree not just a node"""

        if len(self.children) == 0:
            return self.toString()

        buf = []
        if not self.isNil():
            buf.append('(')
            buf.append(self.toString())
            buf.append(' ')

        for i, child in enumerate(self.children):
            if i > 0:
                buf.append(' ')
            buf.append(child.toStringTree())

        if not self.isNil():
            buf.append(')')

        return ''.join(buf)


    def getLine(self):
        return 0


    def getCharPositionInLine(self):
        return 0


    def toString(self):
        """Override to say how a node (not a tree) should look as text"""
        raise NotImplementedError


class BaseTreeAdaptor(TreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.
    """

    # BaseTreeAdaptor is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def nil(self):
        return self.createWithPayload(None)


    def errorNode(self, input, start, stop, exc):
        """
        create tree node that holds the start and stop tokens associated
        with an error.

        If you specify your own kind of tree nodes, you will likely have to
        override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
        if no token payload but you might have to set token type for diff
        node type.

        You don't have to subclass CommonErrorNode; you will likely need to
        subclass your own tree node class to avoid class cast exception.
        """

        return CommonErrorNode(input, start, stop, exc)


    def isNil(self, tree):
        return tree.isNil()


    def dupTree(self, t, parent=None):
        """
        This is generic in the sense that it will work with any kind of
        tree (not just Tree interface). It invokes the adaptor routines
        not the tree node routines to do the construction.
        """

        if t is None:
            return None

        newTree = self.dupNode(t)

        # ensure new subtree root has parent/child index set

        # same index in new tree
        self.setChildIndex(newTree, self.getChildIndex(t))

        self.setParent(newTree, parent)
        for i in range(self.getChildCount(t)):
            child = self.getChild(t, i)
            newSubTree = self.dupTree(child, t)
            self.addChild(newTree, newSubTree)

        return newTree


    def addChild(self, tree, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make
        all in list children of t.  Warning: if t has no children, but
        child does and child isNil then you can decide it is ok to move
        children to t via t.children = child.children; i.e., without
        copying the array.  Just make sure that this is consistent with
        how the user will build ASTs. Do nothing if t or child is null.
        """

        #if isinstance(child, Token):
        #    child = self.createWithPayload(child)

        if tree is not None and child is not None:
            tree.addChild(child)


    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to
        newRoot. If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be
        a root node.  If newRoot is ^(nil x) return x as newRoot.
Be advised that it's ok for newRoot to point at oldRoot's children; i.e., you don't have to copy the list. We are constructing these nodes so we should have this control for efficiency. """ if isinstance(newRoot, Token): newRoot = self.create(newRoot) if oldRoot is None: return newRoot if not isinstance(newRoot, CommonTree): newRoot = self.createWithPayload(newRoot) # handle ^(nil real-node) if newRoot.isNil(): nc = newRoot.getChildCount() if nc == 1: newRoot = newRoot.getChild(0) elif nc > 1: # TODO: make tree run time exceptions hierarchy raise RuntimeError("more than one node as root") # add oldRoot to newRoot; addChild takes care of case where oldRoot # is a flat list (i.e., nil-rooted tree). All children of oldRoot # are added to newRoot. newRoot.addChild(oldRoot) return newRoot def rulePostProcessing(self, root): """Transform ^(nil x) to x and nil to null""" if root is not None and root.isNil(): if root.getChildCount() == 0: root = None elif root.getChildCount() == 1: root = root.getChild(0) # whoever invokes rule will set parent and child index root.setParent(None) root.setChildIndex(-1) return root def createFromToken(self, tokenType, fromToken, text=None): if fromToken is None: return self.createFromType(tokenType, text) assert isinstance(tokenType, (int, long)), type(tokenType).__name__ assert isinstance(fromToken, Token), type(fromToken).__name__ assert text is None or isinstance(text, basestring), type(text).__name__ fromToken = self.createToken(fromToken) fromToken.type = tokenType if text is not None: fromToken.text = text t = self.createWithPayload(fromToken) return t def createFromType(self, tokenType, text): assert isinstance(tokenType, (int, long)), type(tokenType).__name__ assert isinstance(text, basestring) or text is None, type(text).__name__ fromToken = self.createToken(tokenType=tokenType, text=text) t = self.createWithPayload(fromToken) return t def getType(self, t): return t.getType() def setType(self, t, type): raise RuntimeError("don't know enough about Tree node") def getText(self, t): return t.getText() def setText(self, t, text): raise RuntimeError("don't know enough about Tree node") def getChild(self, t, i): return t.getChild(i) def setChild(self, t, i, child): t.setChild(i, child) def deleteChild(self, t, i): return t.deleteChild(i) def getChildCount(self, t): return t.getChildCount() def getUniqueID(self, node): return hash(node) def createToken(self, fromToken=None, tokenType=None, text=None): """ Tell me how to create a token for use with imaginary token nodes. For example, there is probably no input symbol associated with imaginary token DECL, but you need to create it as a payload or whatever for the DECL node as in ^(DECL type ID). If you care what the token payload objects' type is, you should override this method and any other createToken variant. """ raise NotImplementedError ############################################################################ # # common tree implementation # # Tree # \- BaseTree # \- CommonTree # \- CommonErrorNode # # TreeAdaptor # \- BaseTreeAdaptor # \- CommonTreeAdaptor # ############################################################################ class CommonTree(BaseTree): """@brief A tree node that is wrapper for a Token object. After 3.0 release while building tree rewrite stuff, it became clear that computing parent and child index is very difficult and cumbersome. Better to spend the space in every tree node. If you don't want these extra fields, it's easy to cut them out in your own BaseTree subclass. 
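    A small construction sketch (the token type numbers 57 and 4 are
    made up):

        from antlr3.tokens import CommonToken

        root = CommonTree(CommonToken(type=57, text='PLUS'))
        root.addChild(CommonTree(CommonToken(type=4, text='1')))
        root.addChild(CommonTree(CommonToken(type=4, text='2')))
        print root.toStringTree()   # -> (PLUS 1 2)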
""" def __init__(self, payload): BaseTree.__init__(self) # What token indexes bracket all tokens associated with this node # and below? self.startIndex = -1 self.stopIndex = -1 # Who is the parent node of this node; if null, implies node is root self.parent = None # What index is this node in the child list? Range: 0..n-1 self.childIndex = -1 # A single token is the payload if payload is None: self.token = None elif isinstance(payload, CommonTree): self.token = payload.token self.startIndex = payload.startIndex self.stopIndex = payload.stopIndex elif payload is None or isinstance(payload, Token): self.token = payload else: raise TypeError(type(payload).__name__) def getToken(self): return self.token def dupNode(self): return CommonTree(self) def isNil(self): return self.token is None def getType(self): if self.token is None: return INVALID_TOKEN_TYPE return self.token.getType() type = property(getType) def getText(self): if self.token is None: return None return self.token.text text = property(getText) def getLine(self): if self.token is None or self.token.getLine() == 0: if self.getChildCount(): return self.getChild(0).getLine() else: return 0 return self.token.getLine() line = property(getLine) def getCharPositionInLine(self): if self.token is None or self.token.getCharPositionInLine() == -1: if self.getChildCount(): return self.getChild(0).getCharPositionInLine() else: return 0 else: return self.token.getCharPositionInLine() charPositionInLine = property(getCharPositionInLine) def getTokenStartIndex(self): if self.startIndex == -1 and self.token is not None: return self.token.getTokenIndex() return self.startIndex def setTokenStartIndex(self, index): self.startIndex = index tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex) def getTokenStopIndex(self): if self.stopIndex == -1 and self.token is not None: return self.token.getTokenIndex() return self.stopIndex def setTokenStopIndex(self, index): self.stopIndex = index tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex) def setUnknownTokenBoundaries(self): """For every node in this subtree, make sure it's start/stop token's are set. Walk depth first, visit bottom up. Only updates nodes with at least one token index < 0. 
""" if self.children is None: if self.startIndex < 0 or self.stopIndex < 0: self.startIndex = self.stopIndex = self.token.getTokenIndex() return for child in self.children: child.setUnknownTokenBoundaries() if self.startIndex >= 0 and self.stopIndex >= 0: # already set return if self.children: firstChild = self.children[0] lastChild = self.children[-1] self.startIndex = firstChild.getTokenStartIndex() self.stopIndex = lastChild.getTokenStopIndex() def getChildIndex(self): #FIXME: mark as deprecated return self.childIndex def setChildIndex(self, idx): #FIXME: mark as deprecated self.childIndex = idx def getParent(self): #FIXME: mark as deprecated return self.parent def setParent(self, t): #FIXME: mark as deprecated self.parent = t def toString(self): if self.isNil(): return "nil" if self.getType() == INVALID_TOKEN_TYPE: return "" return self.token.text __str__ = toString def toStringTree(self): if not self.children: return self.toString() ret = '' if not self.isNil(): ret += '(%s ' % (self.toString()) ret += ' '.join([child.toStringTree() for child in self.children]) if not self.isNil(): ret += ')' return ret INVALID_NODE = CommonTree(INVALID_TOKEN) class CommonErrorNode(CommonTree): """A node representing erroneous token range in token stream""" def __init__(self, input, start, stop, exc): CommonTree.__init__(self, None) if (stop is None or (stop.getTokenIndex() < start.getTokenIndex() and stop.getType() != EOF ) ): # sometimes resync does not consume a token (when LT(1) is # in follow set. So, stop will be 1 to left to start. adjust. # Also handle case where start is the first token and no token # is consumed during recovery; LT(-1) will return null. stop = start self.input = input self.start = start self.stop = stop self.trappedException = exc def isNil(self): return False def getType(self): return INVALID_TOKEN_TYPE def getText(self): if isinstance(self.start, Token): i = self.start.getTokenIndex() j = self.stop.getTokenIndex() if self.stop.getType() == EOF: j = self.input.size() badText = self.input.toString(i, j) elif isinstance(self.start, Tree): badText = self.input.toString(self.start, self.stop) else: # people should subclass if they alter the tree type so this # next one is for sure correct. badText = "" return badText def toString(self): if isinstance(self.trappedException, MissingTokenException): return ("") elif isinstance(self.trappedException, UnwantedTokenException): return ("") elif isinstance(self.trappedException, MismatchedTokenException): return ("") elif isinstance(self.trappedException, NoViableAltException): return ("") return "" class CommonTreeAdaptor(BaseTreeAdaptor): """ @brief A TreeAdaptor that works with any Tree implementation. It provides really just factory methods; all the work is done by BaseTreeAdaptor. If you would like to have different tokens created than ClassicToken objects, you need to override this and then set the parser tree adaptor to use your subclass. To get your parser to build nodes of a different type, override create(Token), errorNode(), and to be safe, YourTreeClass.dupNode(). dupNode is called to duplicate nodes during rewrite operations. """ def dupNode(self, treeNode): """ Duplicate a node. This is part of the factory; override if you want another kind of node to be built. I could use reflection to prevent having to override this but reflection is slow. 
""" if treeNode is None: return None return treeNode.dupNode() def createWithPayload(self, payload): return CommonTree(payload) def createToken(self, fromToken=None, tokenType=None, text=None): """ Tell me how to create a token for use with imaginary token nodes. For example, there is probably no input symbol associated with imaginary token DECL, but you need to create it as a payload or whatever for the DECL node as in ^(DECL type ID). If you care what the token payload objects' type is, you should override this method and any other createToken variant. """ if fromToken is not None: return CommonToken(oldToken=fromToken) return CommonToken(type=tokenType, text=text) def setTokenBoundaries(self, t, startToken, stopToken): """ Track start/stop token for subtree root created for a rule. Only works with Tree nodes. For rules that match nothing, seems like this will yield start=i and stop=i-1 in a nil node. Might be useful info so I'll not force to be i..i. """ if t is None: return start = 0 stop = 0 if startToken is not None: start = startToken.index if stopToken is not None: stop = stopToken.index t.setTokenStartIndex(start) t.setTokenStopIndex(stop) def getTokenStartIndex(self, t): if t is None: return -1 return t.getTokenStartIndex() def getTokenStopIndex(self, t): if t is None: return -1 return t.getTokenStopIndex() def getText(self, t): if t is None: return None return t.getText() def getType(self, t): if t is None: return INVALID_TOKEN_TYPE return t.getType() def getToken(self, t): """ What is the Token associated with this node? If you are not using CommonTree, then you must override this in your own adaptor. """ if isinstance(t, CommonTree): return t.getToken() return None # no idea what to do def getChild(self, t, i): if t is None: return None return t.getChild(i) def getChildCount(self, t): if t is None: return 0 return t.getChildCount() def getParent(self, t): return t.getParent() def setParent(self, t, parent): t.setParent(parent) def getChildIndex(self, t): if t is None: return 0 return t.getChildIndex() def setChildIndex(self, t, index): t.setChildIndex(index) def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): if parent is not None: parent.replaceChildren(startChildIndex, stopChildIndex, t) ############################################################################ # # streams # # TreeNodeStream # \- BaseTree # \- CommonTree # # TreeAdaptor # \- BaseTreeAdaptor # \- CommonTreeAdaptor # ############################################################################ class TreeNodeStream(IntStream): """@brief A stream of tree nodes It accessing nodes from a tree of some kind. """ # TreeNodeStream is abstract, no need to complain about not implemented # abstract methods # pylint: disable-msg=W0223 def get(self, i): """Get a tree node at an absolute index i; 0..n-1. If you don't want to buffer up nodes, then this method makes no sense for you. """ raise NotImplementedError def LT(self, k): """ Get tree node at current input pointer + i ahead where i=1 is next node. i<0 indicates nodes in the past. So LT(-1) is previous node, but implementations are not required to provide results for k < -1. LT(0) is undefined. For i>=n, return null. Return null for LT(0) and any index that results in an absolute address that is negative. This is analogus to the LT() method of the TokenStream, but this returns a tree node instead of a token. Makes code gen identical for both parser and tree grammars. 
:) """ raise NotImplementedError def getTreeSource(self): """ Where is this stream pulling nodes from? This is not the name, but the object that provides node objects. """ raise NotImplementedError def getTokenStream(self): """ If the tree associated with this stream was created from a TokenStream, you can specify it here. Used to do rule $text attribute in tree parser. Optional unless you use tree parser rule text attribute or output=template and rewrite=true options. """ raise NotImplementedError def getTreeAdaptor(self): """ What adaptor can tell me how to interpret/navigate nodes and trees. E.g., get text of a node. """ raise NotImplementedError def setUniqueNavigationNodes(self, uniqueNavigationNodes): """ As we flatten the tree, we use UP, DOWN nodes to represent the tree structure. When debugging we need unique nodes so we have to instantiate new ones. When doing normal tree parsing, it's slow and a waste of memory to create unique navigation nodes. Default should be false; """ raise NotImplementedError def reset(self): """ Reset the tree node stream in such a way that it acts like a freshly constructed stream. """ raise NotImplementedError def toString(self, start, stop): """ Return the text of all nodes from start to stop, inclusive. If the stream does not buffer all the nodes then it can still walk recursively from start until stop. You can always return null or "" too, but users should not access $ruleLabel.text in an action of course in that case. """ raise NotImplementedError # REWRITING TREES (used by tree parser) def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): """ Replace from start to stop child index of parent with t, which might be a list. Number of children may be different after this call. The stream is notified because it is walking the tree and might need to know you are monkeying with the underlying tree. Also, it might be able to modify the node stream to avoid restreaming for future phases. If parent is null, don't do anything; must be at root of overall tree. Can't replace whatever points to the parent externally. Do nothing. """ raise NotImplementedError class CommonTreeNodeStream(TreeNodeStream): """@brief A buffered stream of tree nodes. Nodes can be from a tree of ANY kind. This node stream sucks all nodes out of the tree specified in the constructor during construction and makes pointers into the tree using an array of Object pointers. The stream necessarily includes pointers to DOWN and UP and EOF nodes. This stream knows how to mark/release for backtracking. This stream is most suitable for tree interpreters that need to jump around a lot or for tree parsers requiring speed (at cost of memory). There is some duplicated functionality here with UnBufferedTreeNodeStream but just in bookkeeping, not tree walking etc... 
    @see UnBufferedTreeNodeStream
    """

    def __init__(self, *args):
        TreeNodeStream.__init__(self)

        if len(args) == 1:
            adaptor = CommonTreeAdaptor()
            tree = args[0]

            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 2:
            adaptor = args[0]
            tree = args[1]

            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 3:
            parent = args[0]
            start = args[1]
            stop = args[2]

            adaptor = parent.adaptor
            tree = parent.root

            nodes = parent.nodes[start:stop]
            down = parent.down
            up = parent.up
            eof = parent.eof

        else:
            raise TypeError("Invalid arguments")

        # all these navigation nodes are shared and hence they
        # cannot contain any line/column info
        if down is not None:
            self.down = down
        else:
            self.down = adaptor.createFromType(DOWN, "DOWN")

        if up is not None:
            self.up = up
        else:
            self.up = adaptor.createFromType(UP, "UP")

        if eof is not None:
            self.eof = eof
        else:
            self.eof = adaptor.createFromType(EOF, "EOF")

        # The complete mapping from stream index to tree node.
        # This buffer includes pointers to DOWN, UP, and EOF nodes.
        # It is built upon ctor invocation.  The elements are type
        # Object as we don't know what the trees look like.

        # Load upon first need of the buffer so we can set token types
        # of interest for reverseIndexing.  Slows us down a wee bit to
        # do all of the if p==-1 testing everywhere though.
        if nodes is not None:
            self.nodes = nodes
        else:
            self.nodes = []

        # Pull nodes from which tree?
        self.root = tree

        # IF this tree (root) was created from a token stream, track it.
        self.tokens = None

        # What tree adaptor was used to build these trees
        self.adaptor = adaptor

        # Reuse same DOWN, UP navigation nodes unless this is true
        self.uniqueNavigationNodes = False

        # The index into the nodes list of the current node (next node
        # to consume).  If -1, nodes array not filled yet.
        self.p = -1

        # Track the last mark() call result value for use in rewind().
        self.lastMarker = None

        # Stack of indexes used for push/pop calls
        self.calls = []


    def fillBuffer(self):
        """Walk tree with depth-first-search and fill nodes buffer.

        Don't do DOWN, UP nodes if it's a list (t is isNil).
        """

        self._fillBuffer(self.root)
        self.p = 0  # buffer of nodes initialized now


    def _fillBuffer(self, t):
        nil = self.adaptor.isNil(t)

        if not nil:
            self.nodes.append(t)  # add this node

        # add DOWN node if t has children
        n = self.adaptor.getChildCount(t)
        if not nil and n > 0:
            self.addNavigationNode(DOWN)

        # and now add all its children
        for c in range(n):
            self._fillBuffer(self.adaptor.getChild(t, c))

        # add UP node if t has children
        if not nil and n > 0:
            self.addNavigationNode(UP)


    def getNodeIndex(self, node):
        """What is the stream index for node? 0..n-1
        Return -1 if node not found.
        """

        if self.p == -1:
            self.fillBuffer()

        for i, t in enumerate(self.nodes):
            if t == node:
                return i

        return -1


    def addNavigationNode(self, ttype):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure.  When debugging we need unique nodes
        so instantiate new ones when uniqueNavigationNodes is true.
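        For example, a two-level tree ^(A B C) is flattened into the node
        sequence A DOWN B C UP (followed by EOF), so LA()/LT() see the
        structure markers just like regular lookahead symbols.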
""" navNode = None if ttype == DOWN: if self.hasUniqueNavigationNodes(): navNode = self.adaptor.createFromType(DOWN, "DOWN") else: navNode = self.down else: if self.hasUniqueNavigationNodes(): navNode = self.adaptor.createFromType(UP, "UP") else: navNode = self.up self.nodes.append(navNode) def get(self, i): if self.p == -1: self.fillBuffer() return self.nodes[i] def LT(self, k): if self.p == -1: self.fillBuffer() if k == 0: return None if k < 0: return self.LB(-k) if self.p + k - 1 >= len(self.nodes): return self.eof return self.nodes[self.p + k - 1] def getCurrentSymbol(self): return self.LT(1) def LB(self, k): """Look backwards k nodes""" if k == 0: return None if self.p - k < 0: return None return self.nodes[self.p - k] def isEOF(self, obj): return self.adaptor.getType(obj) == EOF def getTreeSource(self): return self.root def getSourceName(self): return self.getTokenStream().getSourceName() def getTokenStream(self): return self.tokens def setTokenStream(self, tokens): self.tokens = tokens def getTreeAdaptor(self): return self.adaptor def hasUniqueNavigationNodes(self): return self.uniqueNavigationNodes def setUniqueNavigationNodes(self, uniqueNavigationNodes): self.uniqueNavigationNodes = uniqueNavigationNodes def consume(self): if self.p == -1: self.fillBuffer() self.p += 1 def LA(self, i): return self.adaptor.getType(self.LT(i)) def mark(self): if self.p == -1: self.fillBuffer() self.lastMarker = self.index() return self.lastMarker def release(self, marker=None): # no resources to release pass def index(self): return self.p def rewind(self, marker=None): if marker is None: marker = self.lastMarker self.seek(marker) def seek(self, index): if self.p == -1: self.fillBuffer() self.p = index def push(self, index): """ Make stream jump to a new location, saving old location. Switch back with pop(). """ self.calls.append(self.p) # save current index self.seek(index) def pop(self): """ Seek back to previous index saved during last push() call. Return top of stack (return index). 
""" ret = self.calls.pop(-1) self.seek(ret) return ret def reset(self): self.p = 0 self.lastMarker = 0 self.calls = [] def size(self): if self.p == -1: self.fillBuffer() return len(self.nodes) # TREE REWRITE INTERFACE def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): if parent is not None: self.adaptor.replaceChildren( parent, startChildIndex, stopChildIndex, t ) def __str__(self): """Used for testing, just return the token type stream""" if self.p == -1: self.fillBuffer() return ' '.join([str(self.adaptor.getType(node)) for node in self.nodes ]) def toString(self, start, stop): if start is None or stop is None: return None if self.p == -1: self.fillBuffer() #System.out.println("stop: "+stop); #if ( start instanceof CommonTree ) # System.out.print("toString: "+((CommonTree)start).getToken()+", "); #else # System.out.println(start); #if ( stop instanceof CommonTree ) # System.out.println(((CommonTree)stop).getToken()); #else # System.out.println(stop); # if we have the token stream, use that to dump text in order if self.tokens is not None: beginTokenIndex = self.adaptor.getTokenStartIndex(start) endTokenIndex = self.adaptor.getTokenStopIndex(stop) # if it's a tree, use start/stop index from start node # else use token range from start/stop nodes if self.adaptor.getType(stop) == UP: endTokenIndex = self.adaptor.getTokenStopIndex(start) elif self.adaptor.getType(stop) == EOF: endTokenIndex = self.size() -2 # don't use EOF return self.tokens.toString(beginTokenIndex, endTokenIndex) # walk nodes looking for start i, t = 0, None for i, t in enumerate(self.nodes): if t == start: break # now walk until we see stop, filling string buffer with text buf = [] t = self.nodes[i] while t != stop: text = self.adaptor.getText(t) if text is None: text = " " + self.adaptor.getType(t) buf.append(text) i += 1 t = self.nodes[i] # include stop node too text = self.adaptor.getText(stop) if text is None: text = " " +self.adaptor.getType(stop) buf.append(text) return ''.join(buf) ## iterator interface def __iter__(self): if self.p == -1: self.fillBuffer() for node in self.nodes: yield node ############################################################################# # # tree parser # ############################################################################# class TreeParser(BaseRecognizer): """@brief Baseclass for generated tree parsers. A parser for a stream of tree nodes. "tree grammars" result in a subclass of this. All the error reporting and recovery is shared with Parser via the BaseRecognizer superclass. 
""" def __init__(self, input, state=None): BaseRecognizer.__init__(self, state) self.input = None self.setTreeNodeStream(input) def reset(self): BaseRecognizer.reset(self) # reset all recognizer state variables if self.input is not None: self.input.seek(0) # rewind the input def setTreeNodeStream(self, input): """Set the input stream""" self.input = input def getTreeNodeStream(self): return self.input def getSourceName(self): return self.input.getSourceName() def getCurrentInputSymbol(self, input): return input.LT(1) def getMissingSymbol(self, input, e, expectedTokenType, follow): tokenText = "" adaptor = input.adaptor return adaptor.createToken( CommonToken(type=expectedTokenType, text=tokenText)) # precompiled regex used by inContext dotdot = ".*[^.]\\.\\.[^.].*" doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*" dotdotPattern = re.compile(dotdot) doubleEtcPattern = re.compile(doubleEtc) def inContext(self, context, adaptor=None, tokenName=None, t=None): """Check if current node in input has a context. Context means sequence of nodes towards root of tree. For example, you might say context is "MULT" which means my parent must be MULT. "CLASS VARDEF" says current node must be child of a VARDEF and whose parent is a CLASS node. You can use "..." to mean zero-or-more nodes. "METHOD ... VARDEF" means my parent is VARDEF and somewhere above that is a METHOD node. The first node in the context is not necessarily the root. The context matcher stops matching and returns true when it runs out of context. There is no way to force the first node to be the root. """ return _inContext( self.input.getTreeAdaptor(), self.getTokenNames(), self.input.LT(1), context) @classmethod def _inContext(cls, adaptor, tokenNames, t, context): """The worker for inContext. It's static and full of parameters for testing purposes. """ if cls.dotdotPattern.match(context): # don't allow "..", must be "..." raise ValueError("invalid syntax: ..") if cls.doubleEtcPattern.match(context): # don't allow double "..." raise ValueError("invalid syntax: ... ...") # ensure spaces around ... context = context.replace("...", " ... ") context = context.strip() nodes = context.split() ni = len(nodes) - 1 t = adaptor.getParent(t) while ni >= 0 and t is not None: if nodes[ni] == "...": # walk upwards until we see nodes[ni-1] then continue walking if ni == 0: # ... at start is no-op return True goal = nodes[ni-1] ancestor = cls._getAncestor(adaptor, tokenNames, t, goal) if ancestor is None: return False t = ancestor ni -= 1 name = tokenNames[adaptor.getType(t)] if name != nodes[ni]: return False # advance to parent and to previous element in context node list ni -= 1 t = adaptor.getParent(t) # at root but more nodes to match if t is None and ni >= 0: return False return True @staticmethod def _getAncestor(adaptor, tokenNames, t, goal): """Helper for static inContext.""" while t is not None: name = tokenNames[adaptor.getType(t)] if name == goal: return t t = adaptor.getParent(t) return None def matchAny(self, ignore): # ignore stream, copy of this.input """ Match '.' in tree parser has special meaning. Skip node or entire tree if node has children. If children, scan until corresponding UP node. """ self._state.errorRecovery = False look = self.input.LT(1) if self.input.getTreeAdaptor().getChildCount(look) == 0: self.input.consume() # not subtree, consume 1 node and return return # current node is a subtree, skip to corresponding UP. 
        # must count nesting level to get right UP
        level = 0
        tokenType = self.input.getTreeAdaptor().getType(look)
        while tokenType != EOF and not (tokenType == UP and level == 0):
            self.input.consume()
            look = self.input.LT(1)
            tokenType = self.input.getTreeAdaptor().getType(look)
            if tokenType == DOWN:
                level += 1

            elif tokenType == UP:
                level -= 1

        self.input.consume()  # consume UP


    def mismatch(self, input, ttype, follow):
        """
        We have DOWN/UP nodes in the stream that have no line info; override.
        Plus we want to alter the exception type. Don't try to recover
        from tree parser errors inline...
        """

        raise MismatchedTreeNodeException(ttype, input)


    def getErrorHeader(self, e):
        """
        Prefix error message with the grammar name because message is
        always intended for the programmer; the parser built the input
        tree, not the user.
        """

        return (self.getGrammarFileName() +
                ": node from %sline %s:%s"
                % (['', "after "][e.approximateLineInfo],
                   e.line,
                   e.charPositionInLine
                   )
                )

    def getErrorMessage(self, e, tokenNames):
        """
        Tree parsers parse nodes; they usually have a token object as
        payload. Set the exception token and do the default behavior.
        """

        if isinstance(self, TreeParser):
            adaptor = e.input.getTreeAdaptor()
            e.token = adaptor.getToken(e.node)
            if e.token is None:  # could be an UP/DOWN node
                e.token = CommonToken(
                    type=adaptor.getType(e.node),
                    text=adaptor.getText(e.node)
                    )

        return BaseRecognizer.getErrorMessage(self, e, tokenNames)


    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))


    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))


#############################################################################
#
# tree visitor
#
#############################################################################

class TreeVisitor(object):
    """Do a depth first walk of a tree, applying pre() and post() actions
    as we go.
    """

    def __init__(self, adaptor=None):
        if adaptor is not None:
            self.adaptor = adaptor
        else:
            self.adaptor = CommonTreeAdaptor()

    def visit(self, t, pre_action=None, post_action=None):
        """Visit every node in tree t and trigger an action for each node
        before/after having visited all of its children. Bottom up walk.
        Execute both actions even if t has no children. Ignore return
        results from transforming children since they will have altered
        the child list of this node (their parent). Return result of
        applying post action to this node.

        The Python version differs from the Java version by taking two
        callables 'pre_action' and 'post_action' instead of a class
        instance that wraps those methods. Those callables must accept a
        TreeNode as their single argument and return the (potentially
        transformed or replaced) TreeNode.
        """

        isNil = self.adaptor.isNil(t)
        if pre_action is not None and not isNil:
            # if rewritten, walk children of new t
            t = pre_action(t)

        idx = 0
        while idx < self.adaptor.getChildCount(t):
            child = self.adaptor.getChild(t, idx)
            self.visit(child, pre_action, post_action)
            idx += 1

        if post_action is not None and not isNil:
            t = post_action(t)

        return t


#############################################################################
#
# tree iterator
#
#############################################################################

class TreeIterator(object):
    """
    Return a node stream from a doubly-linked tree whose nodes
    know what child index they are.

    Emit navigation nodes (DOWN, UP, and EOF) to show tree structure.
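    A minimal sketch of the emitted sequence (the token type numbers are
    arbitrary):

        from antlr3.tokens import CommonToken
        from antlr3.tree import CommonTree, TreeIterator

        root = CommonTree(CommonToken(type=5, text='A'))
        root.addChild(CommonTree(CommonToken(type=6, text='B')))

        for node in TreeIterator(root):
            print node.toString()   # A, DOWN, B, UP, EOF -- one per line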
""" def __init__(self, tree, adaptor=None): if adaptor is None: adaptor = CommonTreeAdaptor() self.root = tree self.adaptor = adaptor self.first_time = True self.tree = tree # If we emit UP/DOWN nodes, we need to spit out multiple nodes per # next() call. self.nodes = [] # navigation nodes to return during walk and at end self.down = adaptor.createFromType(DOWN, "DOWN") self.up = adaptor.createFromType(UP, "UP") self.eof = adaptor.createFromType(EOF, "EOF") def reset(self): self.first_time = True self.tree = self.root self.nodes = [] def __iter__(self): return self def has_next(self): if self.first_time: return self.root is not None if len(self.nodes) > 0: return True if self.tree is None: return False if self.adaptor.getChildCount(self.tree) > 0: return True # back at root? return self.adaptor.getParent(self.tree) is not None def next(self): if not self.has_next(): raise StopIteration if self.first_time: # initial condition self.first_time = False if self.adaptor.getChildCount(self.tree) == 0: # single node tree (special) self.nodes.append(self.eof) return self.tree return self.tree # if any queued up, use those first if len(self.nodes) > 0: return self.nodes.pop(0) # no nodes left? if self.tree is None: return self.eof # next node will be child 0 if any children if self.adaptor.getChildCount(self.tree) > 0: self.tree = self.adaptor.getChild(self.tree, 0) # real node is next after DOWN self.nodes.append(self.tree) return self.down # if no children, look for next sibling of tree or ancestor parent = self.adaptor.getParent(self.tree) # while we're out of siblings, keep popping back up towards root while (parent is not None and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)): # we're moving back up self.nodes.append(self.up) self.tree = parent parent = self.adaptor.getParent(self.tree) # no nodes left? if parent is None: self.tree = None # back at root? nothing left then self.nodes.append(self.eof) # add to queue, might have UP nodes in there return self.nodes.pop(0) # must have found a node with an unvisited sibling # move to it and return it nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1 self.tree = self.adaptor.getChild(parent, nextSiblingIndex) self.nodes.append(self.tree) # add to queue, might have UP nodes in there return self.nodes.pop(0) ############################################################################# # # streams for rule rewriting # ############################################################################# class RewriteRuleElementStream(object): """@brief Internal helper class. A generic list of elements tracked in an alternative to be used in a -> rewrite rule. We need to subclass to fill in the next() method, which returns either an AST node wrapped around a token payload or an existing subtree. Once you start next()ing, do not try to add more elements. It will break the cursor tracking I believe. @see org.antlr.runtime.tree.RewriteRuleSubtreeStream @see org.antlr.runtime.tree.RewriteRuleTokenStream TODO: add mechanism to detect/puke on modification after reading from stream """ def __init__(self, adaptor, elementDescription, elements=None): # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(), # which bumps it to 1 meaning no more elements. self.cursor = 0 # Track single elements w/o creating a list. Upon 2nd add, alloc list self.singleElement = None # The list of tokens or subtrees we are tracking self.elements = None # Once a node / subtree has been used in a stream, it must be dup'd # from then on. 
Streams are reset after subrules so that the streams # can be reused in future subrules. So, reset must set a dirty bit. # If dirty, then next() always returns a dup. self.dirty = False # The element or stream description; usually has name of the token or # rule reference that this list tracks. Can include rulename too, but # the exception would track that info. self.elementDescription = elementDescription self.adaptor = adaptor if isinstance(elements, (list, tuple)): # Create a stream, but feed off an existing list self.singleElement = None self.elements = elements else: # Create a stream with one element self.add(elements) def reset(self): """ Reset the condition of this stream so that it appears we have not consumed any of its elements. Elements themselves are untouched. Once we reset the stream, any future use will need duplicates. Set the dirty bit. """ self.cursor = 0 self.dirty = True def add(self, el): if el is None: return if self.elements is not None: # if in list, just add self.elements.append(el) return if self.singleElement is None: # no elements yet, track w/o list self.singleElement = el return # adding 2nd element, move to list self.elements = [] self.elements.append(self.singleElement) self.singleElement = None self.elements.append(el) def nextTree(self): """ Return the next element in the stream. If out of elements, throw an exception unless size()==1. If size is 1, then return elements[0]. Return a duplicate node/subtree if stream is out of elements and size==1. If we've already used the element, dup (dirty bit set). """ if (self.dirty or (self.cursor >= len(self) and len(self) == 1) ): # if out of elements and size is 1, dup el = self._next() return self.dup(el) # test size above then fetch el = self._next() return el def _next(self): """ do the work of getting the next element, making sure that it's a tree node or subtree. Deal with the optimization of single- element list versus list of size > 1. Throw an exception if the stream is empty or we're out of elements and size>1. protected so you can override in a subclass if necessary. """ if len(self) == 0: raise RewriteEmptyStreamException(self.elementDescription) if self.cursor >= len(self): # out of elements? if len(self) == 1: # if size is 1, it's ok; return and we'll dup return self.toTree(self.singleElement) # out of elements and size was not 1, so we can't dup raise RewriteCardinalityException(self.elementDescription) # we have elements if self.singleElement is not None: self.cursor += 1 # move cursor even for single element list return self.toTree(self.singleElement) # must have more than one in list, pull from elements o = self.toTree(self.elements[self.cursor]) self.cursor += 1 return o def dup(self, el): """ When constructing trees, sometimes we need to dup a token or AST subtree. Dup'ing a token means just creating another AST node around it. For trees, you must call the adaptor.dupTree() unless the element is for a tree root; then it must be a node dup. """ raise NotImplementedError def toTree(self, el): """ Ensure stream emits trees; tokens must be converted to AST nodes. AST nodes can be passed through unmolested. """ return el def hasNext(self): return ( (self.singleElement is not None and self.cursor < 1) or (self.elements is not None and self.cursor < len(self.elements) ) ) def size(self): if self.singleElement is not None: return 1 if self.elements is not None: return len(self.elements) return 0 __len__ = size def getDescription(self): """Deprecated. 
Directly access elementDescription attribute""" return self.elementDescription class RewriteRuleTokenStream(RewriteRuleElementStream): """@brief Internal helper class.""" def toTree(self, el): # Don't convert to a tree unless they explicitly call nextTree. # This way we can do hetero tree nodes in rewrite. return el def nextNode(self): t = self._next() return self.adaptor.createWithPayload(t) def nextToken(self): return self._next() def dup(self, el): raise TypeError("dup can't be called for a token stream.") class RewriteRuleSubtreeStream(RewriteRuleElementStream): """@brief Internal helper class.""" def nextNode(self): """ Treat next element as a single node even if it's a subtree. This is used instead of next() when the result has to be a tree root node. Also prevents us from duplicating recently-added children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration must dup the type node, but ID has been added. Referencing a rule result twice is ok; dup entire tree as we can't be adding trees as root; e.g., expr expr. Hideous code duplication here with super.next(). Can't think of a proper way to refactor. This needs to always call dup node and super.next() doesn't know which to call: dup node or dup tree. """ if (self.dirty or (self.cursor >= len(self) and len(self) == 1) ): # if out of elements and size is 1, dup (at most a single node # since this is for making root nodes). el = self._next() return self.adaptor.dupNode(el) # test size above then fetch el = self._next() while self.adaptor.isNil(el) and self.adaptor.getChildCount(el) == 1: el = self.adaptor.getChild(el, 0) # dup just the root (want node here) return self.adaptor.dupNode(el) def dup(self, el): return self.adaptor.dupTree(el) class RewriteRuleNodeStream(RewriteRuleElementStream): """ Queues up nodes matched on left side of -> in a tree parser. This is the analog of RewriteRuleTokenStream for normal parsers. """ def nextNode(self): return self._next() def toTree(self, el): return self.adaptor.dupNode(el) def dup(self, el): # we dup every node, so don't have to worry about calling dup; short- #circuited next() so it doesn't call. raise TypeError("dup can't be called for a node stream.") class TreeRuleReturnScope(RuleReturnScope): """ This is identical to the ParserRuleReturnScope except that the start property is a tree nodes not Token object when you are parsing trees. To be generic the tree node types have to be Object. """ def __init__(self): self.start = None self.tree = None def getStart(self): return self.start def getTree(self): return self.tree python-antlr3-3.5.2/antlr3/treewizard.py0000644000175000017500000004342012653072152016710 0ustar zigozigo""" @package antlr3.tree @brief ANTLR3 runtime package, treewizard module A utility module to create ASTs at runtime. See for an overview. Note that the API of the Python implementation is slightly different. """ # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. 
The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] from antlr3.constants import INVALID_TOKEN_TYPE from antlr3.tokens import CommonToken from antlr3.tree import CommonTree, CommonTreeAdaptor def computeTokenTypes(tokenNames): """ Compute a dict that is an inverted index of tokenNames (which maps int token types to names). """ if tokenNames is None: return {} return dict((name, type) for type, name in enumerate(tokenNames)) ## token types for pattern parser EOF = -1 BEGIN = 1 END = 2 ID = 3 ARG = 4 PERCENT = 5 COLON = 6 DOT = 7 class TreePatternLexer(object): def __init__(self, pattern): ## The tree pattern to lex like "(A B C)" self.pattern = pattern ## Index into input string self.p = -1 ## Current char self.c = None ## How long is the pattern in char? self.n = len(pattern) ## Set when token type is ID or ARG self.sval = None self.error = False self.consume() __idStartChar = frozenset( 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_' ) __idChar = __idStartChar | frozenset('0123456789') def nextToken(self): self.sval = "" while self.c != EOF: if self.c in (' ', '\n', '\r', '\t'): self.consume() continue if self.c in self.__idStartChar: self.sval += self.c self.consume() while self.c in self.__idChar: self.sval += self.c self.consume() return ID if self.c == '(': self.consume() return BEGIN if self.c == ')': self.consume() return END if self.c == '%': self.consume() return PERCENT if self.c == ':': self.consume() return COLON if self.c == '.': self.consume() return DOT if self.c == '[': # grab [x] as a string, returning x self.consume() while self.c != ']': if self.c == '\\': self.consume() if self.c != ']': self.sval += '\\' self.sval += self.c else: self.sval += self.c self.consume() self.consume() return ARG self.consume() self.error = True return EOF return EOF def consume(self): self.p += 1 if self.p >= self.n: self.c = EOF else: self.c = self.pattern[self.p] class TreePatternParser(object): def __init__(self, tokenizer, wizard, adaptor): self.tokenizer = tokenizer self.wizard = wizard self.adaptor = adaptor self.ttype = tokenizer.nextToken() # kickstart def pattern(self): if self.ttype == BEGIN: return self.parseTree() elif self.ttype == ID: node = self.parseNode() if self.ttype == EOF: return node return None # extra junk on end return None def parseTree(self): if self.ttype != BEGIN: return None self.ttype = self.tokenizer.nextToken() root = self.parseNode() if root is None: return None while self.ttype in (BEGIN, ID, PERCENT, DOT): if self.ttype == BEGIN: subtree = self.parseTree() self.adaptor.addChild(root, subtree) else: child = self.parseNode() if child is None: return None self.adaptor.addChild(root, child) if self.ttype != END: return None self.ttype 
= self.tokenizer.nextToken() return root def parseNode(self): # "%label:" prefix label = None if self.ttype == PERCENT: self.ttype = self.tokenizer.nextToken() if self.ttype != ID: return None label = self.tokenizer.sval self.ttype = self.tokenizer.nextToken() if self.ttype != COLON: return None self.ttype = self.tokenizer.nextToken() # move to ID following colon # Wildcard? if self.ttype == DOT: self.ttype = self.tokenizer.nextToken() wildcardPayload = CommonToken(0, ".") node = WildcardTreePattern(wildcardPayload) if label is not None: node.label = label return node # "ID" or "ID[arg]" if self.ttype != ID: return None tokenName = self.tokenizer.sval self.ttype = self.tokenizer.nextToken() if tokenName == "nil": return self.adaptor.nil() text = tokenName # check for arg arg = None if self.ttype == ARG: arg = self.tokenizer.sval text = arg self.ttype = self.tokenizer.nextToken() # create node treeNodeType = self.wizard.getTokenType(tokenName) if treeNodeType == INVALID_TOKEN_TYPE: return None node = self.adaptor.createFromType(treeNodeType, text) if label is not None and isinstance(node, TreePattern): node.label = label if arg is not None and isinstance(node, TreePattern): node.hasTextArg = True return node class TreePattern(CommonTree): """ When using %label:TOKENNAME in a tree for parse(), we must track the label. """ def __init__(self, payload): CommonTree.__init__(self, payload) self.label = None self.hasTextArg = None def toString(self): if self.label is not None: return '%' + self.label + ':' + CommonTree.toString(self) else: return CommonTree.toString(self) class WildcardTreePattern(TreePattern): pass class TreePatternTreeAdaptor(CommonTreeAdaptor): """This adaptor creates TreePattern objects for use during scan()""" def createWithPayload(self, payload): return TreePattern(payload) class TreeWizard(object): """ Build and navigate trees with this object. Must know about the names of tokens so you have to pass in a map or array of token names (from which this class can build the map). I.e., Token DECL means nothing unless the class can translate it to a token type. In order to create nodes and navigate, this class needs a TreeAdaptor. This class can build a token type -> node index for repeated use or for iterating over the various nodes with a particular type. This class works in conjunction with the TreeAdaptor rather than moving all this functionality into the adaptor. An adaptor helps build and navigate trees using methods. This class helps you do it with string patterns like "(A B C)". You can create a tree from that pattern or match subtrees against it. """ def __init__(self, adaptor=None, tokenNames=None, typeMap=None): if adaptor is None: self.adaptor = CommonTreeAdaptor() else: self.adaptor = adaptor if typeMap is None: self.tokenNameToTypeMap = computeTokenTypes(tokenNames) else: if tokenNames is not None: raise ValueError("Can't have both tokenNames and typeMap") self.tokenNameToTypeMap = typeMap def getTokenType(self, tokenName): """Using the map of token names to token types, return the type.""" try: return self.tokenNameToTypeMap[tokenName] except KeyError: return INVALID_TOKEN_TYPE def create(self, pattern): """ Create a tree or node from the indicated tree pattern that closely follows ANTLR tree grammar tree element syntax: (root child1 ... child2). You can also just pass in a node: ID Any node can have a text argument: ID[foo] (notice there are no quotes around foo--it's clear it's a string). nil is a special name meaning "give me a nil node". 
Useful for making lists: (nil A B C) is a list of A B C. """ tokenizer = TreePatternLexer(pattern) parser = TreePatternParser(tokenizer, self, self.adaptor) return parser.pattern() def index(self, tree): """Walk the entire tree and make a node name to nodes mapping. For now, use recursion but later nonrecursive version may be more efficient. Returns a dict int -> list where the list is of your AST node type. The int is the token type of the node. """ m = {} self._index(tree, m) return m def _index(self, t, m): """Do the work for index""" if t is None: return ttype = self.adaptor.getType(t) elements = m.get(ttype) if elements is None: m[ttype] = elements = [] elements.append(t) for i in range(self.adaptor.getChildCount(t)): child = self.adaptor.getChild(t, i) self._index(child, m) def find(self, tree, what): """Return a list of matching token. what may either be an integer specifzing the token type to find or a string with a pattern that must be matched. """ if isinstance(what, (int, long)): return self._findTokenType(tree, what) elif isinstance(what, basestring): return self._findPattern(tree, what) else: raise TypeError("'what' must be string or integer") def _findTokenType(self, t, ttype): """Return a List of tree nodes with token type ttype""" nodes = [] def visitor(tree, parent, childIndex, labels): nodes.append(tree) self.visit(t, ttype, visitor) return nodes def _findPattern(self, t, pattern): """Return a List of subtrees matching pattern.""" subtrees = [] # Create a TreePattern from the pattern tokenizer = TreePatternLexer(pattern) parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) tpattern = parser.pattern() # don't allow invalid patterns if (tpattern is None or tpattern.isNil() or isinstance(tpattern, WildcardTreePattern)): return None rootTokenType = tpattern.getType() def visitor(tree, parent, childIndex, label): if self._parse(tree, tpattern, None): subtrees.append(tree) self.visit(t, rootTokenType, visitor) return subtrees def visit(self, tree, what, visitor): """Visit every node in tree matching what, invoking the visitor. If what is a string, it is parsed as a pattern and only matching subtrees will be visited. The implementation uses the root node of the pattern in combination with visit(t, ttype, visitor) so nil-rooted patterns are not allowed. Patterns with wildcard roots are also not allowed. If what is an integer, it is used as a token type and visit will match all nodes of that type (this is faster than the pattern match). The labels arg of the visitor action method is never set (it's None) since using a token type rather than a pattern doesn't let us set a label. """ if isinstance(what, (int, long)): self._visitType(tree, None, 0, what, visitor) elif isinstance(what, basestring): self._visitPattern(tree, what, visitor) else: raise TypeError("'what' must be string or integer") def _visitType(self, t, parent, childIndex, ttype, visitor): """Do the recursive work for visit""" if t is None: return if self.adaptor.getType(t) == ttype: visitor(t, parent, childIndex, None) for i in range(self.adaptor.getChildCount(t)): child = self.adaptor.getChild(t, i) self._visitType(child, t, i, ttype, visitor) def _visitPattern(self, tree, pattern, visitor): """ For all subtrees that match the pattern, execute the visit action. 
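        A sketch of such a visit (the token vocabulary is invented; the
        first four entries pad the reserved type numbers 0-3):

            from antlr3.tree import CommonTreeAdaptor
            from antlr3.treewizard import TreeWizard

            names = ['', '', '', '', 'ASSIGN', 'ID', 'INT']
            wiz = TreeWizard(CommonTreeAdaptor(), tokenNames=names)
            t = wiz.create("(ASSIGN ID[x] INT[1])")

            def action(node, parent, child_index, labels):
                print labels['lhs'].toString()   # prints: x

            wiz.visit(t, "(ASSIGN %lhs:ID .)", action)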
""" # Create a TreePattern from the pattern tokenizer = TreePatternLexer(pattern) parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) tpattern = parser.pattern() # don't allow invalid patterns if (tpattern is None or tpattern.isNil() or isinstance(tpattern, WildcardTreePattern)): return rootTokenType = tpattern.getType() def rootvisitor(tree, parent, childIndex, labels): labels = {} if self._parse(tree, tpattern, labels): visitor(tree, parent, childIndex, labels) self.visit(tree, rootTokenType, rootvisitor) def parse(self, t, pattern, labels=None): """ Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels on the various nodes and '.' (dot) as the node/subtree wildcard, return true if the pattern matches and fill the labels Map with the labels pointing at the appropriate nodes. Return false if the pattern is malformed or the tree does not match. If a node specifies a text arg in pattern, then that must match for that node in t. """ tokenizer = TreePatternLexer(pattern) parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor()) tpattern = parser.pattern() return self._parse(t, tpattern, labels) def _parse(self, t1, tpattern, labels): """ Do the work for parse. Check to see if the tpattern fits the structure and token types in t1. Check text if the pattern has text arguments on nodes. Fill labels map with pointers to nodes in tree matched against nodes in pattern with labels. """ # make sure both are non-null if t1 is None or tpattern is None: return False # check roots (wildcard matches anything) if not isinstance(tpattern, WildcardTreePattern): if self.adaptor.getType(t1) != tpattern.getType(): return False # if pattern has text, check node text if (tpattern.hasTextArg and self.adaptor.getText(t1) != tpattern.getText()): return False if tpattern.label is not None and labels is not None: # map label in pattern to node in t1 labels[tpattern.label] = t1 # check children n1 = self.adaptor.getChildCount(t1) n2 = tpattern.getChildCount() if n1 != n2: return False for i in range(n1): child1 = self.adaptor.getChild(t1, i) child2 = tpattern.getChild(i) if not self._parse(child1, child2, labels): return False return True def equals(self, t1, t2, adaptor=None): """ Compare t1 and t2; return true if token types/text, structure match exactly. The trees are examined in their entirety so that (A B) does not match (A B C) nor (A (B C)). """ if adaptor is None: adaptor = self.adaptor return self._equals(t1, t2, adaptor) def _equals(self, t1, t2, adaptor): # make sure both are non-null if t1 is None or t2 is None: return False # check roots if adaptor.getType(t1) != adaptor.getType(t2): return False if adaptor.getText(t1) != adaptor.getText(t2): return False # check children n1 = adaptor.getChildCount(t1) n2 = adaptor.getChildCount(t2) if n1 != n2: return False for i in range(n1): child1 = adaptor.getChild(t1, i) child2 = adaptor.getChild(t2, i) if not self._equals(child1, child2, adaptor): return False return True python-antlr3-3.5.2/antlr3/exceptions.py0000644000175000017500000003070312653072152016711 0ustar zigozigo"""ANTLR3 exception hierarchy""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] from antlr3.constants import INVALID_TOKEN_TYPE class BacktrackingFailed(Exception): """@brief Raised to signal failed backtrack attempt""" pass class RecognitionException(Exception): """@brief The root of the ANTLR exception hierarchy. To avoid English-only error messages and to generally make things as flexible as possible, these exceptions are not created with strings, but rather the information necessary to generate an error. Then the various reporting methods in Parser and Lexer can be overridden to generate a localized error message. For example, MismatchedToken exceptions are built with the expected token type. So, don't expect getMessage() to return anything. Note that as of Java 1.4, you can access the stack trace, which means that you can compute the complete trace of rules from the start symbol. This gives you considerable context information with which to generate useful error messages. ANTLR generates code that throws exceptions upon recognition error and also generates code to catch these exceptions in each rule. If you want to quit upon first error, you can turn off the automatic error handling mechanism using rulecatch action, but you still need to override methods mismatch and recoverFromMismatchSet. In general, the recognition exceptions can track where in a grammar a problem occurred and/or what was the expected input. While the parser knows its state (such as current input symbol and line info) that state can change before the exception is reported so current token index is computed and stored at exception time. From this info, you can perhaps print an entire line of input not just a single token, for example. Better to just say the recognizer had a problem and then let the parser figure out a fancy report. """ def __init__(self, input=None): Exception.__init__(self) # What input stream did the error occur in? self.input = None # What is index of token/char were we looking at when the error # occurred? self.index = None # The current Token when an error occurred. Since not all streams # can retrieve the ith Token, we have to track the Token object. # For parsers. Even when it's a tree parser, token might be set. self.token = None # If this is a tree parser exception, node is set to the node with # the problem. self.node = None # The current char when an error occurred. For lexers. self.c = None # Track the line at which the error occurred in case this is # generated from a lexer. 
We need to track this since the # unexpected char doesn't carry the line info. self.line = None self.charPositionInLine = None # If you are parsing a tree node stream, you will encounter som # imaginary nodes w/o line/col info. We now search backwards looking # for most recent token with line/col info, but notify getErrorHeader() # that info is approximate. self.approximateLineInfo = False if input is not None: self.input = input self.index = input.index() # late import to avoid cyclic dependencies from antlr3.streams import TokenStream, CharStream from antlr3.tree import TreeNodeStream if isinstance(self.input, TokenStream): self.token = self.input.LT(1) self.line = self.token.line self.charPositionInLine = self.token.charPositionInLine if isinstance(self.input, TreeNodeStream): self.extractInformationFromTreeNodeStream(self.input) else: if isinstance(self.input, CharStream): self.c = self.input.LT(1) self.line = self.input.line self.charPositionInLine = self.input.charPositionInLine else: self.c = self.input.LA(1) def extractInformationFromTreeNodeStream(self, nodes): from antlr3.tree import Tree, CommonTree from antlr3.tokens import CommonToken self.node = nodes.LT(1) adaptor = nodes.adaptor payload = adaptor.getToken(self.node) if payload is not None: self.token = payload if payload.line <= 0: # imaginary node; no line/pos info; scan backwards i = -1 priorNode = nodes.LT(i) while priorNode is not None: priorPayload = adaptor.getToken(priorNode) if priorPayload is not None and priorPayload.line > 0: # we found the most recent real line / pos info self.line = priorPayload.line self.charPositionInLine = priorPayload.charPositionInLine self.approximateLineInfo = True break i -= 1 priorNode = nodes.LT(i) else: # node created from real token self.line = payload.line self.charPositionInLine = payload.charPositionInLine elif isinstance(self.node, Tree): self.line = self.node.line self.charPositionInLine = self.node.charPositionInLine if isinstance(self.node, CommonTree): self.token = self.node.token else: type = adaptor.getType(self.node) text = adaptor.getText(self.node) self.token = CommonToken(type=type, text=text) def getUnexpectedType(self): """Return the token type or char of the unexpected input element""" from antlr3.streams import TokenStream from antlr3.tree import TreeNodeStream if isinstance(self.input, TokenStream): return self.token.type elif isinstance(self.input, TreeNodeStream): adaptor = self.input.treeAdaptor return adaptor.getType(self.node) else: return self.c unexpectedType = property(getUnexpectedType) class MismatchedTokenException(RecognitionException): """@brief A mismatched char or Token or tree node.""" def __init__(self, expecting, input): RecognitionException.__init__(self, input) self.expecting = expecting def __str__(self): #return "MismatchedTokenException("+self.expecting+")" return "MismatchedTokenException(%r!=%r)" % ( self.getUnexpectedType(), self.expecting ) __repr__ = __str__ class UnwantedTokenException(MismatchedTokenException): """An extra token while parsing a TokenStream""" def getUnexpectedToken(self): return self.token def __str__(self): exp = ", expected %s" % self.expecting if self.expecting == INVALID_TOKEN_TYPE: exp = "" if self.token is None: return "UnwantedTokenException(found=%s%s)" % (None, exp) return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp) __repr__ = __str__ class MissingTokenException(MismatchedTokenException): """ We were expecting a token but it's not found. The current token is actually what we wanted next. 
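    A sketch of inspecting such an error by hand (the rule name
    'statement' and the attribute values are illustrative; generated
    parsers normally recover and report via displayRecognitionError()
    instead of letting this propagate):

        try:
            parser.statement()
        except MissingTokenException, exc:
            print "missing token type %s, parser inserted %r" % (
                exc.getMissingType(), exc.inserted)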
""" def __init__(self, expecting, input, inserted): MismatchedTokenException.__init__(self, expecting, input) self.inserted = inserted def getMissingType(self): return self.expecting def __str__(self): if self.inserted is not None and self.token is not None: return "MissingTokenException(inserted %r at %r)" % ( self.inserted, self.token.text) if self.token is not None: return "MissingTokenException(at %r)" % self.token.text return "MissingTokenException" __repr__ = __str__ class MismatchedRangeException(RecognitionException): """@brief The next token does not match a range of expected types.""" def __init__(self, a, b, input): RecognitionException.__init__(self, input) self.a = a self.b = b def __str__(self): return "MismatchedRangeException(%r not in [%r..%r])" % ( self.getUnexpectedType(), self.a, self.b ) __repr__ = __str__ class MismatchedSetException(RecognitionException): """@brief The next token does not match a set of expected types.""" def __init__(self, expecting, input): RecognitionException.__init__(self, input) self.expecting = expecting def __str__(self): return "MismatchedSetException(%r not in %r)" % ( self.getUnexpectedType(), self.expecting ) __repr__ = __str__ class MismatchedNotSetException(MismatchedSetException): """@brief Used for remote debugger deserialization""" def __str__(self): return "MismatchedNotSetException(%r!=%r)" % ( self.getUnexpectedType(), self.expecting ) __repr__ = __str__ class NoViableAltException(RecognitionException): """@brief Unable to decide which alternative to choose.""" def __init__( self, grammarDecisionDescription, decisionNumber, stateNumber, input ): RecognitionException.__init__(self, input) self.grammarDecisionDescription = grammarDecisionDescription self.decisionNumber = decisionNumber self.stateNumber = stateNumber def __str__(self): return "NoViableAltException(%r!=[%r])" % ( self.unexpectedType, self.grammarDecisionDescription ) __repr__ = __str__ class EarlyExitException(RecognitionException): """@brief The recognizer did not match anything for a (..)+ loop.""" def __init__(self, decisionNumber, input): RecognitionException.__init__(self, input) self.decisionNumber = decisionNumber class FailedPredicateException(RecognitionException): """@brief A semantic predicate failed during validation. Validation of predicates occurs when normally parsing the alternative just like matching a token. Disambiguating predicate evaluation occurs when we hoist a predicate into a prediction decision. """ def __init__(self, input, ruleName, predicateText): RecognitionException.__init__(self, input) self.ruleName = ruleName self.predicateText = predicateText def __str__(self): return "FailedPredicateException("+self.ruleName+",{"+self.predicateText+"}?)" __repr__ = __str__ class MismatchedTreeNodeException(RecognitionException): """@brief The next tree mode does not match the expected type.""" def __init__(self, expecting, input): RecognitionException.__init__(self, input) self.expecting = expecting def __str__(self): return "MismatchedTreeNodeException(%r!=%r)" % ( self.getUnexpectedType(), self.expecting ) __repr__ = __str__ python-antlr3-3.5.2/antlr3/constants.py0000644000175000017500000000407712653072152016551 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. 
Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] EOF = -1 ## All tokens go to the parser (unless skip() is called in that rule) # on a particular "channel". The parser tunes to a particular channel # so that whitespace etc... can go to the parser on a "hidden" channel. DEFAULT_CHANNEL = 0 ## Anything on different channel than DEFAULT_CHANNEL is not parsed # by parser. HIDDEN_CHANNEL = 99 # Predefined token types EOR_TOKEN_TYPE = 1 ## # imaginary tree navigation type; traverse "get child" link DOWN = 2 ## #imaginary tree navigation type; finish with a child list UP = 3 MIN_TOKEN_TYPE = UP+1 INVALID_TOKEN_TYPE = 0 python-antlr3-3.5.2/antlr3/__init__.py0000644000175000017500000001226012653072152016265 0ustar zigozigo""" @package antlr3 @brief ANTLR3 runtime package This module contains all support classes, which are needed to use recognizers generated by ANTLR3. @mainpage \note Please be warned that the line numbers in the API documentation do not match the real locations in the source code of the package. This is an unintended artifact of doxygen, which I could only convince to use the correct module names by concatenating all files from the package into a single module file... Here is a little overview over the most commonly used classes provided by this runtime: @section recognizers Recognizers These recognizers are baseclasses for the code which is generated by ANTLR3. - BaseRecognizer: Base class with common recognizer functionality. - Lexer: Base class for lexers. - Parser: Base class for parsers. - tree.TreeParser: Base class for %tree parser. @section streams Streams Each recognizer pulls its input from one of the stream classes below. Streams handle stuff like buffering, look-ahead and seeking. A character stream is usually the first element in the pipeline of a typical ANTLR3 application. It is used as the input for a Lexer. - ANTLRStringStream: Reads from a string objects. The input should be a unicode object, or ANTLR3 will have trouble decoding non-ascii data. - ANTLRFileStream: Opens a file and read the contents, with optional character decoding. - ANTLRInputStream: Reads the date from a file-like object, with optional character decoding. A Parser needs a TokenStream as input (which in turn is usually fed by a Lexer): - CommonTokenStream: A basic and most commonly used TokenStream implementation. 
- TokenRewriteStream: A modification of CommonTokenStream that allows the stream to be altered (by the Parser). See the 'tweak' example for a usecase. And tree.TreeParser finally fetches its input from a tree.TreeNodeStream: - tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream implementation. @section tokenstrees Tokens and Trees A Lexer emits Token objects which are usually buffered by a TokenStream. A Parser can build a Tree, if the output=AST option has been set in the grammar. The runtime provides these Token implementations: - CommonToken: A basic and most commonly used Token implementation. - ClassicToken: A Token object as used in ANTLR 2.x, used to %tree construction. Tree objects are wrapper for Token objects. - tree.CommonTree: A basic and most commonly used Tree implementation. A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the input Token objects. - tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor implementation. @section Exceptions RecognitionException are generated, when a recognizer encounters incorrect or unexpected input. - RecognitionException - MismatchedRangeException - MismatchedSetException - MismatchedNotSetException . - MismatchedTokenException - MismatchedTreeNodeException - NoViableAltException - EarlyExitException - FailedPredicateException . . A tree.RewriteCardinalityException is raised, when the parsers hits a cardinality mismatch during AST construction. Although this is basically a bug in your grammar, it can only be detected at runtime. - tree.RewriteCardinalityException - tree.RewriteEarlyExitException - tree.RewriteEmptyStreamException . . """ # tree.RewriteRuleElementStream # tree.RewriteRuleSubtreeStream # tree.RewriteRuleTokenStream # CharStream # DFA # TokenSource # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. __version__ = '3.4' # This runtime is compatible with generated parsers using the following # API versions. 'HEAD' is only used by unittests. 
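# A typical pipeline wires these pieces together like so (a sketch;
# ExprLexer/ExprParser stand for classes generated by ANTLR3 from a
# grammar with a start rule named 'prog'):
#
#   import antlr3
#   from ExprLexer import ExprLexer
#   from ExprParser import ExprParser
#
#   char_stream = antlr3.ANTLRStringStream(u'1+2\n')
#   lexer = ExprLexer(char_stream)
#   tokens = antlr3.CommonTokenStream(lexer)
#   parser = ExprParser(tokens)
#   parser.prog()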
compatible_api_versions = ['HEAD', 1] from constants import * from dfa import * from exceptions import * from recognizers import * from streams import * from tokens import * python-antlr3-3.5.2/antlr3/recognizers.py0000644000175000017500000014367712653072152017101 0ustar zigozigo"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] import sys import inspect from antlr3 import compatible_api_versions from antlr3.constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \ EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE from antlr3.exceptions import RecognitionException, MismatchedTokenException, \ MismatchedRangeException, MismatchedTreeNodeException, \ NoViableAltException, EarlyExitException, MismatchedSetException, \ MismatchedNotSetException, FailedPredicateException, \ BacktrackingFailed, UnwantedTokenException, MissingTokenException from antlr3.tokens import CommonToken, SKIP_TOKEN from antlr3.compat import set, frozenset, reversed class RecognizerSharedState(object): """ The set of fields needed by an abstract recognizer to recognize input and recover from errors etc... As a separate state object, it can be shared among multiple grammars; e.g., when one grammar imports another. These fields are publically visible but the actual state pointer per parser is protected. """ def __init__(self): # Track the set of token types that can follow any rule invocation. # Stack grows upwards. self.following = [] # This is true when we see an error and before having successfully # matched a token. Prevents generation of more than one error message # per error. self.errorRecovery = False # The index into the input stream where the last error occurred. # This is used to prevent infinite loops where an error is found # but no token is consumed during recovery...another error is found, # ad naseum. This is a failsafe mechanism to guarantee that at least # one token/tree node is consumed for two errors. self.lastErrorIndex = -1 # If 0, no backtracking is going on. Safe to exec actions etc... # If >0 then it's the level of backtracking. 
self.backtracking = 0 # An array[size num rules] of Map that tracks # the stop token index for each rule. ruleMemo[ruleIndex] is # the memoization table for ruleIndex. For key ruleStartIndex, you # get back the stop token for associated rule or MEMO_RULE_FAILED. # # This is only used if rule memoization is on (which it is by default). self.ruleMemo = None ## Did the recognizer encounter a syntax error? Track how many. self.syntaxErrors = 0 # LEXER FIELDS (must be in same state object to avoid casting # constantly in generated code and Lexer object) :( ## The goal of all lexer rules/methods is to create a token object. # This is an instance variable as multiple rules may collaborate to # create a single token. nextToken will return this object after # matching lexer rule(s). If you subclass to allow multiple token # emissions, then set this to the last token to be matched or # something nonnull so that the auto token emit mechanism will not # emit another token. self.token = None ## What character index in the stream did the current token start at? # Needed, for example, to get the text for current token. Set at # the start of nextToken. self.tokenStartCharIndex = -1 ## The line on which the first character of the token resides self.tokenStartLine = None ## The character position of first character within the line self.tokenStartCharPositionInLine = None ## The channel number for the current token self.channel = None ## The token type for the current token self.type = None ## You can set the text for the current token to override what is in # the input char buffer. Use setText() or can set this instance var. self.text = None class BaseRecognizer(object): """ @brief Common recognizer functionality. A generic recognizer that can handle recognizers generated from lexer, parser, and tree grammars. This is all the parsing support code essentially; most of it is error recovery stuff and backtracking. """ MEMO_RULE_FAILED = -2 MEMO_RULE_UNKNOWN = -1 # copies from Token object for convenience in actions DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL # for convenience in actions HIDDEN = HIDDEN_CHANNEL # overridden by generated subclasses tokenNames = None # The api_version attribute has been introduced in 3.3. If it is not # overwritten in the generated recognizer, we assume a default of v0. api_version = 0 def __init__(self, state=None): # Input stream of the recognizer. Must be initialized by a subclass. self.input = None ## State of a lexer, parser, or tree parser are collected into a state # object so the state can be shared. This sharing is needed to # have one grammar import others and share same error variables # and other state variables. It's a kind of explicit multiple # inheritance via delegation of methods and shared state. 
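        # e.g. when grammar A imports grammar B, the parser generated for
        # A constructs its B delegate with A's own state object, so both
        # see the same error flags (a sketch of the idea; B_parser is an
        # illustrative name, not actual generated code):
        #   self.gB = B_parser(input, state=self._state)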
if state is None: state = RecognizerSharedState() self._state = state if self.api_version not in compatible_api_versions: raise RuntimeError( ("ANTLR version mismatch: " "The recognizer has been generated with API V%s, " "but this runtime does not support this.") % self.api_version) # this one only exists to shut up pylint :( def setInput(self, input): self.input = input def reset(self): """ reset the parser's state; subclasses must rewinds the input stream """ # wack everything related to error recovery if self._state is None: # no shared state work to do return self._state.following = [] self._state.errorRecovery = False self._state.lastErrorIndex = -1 self._state.syntaxErrors = 0 # wack everything related to backtracking and memoization self._state.backtracking = 0 if self._state.ruleMemo is not None: self._state.ruleMemo = {} def match(self, input, ttype, follow): """ Match current input symbol against ttype. Attempt single token insertion or deletion error recovery. If that fails, throw MismatchedTokenException. To turn off single token insertion or deletion error recovery, override recoverFromMismatchedToken() and have it throw an exception. See TreeParser.recoverFromMismatchedToken(). This way any error in a rule will cause an exception and immediate exit from rule. Rule would recover by resynchronizing to the set of symbols that can follow rule ref. """ matchedSymbol = self.getCurrentInputSymbol(input) if self.input.LA(1) == ttype: self.input.consume() self._state.errorRecovery = False return matchedSymbol if self._state.backtracking > 0: # FIXME: need to return matchedSymbol here as well. damn!! raise BacktrackingFailed matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow) return matchedSymbol def matchAny(self, input): """Match the wildcard: in a symbol""" self._state.errorRecovery = False self.input.consume() def mismatchIsUnwantedToken(self, input, ttype): return input.LA(2) == ttype def mismatchIsMissingToken(self, input, follow): if follow is None: # we have no information about the follow; we can only consume # a single token and hope for the best return False # compute what can follow this grammar element reference if EOR_TOKEN_TYPE in follow: viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW() follow = follow | viableTokensFollowingThisRule if len(self._state.following) > 0: # remove EOR if we're not the start symbol follow = follow - set([EOR_TOKEN_TYPE]) # if current token is consistent with what could come after set # then we know we're missing a token; error recovery is free to # "insert" the missing token if input.LA(1) in follow or EOR_TOKEN_TYPE in follow: return True return False def reportError(self, e): """Report a recognition problem. This method sets errorRecovery to indicate the parser is recovering not parsing. Once in recovery mode, no errors are generated. To get out of recovery mode, the parser must successfully match a token (after a resync). So it will go: 1. error occurs 2. enter recovery mode, report error 3. consume until token found in resynch set 4. try to resume parsing 5. next match() will reset errorRecovery mode If you override, make sure to update syntaxErrors if you care about that. """ # if we've already reported an error and have not matched a token # yet successfully, don't report any errors. 
if self._state.errorRecovery: return self._state.syntaxErrors += 1 # don't count spurious self._state.errorRecovery = True self.displayRecognitionError(self.tokenNames, e) def displayRecognitionError(self, tokenNames, e): hdr = self.getErrorHeader(e) msg = self.getErrorMessage(e, tokenNames) self.emitErrorMessage(hdr+" "+msg) def getErrorMessage(self, e, tokenNames): """ What error message should be generated for the various exception types? Not very object-oriented code, but I like having all error message generation within one method rather than spread among all of the exception classes. This also makes it much easier for the exception handling because the exception classes do not have to have pointers back to this object to access utility routines and so on. Also, changing the message for an exception type would be difficult because you would have to subclassing exception, but then somehow get ANTLR to make those kinds of exception objects instead of the default. This looks weird, but trust me--it makes the most sense in terms of flexibility. For grammar debugging, you will want to override this to add more information such as the stack frame with getRuleInvocationStack(e, this.getClass().getName()) and, for no viable alts, the decision description and state etc... Override this to change the message generated for one or more exception types. """ if isinstance(e, UnwantedTokenException): tokenName = "" if e.expecting == EOF: tokenName = "EOF" else: tokenName = self.tokenNames[e.expecting] msg = "extraneous input %s expecting %s" % ( self.getTokenErrorDisplay(e.getUnexpectedToken()), tokenName ) elif isinstance(e, MissingTokenException): tokenName = "" if e.expecting == EOF: tokenName = "EOF" else: tokenName = self.tokenNames[e.expecting] msg = "missing %s at %s" % ( tokenName, self.getTokenErrorDisplay(e.token) ) elif isinstance(e, MismatchedTokenException): tokenName = "" if e.expecting == EOF: tokenName = "EOF" else: tokenName = self.tokenNames[e.expecting] msg = "mismatched input " \ + self.getTokenErrorDisplay(e.token) \ + " expecting " \ + tokenName elif isinstance(e, MismatchedTreeNodeException): tokenName = "" if e.expecting == EOF: tokenName = "EOF" else: tokenName = self.tokenNames[e.expecting] msg = "mismatched tree node: %s expecting %s" \ % (e.node, tokenName) elif isinstance(e, NoViableAltException): msg = "no viable alternative at input " \ + self.getTokenErrorDisplay(e.token) elif isinstance(e, EarlyExitException): msg = "required (...)+ loop did not match anything at input " \ + self.getTokenErrorDisplay(e.token) elif isinstance(e, MismatchedSetException): msg = "mismatched input " \ + self.getTokenErrorDisplay(e.token) \ + " expecting set " \ + repr(e.expecting) elif isinstance(e, MismatchedNotSetException): msg = "mismatched input " \ + self.getTokenErrorDisplay(e.token) \ + " expecting set " \ + repr(e.expecting) elif isinstance(e, FailedPredicateException): msg = "rule " \ + e.ruleName \ + " failed predicate: {" \ + e.predicateText \ + "}?" else: msg = str(e) return msg def getNumberOfSyntaxErrors(self): """ Get number of recognition errors (lexer, parser, tree parser). Each recognizer tracks its own number. So parser and lexer each have separate count. Does not count the spurious errors found between an error and next valid token match See also reportError() """ return self._state.syntaxErrors def getErrorHeader(self, e): """ What is the error header, normally line/character position information? 
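        With a source name, the default header reads like
        "input.txt line 3:12" (values illustrative). Override to change it:

            def getErrorHeader(self, e):
                return "[%s @ %d:%d]" % (
                    self.getSourceName() or '<stdin>',
                    e.line, e.charPositionInLine)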
""" source_name = self.getSourceName() if source_name is not None: return "%s line %d:%d" % (source_name, e.line, e.charPositionInLine) return "line %d:%d" % (e.line, e.charPositionInLine) def getTokenErrorDisplay(self, t): """ How should a token be displayed in an error message? The default is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.toString() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a new Java type. """ s = t.text if s is None: if t.type == EOF: s = "" else: s = "<"+t.type+">" return repr(s) def emitErrorMessage(self, msg): """Override this method to change where error messages go""" sys.stderr.write(msg + '\n') def recover(self, input, re): """ Recover from an error found on the input stream. This is for NoViableAlt and mismatched symbol exceptions. If you enable single token insertion and deletion, this will usually not handle mismatched symbol exceptions but there could be a mismatched token that the match() routine could not recover from. """ # PROBLEM? what if input stream is not the same as last time # perhaps make lastErrorIndex a member of input if self._state.lastErrorIndex == input.index(): # uh oh, another error at same token index; must be a case # where LT(1) is in the recovery token set so nothing is # consumed; consume a single token so at least to prevent # an infinite loop; this is a failsafe. input.consume() self._state.lastErrorIndex = input.index() followSet = self.computeErrorRecoverySet() self.beginResync() self.consumeUntil(input, followSet) self.endResync() def beginResync(self): """ A hook to listen in on the token consumption during error recovery. The DebugParser subclasses this to fire events to the listenter. """ pass def endResync(self): """ A hook to listen in on the token consumption during error recovery. The DebugParser subclasses this to fire events to the listenter. """ pass def computeErrorRecoverySet(self): """ Compute the error recovery set for the current rule. During rule invocation, the parser pushes the set of tokens that can follow that rule reference on the stack; this amounts to computing FIRST of what follows the rule reference in the enclosing rule. This local follow set only includes tokens from within the rule; i.e., the FIRST computation done by ANTLR stops at the end of a rule. EXAMPLE When you find a "no viable alt exception", the input is not consistent with any of the alternatives for rule r. The best thing to do is to consume tokens until you see something that can legally follow a call to r *or* any rule that called r. You don't want the exact set of viable next tokens because the input might just be missing a token--you might consume the rest of the input looking for one of the missing tokens. Consider grammar: a : '[' b ']' | '(' b ')' ; b : c '^' INT ; c : ID | INT ; At each rule invocation, the set of tokens that could follow that rule is pushed on a stack. 
Here are the various "local" follow sets: FOLLOW(b1_in_a) = FIRST(']') = ']' FOLLOW(b2_in_a) = FIRST(')') = ')' FOLLOW(c_in_b) = FIRST('^') = '^' Upon erroneous input "[]", the call chain is a -> b -> c and, hence, the follow context stack is: depth local follow set after call to rule 0 <EOF> a (from main()) 1 ']' b 2 '^' c Notice that ')' is not included, because b would have to have been called from a different context in rule a for ')' to be included. For error recovery, we cannot consider FOLLOW(c) (context-sensitive or otherwise). We need the combined set of all context-sensitive FOLLOW sets--the set of all tokens that could follow any reference in the call chain. We need to resync to one of those tokens. Note that FOLLOW(c)='^' and if we resync'd to that token, we'd consume until EOF. We need to sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. In this case, for input "[]", LA(1) is in this set so we would not consume anything and, after printing an error, rule c would return normally. It would not find the required '^' though. At this point, it gets a mismatched token error and throws an exception (since LA(1) is not in the viable following token set). The rule exception handler tries to recover, but finds the same recovery set and doesn't consume anything. Rule b exits normally returning to rule a. Now it finds the ']' (and with the successful match exits errorRecovery mode). So, you can see that the parser walks up the call chain looking for the token that was a member of the recovery set. Errors are not generated in errorRecovery mode. ANTLR's error recovery mechanism is based upon original ideas: "Algorithms + Data Structures = Programs" by Niklaus Wirth and "A note on error recovery in recursive descent parsers": http://portal.acm.org/citation.cfm?id=947902.947905 Later, Josef Grosch had some good ideas: "Efficient and Comfortable Error Recovery in Recursive Descent Parsers": ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip Like Grosch, I implemented local FOLLOW sets that are combined at run-time upon error to avoid overhead during parsing. """ return self.combineFollows(False) def computeContextSensitiveRuleFOLLOW(self): """ Compute the context-sensitive FOLLOW set for current rule. This is the set of token types that can follow a specific rule reference given a specific call chain. You get the set of viable tokens that can possibly come next (lookahead depth 1) given the current call chain. Contrast this with the definition of plain FOLLOW for rule r: FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)} where x in T* and alpha, beta in V*; T is the set of terminals and V is the set of terminals and nonterminals. In other words, FOLLOW(r) is the set of all tokens that can possibly follow references to r in *any* sentential form (context). At runtime, however, we know precisely which context applies as we have the call chain. We may compute the exact (rather than covering superset) set of following tokens. For example, consider grammar: stat : ID '=' expr ';' // FOLLOW(stat)=={EOF} | "return" expr '.' ; expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'} atom : INT // FOLLOW(atom)=={'+',')',';','.'} | '(' expr ')' ; The FOLLOW sets are all inclusive whereas context-sensitive FOLLOW sets are precisely what could follow a rule reference.
For input "i=(3);", here is the derivation: stat => ID '=' expr ';' => ID '=' atom ('+' atom)* ';' => ID '=' '(' expr ')' ('+' atom)* ';' => ID '=' '(' atom ')' ('+' atom)* ';' => ID '=' '(' INT ')' ('+' atom)* ';' => ID '=' '(' INT ')' ';' At the "3" token, you'd have a call chain of stat -> expr -> atom -> expr -> atom What can follow that specific nested ref to atom? Exactly ')' as you can see by looking at the derivation of this specific input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}. You want the exact viable token set when recovering from a token mismatch. Upon token mismatch, if LA(1) is a member of the viable next token set, then you know there is most likely a missing token in the input stream. "Insert" one by just not throwing an exception. """ return self.combineFollows(True) def combineFollows(self, exact): followSet = set() for idx, localFollowSet in reversed(list(enumerate(self._state.following))): followSet |= localFollowSet if exact: # can we see end of rule? if EOR_TOKEN_TYPE in localFollowSet: # Only leave EOR in set if at top (start rule); this lets # us know if we have to include follow(start rule); i.e., EOF if idx > 0: followSet.remove(EOR_TOKEN_TYPE) else: # can't see end of rule, quit break return followSet def recoverFromMismatchedToken(self, input, ttype, follow): """Attempt to recover from a single missing or extra token. EXTRA TOKEN LA(1) is not what we are looking for. If LA(2) has the right token, however, then assume LA(1) is some extra spurious token. Delete it and use LA(2) as if we were doing a normal match(), which advances the input. MISSING TOKEN If the current token is consistent with what could come after ttype, then it is ok to 'insert' the missing token, else throw an exception. For example, input 'i=(3;' is clearly missing the ')'. When the parser returns from the nested call to expr, it will have a call chain: stat -> expr -> atom and it will be trying to match the ')' at this point in the derivation: => ID '=' '(' INT ')' ('+' atom)* ';' ^ match() will see that ';' doesn't match ')' and report a mismatched token error. To recover, it sees that LA(1)==';' is in the set of tokens that can follow the ')' token reference in rule atom. It can assume that you forgot the ')'.
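        A compact sketch of both recovery paths (illustrative only; the
        token types shown are hypothetical):

            # extra token: expecting ')' and LA(2) is ')'
            #   '(' INT INT ')'  ->  spurious INT consumed ("deleted"),
            #                        then ')' matched as if nothing happened
            # missing token: expecting ')' and LA(1) is in follow(')')
            #   '(' INT ';'      ->  a <missing ...> token is conjured via
            #                        getMissingSymbol() and returned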
""" e = None # if next token is what we are looking for then "delete" this token if self.mismatchIsUnwantedToken(input, ttype): e = UnwantedTokenException(ttype, input) self.beginResync() input.consume() # simply delete extra token self.endResync() # report after consuming so AW sees the token in the exception self.reportError(e) # we want to return the token we're actually matching matchedSymbol = self.getCurrentInputSymbol(input) # move past ttype token as if all were ok input.consume() return matchedSymbol # can't recover with single token deletion, try insertion if self.mismatchIsMissingToken(input, follow): inserted = self.getMissingSymbol(input, e, ttype, follow) e = MissingTokenException(ttype, input, inserted) # report after inserting so AW sees the token in the exception self.reportError(e) return inserted # even that didn't work; must throw the exception e = MismatchedTokenException(ttype, input) raise e def recoverFromMismatchedSet(self, input, e, follow): """Not currently used""" if self.mismatchIsMissingToken(input, follow): self.reportError(e) # we don't know how to conjure up a token for sets yet return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow) # TODO do single token deletion like above for Token mismatch raise e def getCurrentInputSymbol(self, input): """ Match needs to return the current input symbol, which gets put into the label for the associated token ref; e.g., x=ID. Token and tree parsers need to return different objects. Rather than test for input stream type or change the IntStream interface, I use a simple method to ask the recognizer to tell me what the current input symbol is. This is ignored for lexers. """ return None def getMissingSymbol(self, input, e, expectedTokenType, follow): """Conjure up a missing token during error recovery. The recognizer attempts to recover from single missing symbols. But, actions might refer to that missing symbol. For example, x=ID {f($x);}. The action clearly assumes that there has been an identifier matched previously and that $x points at that token. If that token is missing, but the next token in the stream is what we want we assume that this token is missing and we keep going. Because we have to return some token to replace the missing token, we have to conjure one up. This method gives the user control over the tokens returned for missing tokens. Mostly, you will want to create something special for identifier tokens. For literals such as '{' and ',', the default action in the parser or tree parser works. It simply creates a CommonToken of the appropriate type. The text will be the token. If you change what tokens must be created by the lexer, override this method to create the appropriate tokens. """ return None ## def recoverFromMissingElement(self, input, e, follow): ## """ ## This code is factored out from mismatched token and mismatched set ## recovery. It handles "single token insertion" error recovery for ## both. No tokens are consumed to recover from insertions. Return ## true if recovery was possible else return false. 
## """ ## if self.mismatchIsMissingToken(input, follow): ## self.reportError(e) ## return True ## # nothing to do; throw exception ## return False def consumeUntil(self, input, tokenTypes): """ Consume tokens until one matches the given token or token set tokenTypes can be a single token type or a set of token types """ if not isinstance(tokenTypes, (set, frozenset)): tokenTypes = frozenset([tokenTypes]) ttype = input.LA(1) while ttype != EOF and ttype not in tokenTypes: input.consume() ttype = input.LA(1) def getRuleInvocationStack(self): """ Return List of the rules in your parser instance leading up to a call to this method. You could override if you want more details such as the file/line info of where in the parser java code a rule is invoked. This is very useful for error messages and for context-sensitive error recovery. You must be careful, if you subclass a generated recognizers. The default implementation will only search the module of self for rules, but the subclass will not contain any rules. You probably want to override this method to look like def getRuleInvocationStack(self): return self._getRuleInvocationStack(.__module__) where is the class of the generated recognizer, e.g. the superclass of self. """ return self._getRuleInvocationStack(self.__module__) def _getRuleInvocationStack(cls, module): """ A more general version of getRuleInvocationStack where you can pass in, for example, a RecognitionException to get it's rule stack trace. This routine is shared with all recognizers, hence, static. TODO: move to a utility class or something; weird having lexer call this """ # mmmhhh,... perhaps look at the first argument # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of # requested recognizer... rules = [] for frame in reversed(inspect.stack()): code = frame[0].f_code codeMod = inspect.getmodule(code) if codeMod is None: continue # skip frames not in requested module if codeMod.__name__ != module: continue # skip some unwanted names if code.co_name in ('nextToken', ''): continue rules.append(code.co_name) return rules _getRuleInvocationStack = classmethod(_getRuleInvocationStack) def getBacktrackingLevel(self): return self._state.backtracking def setBacktrackingLevel(self, n): self._state.backtracking = n def getGrammarFileName(self): """For debugging and other purposes, might want the grammar name. Have ANTLR generate an implementation for this method. """ return self.grammarFileName def getSourceName(self): raise NotImplementedError def toStrings(self, tokens): """A convenience method for use most often with template rewrites. Convert a List to List """ if tokens is None: return None return [token.text for token in tokens] def getRuleMemoization(self, ruleIndex, ruleStartIndex): """ Given a rule number and a start token index number, return MEMO_RULE_UNKNOWN if the rule has not parsed input starting from start index. If this rule has parsed input starting from the start index before, then return where the rule stopped parsing. It returns the index of the last token matched by the rule. """ if ruleIndex not in self._state.ruleMemo: self._state.ruleMemo[ruleIndex] = {} return self._state.ruleMemo[ruleIndex].get( ruleStartIndex, self.MEMO_RULE_UNKNOWN ) def alreadyParsedRule(self, input, ruleIndex): """ Has this rule already parsed input at the current index in the input stream? Return the stop token index or MEMO_RULE_UNKNOWN. If we attempted but failed to parse properly before, return MEMO_RULE_FAILED. 
This method has a side-effect: if we have seen this input for this rule and successfully parsed before, then seek ahead to 1 past the stop token matched for this rule last time. """ stopIndex = self.getRuleMemoization(ruleIndex, input.index()) if stopIndex == self.MEMO_RULE_UNKNOWN: return False if stopIndex == self.MEMO_RULE_FAILED: raise BacktrackingFailed else: input.seek(stopIndex + 1) return True def memoize(self, input, ruleIndex, ruleStartIndex, success): """ Record whether or not this rule parsed the input at this position successfully. """ if success: stopTokenIndex = input.index() - 1 else: stopTokenIndex = self.MEMO_RULE_FAILED if ruleIndex in self._state.ruleMemo: self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex def traceIn(self, ruleName, ruleIndex, inputSymbol): sys.stdout.write("enter %s %s" % (ruleName, inputSymbol)) if self._state.backtracking > 0: sys.stdout.write(" backtracking=%s" % self._state.backtracking) sys.stdout.write('\n') def traceOut(self, ruleName, ruleIndex, inputSymbol): sys.stdout.write("exit %s %s" % (ruleName, inputSymbol)) if self._state.backtracking > 0: sys.stdout.write(" backtracking=%s" % self._state.backtracking) # mmmm... we use BacktrackingFailed exceptions now. So how could we # get that information here? #if self._state.failed: # sys.stdout.write(" failed") #else: # sys.stdout.write(" succeeded") sys.stdout.write('\n') class TokenSource(object): """ @brief Abstract baseclass for token producers. A source of tokens must provide a sequence of tokens via nextToken() and also must reveal its source of characters; CommonToken's text is computed from a CharStream; it only stores indices into the char stream. Errors from the lexer are never passed to the parser. Either you want to keep going or you do not upon token recognition error. If you do not want to continue lexing then you do not want to continue parsing. Just throw an exception not under RecognitionException and Python will naturally toss you all the way out of the recognizers. If you want to continue lexing then you should not throw an exception to the parser--it has already requested a token. Keep lexing until you get a valid one. Just report errors and keep going, looking for a valid token. """ def nextToken(self): """Return a Token object from your input stream (usually a CharStream). Do not fail/return upon lexing error; keep chewing on the characters until you get a good one; errors are not passed through to the parser. """ raise NotImplementedError def __iter__(self): """The TokenSource is an iterator. The iteration will not include the final EOF token, see also the note for the next() method. """ return self def next(self): """Return next token or raise StopIteration. Note that this will raise StopIteration when hitting the EOF token, so EOF will not be part of the iteration. """ token = self.nextToken() if token is None or token.type == EOF: raise StopIteration return token class Lexer(BaseRecognizer, TokenSource): """ @brief Baseclass for generated lexer classes. A lexer is a recognizer that draws input symbols from a character stream. Lexer grammars result in a subclass of this object. A Lexer object uses simplified match() and error recovery mechanisms in the interest of speed. """ def __init__(self, input, state=None): BaseRecognizer.__init__(self, state) TokenSource.__init__(self) # Where is the lexer drawing characters from?
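        ## Typical wiring, for orientation (sketch; MyLexer/MyParser are
        ## generated subclasses, the stream classes ship with this runtime):
        ##   lexer = MyLexer(ANTLRStringStream(u'...'))
        ##   parser = MyParser(CommonTokenStream(lexer))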
self.input = input def reset(self): BaseRecognizer.reset(self) # reset all recognizer state variables if self.input is not None: # rewind the input self.input.seek(0) if self._state is None: # no shared state work to do return # wack Lexer state variables self._state.token = None self._state.type = INVALID_TOKEN_TYPE self._state.channel = DEFAULT_CHANNEL self._state.tokenStartCharIndex = -1 self._state.tokenStartLine = -1 self._state.tokenStartCharPositionInLine = -1 self._state.text = None def makeEOFToken(self): eof = CommonToken( type=EOF, channel=DEFAULT_CHANNEL, input=self.input, start=self.input.index(), stop=self.input.index()) eof.line = self.input.line eof.charPositionInLine = self.input.charPositionInLine return eof def nextToken(self): """ Return a token from this source; i.e., match a token on the char stream. """ while 1: self._state.token = None self._state.channel = DEFAULT_CHANNEL self._state.tokenStartCharIndex = self.input.index() self._state.tokenStartCharPositionInLine = self.input.charPositionInLine self._state.tokenStartLine = self.input.line self._state.text = None if self.input.LA(1) == EOF: return self.makeEOFToken() try: self.mTokens() if self._state.token is None: self.emit() elif self._state.token == SKIP_TOKEN: continue return self._state.token except NoViableAltException, re: self.reportError(re) self.recover(re) # throw out current char and try again except RecognitionException, re: self.reportError(re) # match() routine has already called recover() def skip(self): """ Instruct the lexer to skip creating a token for current lexer rule and look for another token. nextToken() knows to keep looking when a lexer rule finishes with token set to SKIP_TOKEN. Recall that if token==null at end of any token rule, it creates one for you and emits it. """ self._state.token = SKIP_TOKEN def mTokens(self): """This is the lexer entry point that sets instance var 'token'""" # abstract method raise NotImplementedError def setCharStream(self, input): """Set the char stream and reset the lexer""" self.input = None self.reset() self.input = input def getSourceName(self): return self.input.getSourceName() def emit(self, token=None): """ The standard method called to automatically emit a token at the outermost lexical rule. The token object should point into the char buffer start..stop. If there is a text override in 'text', use that to set the token's text. Override this method to emit custom Token objects. If you are building trees, then you should also override Parser or TreeParser.getMissingSymbol(). 
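        Example override (sketch; the decoration step is hypothetical):

            def emit(self, token=None):
                token = Lexer.emit(self, token)  # default construction
                # ... adjust token.text, token.channel, etc. here ...
                return token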
""" if token is None: token = CommonToken( input=self.input, type=self._state.type, channel=self._state.channel, start=self._state.tokenStartCharIndex, stop=self.getCharIndex()-1 ) token.line = self._state.tokenStartLine token.text = self._state.text token.charPositionInLine = self._state.tokenStartCharPositionInLine self._state.token = token return token def match(self, s): if isinstance(s, basestring): for c in s: if self.input.LA(1) != ord(c): if self._state.backtracking > 0: raise BacktrackingFailed mte = MismatchedTokenException(c, self.input) self.recover(mte) raise mte self.input.consume() else: if self.input.LA(1) != s: if self._state.backtracking > 0: raise BacktrackingFailed mte = MismatchedTokenException(unichr(s), self.input) self.recover(mte) # don't really recover; just consume in lexer raise mte self.input.consume() def matchAny(self): self.input.consume() def matchRange(self, a, b): if self.input.LA(1) < a or self.input.LA(1) > b: if self._state.backtracking > 0: raise BacktrackingFailed mre = MismatchedRangeException(unichr(a), unichr(b), self.input) self.recover(mre) raise mre self.input.consume() def getLine(self): return self.input.line def getCharPositionInLine(self): return self.input.charPositionInLine def getCharIndex(self): """What is the index of the current character of lookahead?""" return self.input.index() def getText(self): """ Return the text matched so far for the current token or any text override. """ if self._state.text is not None: return self._state.text return self.input.substring( self._state.tokenStartCharIndex, self.getCharIndex()-1 ) def setText(self, text): """ Set the complete text of this token; it wipes any previous changes to the text. """ self._state.text = text text = property(getText, setText) def reportError(self, e): ## TODO: not thought about recovery in lexer yet. ## # if we've already reported an error and have not matched a token ## # yet successfully, don't report any errors. ## if self.errorRecovery: ## #System.err.print("[SPURIOUS] "); ## return; ## ## self.errorRecovery = True self.displayRecognitionError(self.tokenNames, e) def getErrorMessage(self, e, tokenNames): msg = None if isinstance(e, MismatchedTokenException): msg = "mismatched character " \ + self.getCharErrorDisplay(e.c) \ + " expecting " \ + self.getCharErrorDisplay(e.expecting) elif isinstance(e, NoViableAltException): msg = "no viable alternative at character " \ + self.getCharErrorDisplay(e.c) elif isinstance(e, EarlyExitException): msg = "required (...)+ loop did not match anything at character " \ + self.getCharErrorDisplay(e.c) elif isinstance(e, MismatchedNotSetException): msg = "mismatched character " \ + self.getCharErrorDisplay(e.c) \ + " expecting set " \ + repr(e.expecting) elif isinstance(e, MismatchedSetException): msg = "mismatched character " \ + self.getCharErrorDisplay(e.c) \ + " expecting set " \ + repr(e.expecting) elif isinstance(e, MismatchedRangeException): msg = "mismatched character " \ + self.getCharErrorDisplay(e.c) \ + " expecting set " \ + self.getCharErrorDisplay(e.a) \ + ".." \ + self.getCharErrorDisplay(e.b) else: msg = BaseRecognizer.getErrorMessage(self, e, tokenNames) return msg def getCharErrorDisplay(self, c): if c == EOF: c = '' return repr(c) def recover(self, re): """ Lexers can normally match any char in it's vocabulary after matching a token, so do the easy thing and just kill a character and hope it all works out. You can instead use the rule invocation stack to do sophisticated error recovery if you are in a fragment rule. 
""" self.input.consume() def traceIn(self, ruleName, ruleIndex): inputSymbol = "%s line=%d:%s" % (self.input.LT(1), self.getLine(), self.getCharPositionInLine() ) BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol) def traceOut(self, ruleName, ruleIndex): inputSymbol = "%s line=%d:%s" % (self.input.LT(1), self.getLine(), self.getCharPositionInLine() ) BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol) class Parser(BaseRecognizer): """ @brief Baseclass for generated parser classes. """ def __init__(self, lexer, state=None): BaseRecognizer.__init__(self, state) self.input = lexer def reset(self): BaseRecognizer.reset(self) # reset all recognizer state variables if self.input is not None: self.input.seek(0) # rewind the input def getCurrentInputSymbol(self, input): return input.LT(1) def getMissingSymbol(self, input, e, expectedTokenType, follow): if expectedTokenType == EOF: tokenText = "" else: tokenText = "" t = CommonToken(type=expectedTokenType, text=tokenText) current = input.LT(1) if current.type == EOF: current = input.LT(-1) if current is not None: t.line = current.line t.charPositionInLine = current.charPositionInLine t.channel = DEFAULT_CHANNEL return t def setTokenStream(self, input): """Set the token stream and reset the parser""" self.input = None self.reset() self.input = input def getTokenStream(self): return self.input def getSourceName(self): return self.input.getSourceName() def traceIn(self, ruleName, ruleIndex): BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1)) def traceOut(self, ruleName, ruleIndex): BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1)) class RuleReturnScope(object): """ Rules can return start/stop info as well as possible trees and templates. """ def getStart(self): """Return the start token or tree.""" return None def getStop(self): """Return the stop token or tree.""" return None def getTree(self): """Has a value potentially if output=AST.""" return None def getTemplate(self): """Has a value potentially if output=template.""" return None class ParserRuleReturnScope(RuleReturnScope): """ Rules that return more than a single value must return an object containing all the values. Besides the properties defined in RuleLabelScope.predefinedRulePropertiesScope there may be user-defined return values. This class simply defines the minimum properties that are always defined and methods to access the others that might be available depending on output option such as template and tree. Note text is not an actual property of the return value, it is computed from start and stop using the input stream's toString() method. I could add a ctor to this so that we can pass in and store the input stream, but I'm not sure we want to do that. It would seem to be undefined to get the .text property anyway if the rule matches tokens from multiple input streams. I do not use getters for fields of objects that are used simply to group values such as this aggregate. The getters/setters are there to satisfy the superclass interface. 
""" def __init__(self): self.start = None self.stop = None self.tree = None # only used when output=AST def getStart(self): return self.start def getStop(self): return self.stop def getTree(self): return self.tree python-antlr3-3.5.2/ChangeLog0000644000175000017500000000241312653072152014522 0ustar zigozigo2007-11-03 Benjamin Niemann * PythonTarget.java, dfa.py, exceptions.py, recognizer.py, streams.py: ANTLRStringStream.LA() now returns the character's ordinal and generated lexers operate on integers. Also made various performance tunings. 2007-10-07 Benjamin Niemann * main.py, Python.stg (outputFile): Added simple __main__ section to generated code, so (simple) grammars can be executed as standalone script. * tree.py (RecognitionException.extractInformationFromTreeNodeStream), exceptions.py (CommonTree): Small bugfixes. 2007-09-30 Benjamin Niemann * recognizers.py (TokenSource): Added iterator interface to TokenSource class - and thus to Lexer. 2007-06-27 Benjamin Niemann * Python.stg (genericParser, parser, treeParser): Use correct @init action block for tree parsers. 2007-05-24 Benjamin Niemann * Python.stg (rule): Added support for @decorate {...} action for parser rules to add decorators to the rule method. 2007-05-18 Benjamin Niemann * Python.stg (isolatedLookaheadRangeTest, lookaheadRangeTest): Minor improvement of generated code (use ' <= <= ' instead of ' >= and <= '). python-antlr3-3.5.2/TODO0000644000175000017500000000566012653072152013447 0ustar zigozigo- new test from CL4832 - CS4531 - testcases for error nodes - did I miss a change to Python.stg/returnScope? - there are base classes Tree-/ParserRuleReturnScope - update old and add new examples - need protections in scopeAttributeRef? CL4426 - testcase for $ID.int CL4413 - need to override Target.encodeIntAsCharEscape? CL4389 - look into buildbot - link in report mails is broken - timezone bug in p4 scraper - core: - only look at changes in src/ & runtime/Java - quick - incremential build - sanity check - full - depend on quick - full build - ant test - targets - depend on changes on src/ and runtime/XXX - depend on successful core/quick build - nightlybuild - depend on core/full - somehow check which targets are ok - TreeWizard: - raise exception on parse errors - document it in wiki - publish runtime on cheeseshop - better documentation for output=template w/ full examples - antlr3.main: - verbose/quiet flag: show/hide warnings - set options in grammar? - write optionparser descriptions - better output for return objects - st support - custom grammar options per target - make Grammar.legalOptions changeable - first extract language option, load target class - pass options not known by Grammar to target - patch for CS4010 "null check for $scope::var now" once action parser is fixed - rename @members/@init to @classmembers, @instancemembers? - gunit? - testcases error handling in tree parsers - better test coverage for runtime modules - documentation - more documentation in docstrings - tune doxygen output - doxygen frontpage - do not use Set* templates for properties for Python target - gate with sempred {target.usePropertySetTemplates()}? - special template for empty alternative -> pass - complete runtime - tree.DoubleLinkTree - tree.ParseTree - tree.UnBufferedTreeNodeStream - default values in rule arguments? 
- turn some methods into attributes - (String|CommonToken)Stream.index() - (String|CommonToken)Stream.size() --> __len__ - get rid of getter/setter in generated code - document differences to java API - add methods to emulate java API, but mark 'em as deprecated - using Stream.index as a state for 'error-already-reported' or memoization will be a problem when the stream is not a linear buffer - optimizations which should be explored: - DFA: perhaps zip() the lists into a tuple (eot, eof, min, max, accept, special, transition) for each state. checkout potential performance gain. - StingStream: look into optimizing LA(). Perhaps use LAk instead of LA(k) and create the attributes when needed. - Perform some magic to improve dfaStateSwitch. - in lexer rules: LA == u'a' or LA == u'b' or LA == u'c'... -> LA in (u'a', u'b', u'c', ...) or "LA in self.set_xyz" with set_xyz as a class member - tweak CodeGenerator.genSetExpr() - make BaseTree.nil() an attribute? or singleton? - psycho?? - ... python-antlr3-3.5.2/ez_setup.py0000644000175000017500000002054212653072315015164 0ustar zigozigo#!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import sys DEFAULT_VERSION = "0.6c5" DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167', 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', } import sys, os def _validate_md5(egg_name, data): if egg_name in md5_data: from md5 import md5 digest = md5(data).hexdigest() if digest != md5_data[egg_name]: print >>sys.stderr, ( "md5 validation of %s failed! 
(Possible download problem?)" % egg_name ) sys.exit(2) return data def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15 ): """Automatically find/download setuptools and make it available on sys.path `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where setuptools will be downloaded, if it is not already available. If `download_delay` is specified, it should be the number of seconds that will be paused before initiating a download, should one be required. If an older version of setuptools is installed, this routine will print a message to ``sys.stderr`` and raise SystemExit in an attempt to abort the calling script. """ try: import setuptools if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) except ImportError: egg = download_setuptools(version, download_base, to_dir, download_delay) sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg import pkg_resources try: pkg_resources.require("setuptools>="+version) except pkg_resources.VersionConflict, e: # XXX could we install in a subprocess here? print >>sys.stderr, ( "The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first.\n\n(Currently using %r)" ) % (version, e.args[0]) sys.exit(2) def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay = 15 ): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. """ import urllib2, shutil egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) url = download_base + egg_name saveto = os.path.join(to_dir, egg_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: from distutils import log if delay: log.warn(""" --------------------------------------------------------------------------- This script requires setuptools version %s to run (even to display help). I will attempt to download it for you (from %s), but you may need to enable firewall access for this script first. I will start the download in %d seconds. (Note: if this machine does not have network access, please obtain the file %s and place it in this directory before rerunning this script.) ---------------------------------------------------------------------------""", version, download_base, delay, url ); from time import sleep; sleep(delay) log.warn("Downloading %s", url) src = urllib2.urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = _validate_md5(egg_name, src.read()) dst = open(saveto,"wb"); dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto) def main(argv, version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" try: import setuptools except ImportError: egg = None try: egg = download_setuptools(version, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: if egg and os.path.exists(egg): os.unlink(egg) else: if setuptools.__version__ == '0.0.1': # tell the user to uninstall obsolete version use_setuptools(version) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' def update_md5(filenames): """Update our built-in md5 registry""" import re from md5 import md5 for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close() if __name__=='__main__': if len(sys.argv)>2 and sys.argv[1]=='--md5update': update_md5(sys.argv[2:]) else: main(sys.argv[1:]) python-antlr3-3.5.2/unittests/0000755000175000017500000000000012653072152015012 5ustar zigozigopython-antlr3-3.5.2/unittests/testtree.py0000644000175000017500000013042012653072152017223 0ustar zigozigo# -*- coding: utf-8 -*- import os import unittest from StringIO import StringIO from antlr3.tree import (CommonTreeNodeStream, CommonTree, CommonTreeAdaptor, TreeParser, TreeVisitor, TreeIterator) from antlr3 import CommonToken, UP, DOWN, EOF from antlr3.treewizard import TreeWizard class TestTreeNodeStream(unittest.TestCase): """Test case for the TreeNodeStream class.""" def setUp(self): self.adaptor = CommonTreeAdaptor() def newStream(self, t): """Build new stream; let's us override to test other streams.""" return CommonTreeNodeStream(t) def testSingleNode(self): t = CommonTree(CommonToken(101)) stream = self.newStream(t) expecting = "101" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101" found = str(stream) self.failUnlessEqual(expecting, found) def testTwoChildrenOfNilRoot(self): class V(CommonTree): def __init__(self, token=None, ttype=None, x=None): if x is not None: self.x = x if ttype is not None and token is None: self.token = CommonToken(type=ttype) if token is not None: self.token = token def __str__(self): if self.token is not None: txt = self.token.text else: txt = "" txt += "" return txt root_0 = self.adaptor.nil(); t = V(ttype=101, x=2) u = V(token=CommonToken(type=102, text="102")) self.adaptor.addChild(root_0, t) self.adaptor.addChild(root_0, u) self.assert_(root_0.parent is None) self.assertEquals(-1, root_0.childIndex) 
self.assertEquals(0, t.childIndex) self.assertEquals(1, u.childIndex) def test4Nodes(self): # ^(101 ^(102 103) 104) t = CommonTree(CommonToken(101)) t.addChild(CommonTree(CommonToken(102))) t.getChild(0).addChild(CommonTree(CommonToken(103))) t.addChild(CommonTree(CommonToken(104))) stream = self.newStream(t) expecting = "101 102 103 104" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101 2 102 2 103 3 104 3" found = str(stream) self.failUnlessEqual(expecting, found) def testList(self): root = CommonTree(None) t = CommonTree(CommonToken(101)) t.addChild(CommonTree(CommonToken(102))) t.getChild(0).addChild(CommonTree(CommonToken(103))) t.addChild(CommonTree(CommonToken(104))) u = CommonTree(CommonToken(105)) root.addChild(t) root.addChild(u) stream = CommonTreeNodeStream(root) expecting = "101 102 103 104 105" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101 2 102 2 103 3 104 3 105" found = str(stream) self.failUnlessEqual(expecting, found) def testFlatList(self): root = CommonTree(None) root.addChild(CommonTree(CommonToken(101))) root.addChild(CommonTree(CommonToken(102))) root.addChild(CommonTree(CommonToken(103))) stream = CommonTreeNodeStream(root) expecting = "101 102 103" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101 102 103" found = str(stream) self.failUnlessEqual(expecting, found) def testListWithOneNode(self): root = CommonTree(None) root.addChild(CommonTree(CommonToken(101))) stream = CommonTreeNodeStream(root) expecting = "101" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101" found = str(stream) self.failUnlessEqual(expecting, found) def testAoverB(self): t = CommonTree(CommonToken(101)) t.addChild(CommonTree(CommonToken(102))) stream = self.newStream(t) expecting = "101 102" found = self.toNodesOnlyString(stream) self.failUnlessEqual(expecting, found) expecting = "101 2 102 3" found = str(stream) self.failUnlessEqual(expecting, found) def testLT(self): # ^(101 ^(102 103) 104) t = CommonTree(CommonToken(101)) t.addChild(CommonTree(CommonToken(102))) t.getChild(0).addChild(CommonTree(CommonToken(103))) t.addChild(CommonTree(CommonToken(104))) stream = self.newStream(t) self.failUnlessEqual(101, stream.LT(1).getType()) self.failUnlessEqual(DOWN, stream.LT(2).getType()) self.failUnlessEqual(102, stream.LT(3).getType()) self.failUnlessEqual(DOWN, stream.LT(4).getType()) self.failUnlessEqual(103, stream.LT(5).getType()) self.failUnlessEqual(UP, stream.LT(6).getType()) self.failUnlessEqual(104, stream.LT(7).getType()) self.failUnlessEqual(UP, stream.LT(8).getType()) self.failUnlessEqual(EOF, stream.LT(9).getType()) # check way ahead self.failUnlessEqual(EOF, stream.LT(100).getType()) def testMarkRewindEntire(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) m = stream.mark() # MARK for _ in range(13): # consume til end stream.LT(1) stream.consume() self.failUnlessEqual(EOF, stream.LT(1).getType()) self.failUnlessEqual(UP, stream.LT(-1).getType()) #TODO: remove? 
stream.rewind(m) # REWIND # consume til end again :) for _ in range(13): # consume til end stream.LT(1) stream.consume() self.failUnlessEqual(EOF, stream.LT(1).getType()) self.failUnlessEqual(UP, stream.LT(-1).getType()) #TODO: remove? def testMarkRewindInMiddle(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) for _ in range(7): # consume til middle #System.out.println(tream.LT(1).getType()) stream.consume() self.failUnlessEqual(107, stream.LT(1).getType()) m = stream.mark() # MARK stream.consume() # consume 107 stream.consume() # consume UP stream.consume() # consume UP stream.consume() # consume 104 stream.rewind(m) # REWIND self.failUnlessEqual(107, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(UP, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(UP, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(104, stream.LT(1).getType()) stream.consume() # now we're past rewind position self.failUnlessEqual(105, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(UP, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(EOF, stream.LT(1).getType()) self.failUnlessEqual(UP, stream.LT(-1).getType()) def testMarkRewindNested(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) m = stream.mark() # MARK at start stream.consume() # consume 101 stream.consume() # consume DN m2 = stream.mark() # MARK on 102 stream.consume() # consume 102 stream.consume() # consume DN stream.consume() # consume 103 stream.consume() # consume 106 stream.rewind(m2) # REWIND to 102 self.failUnlessEqual(102, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # stop at 103 and rewind to start stream.rewind(m) # REWIND to 101 self.failUnlessEqual(101, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(102, stream.LT(1).getType()) stream.consume() self.failUnlessEqual(DOWN, stream.LT(1).getType()) def testSeek(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) stream.consume() # consume 101 stream.consume() # consume DN stream.consume() # consume 102 stream.seek(7) # seek to 107 self.failUnlessEqual(107, stream.LT(1).getType()) stream.consume() # 
consume 107 stream.consume() # consume UP stream.consume() # consume UP self.failUnlessEqual(104, stream.LT(1).getType()) def testSeekFromStart(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) stream.seek(7) # seek to 107 self.failUnlessEqual(107, stream.LT(1).getType()) stream.consume() # consume 107 stream.consume() # consume UP stream.consume() # consume UP self.failUnlessEqual(104, stream.LT(1).getType()) def testReset(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) # stream has 7 real + 6 nav nodes # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) v1 = self.toNodesOnlyString(stream) # scan all stream.reset() v2 = self.toNodesOnlyString(stream) # scan all self.assertEquals(v1, v2) def testIterator(self): r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) stream = CommonTreeNodeStream(r0) expecting = [ 101, DOWN, 102, DOWN, 103, 106, DOWN, 107, UP, UP, 104, 105, UP] found = [t.type for t in stream] self.assertEqual(expecting, found) def toNodesOnlyString(self, nodes): buf = [] for i in range(nodes.size()): t = nodes.LT(i+1) type = nodes.getTreeAdaptor().getType(t) if not (type==DOWN or type==UP): buf.append(str(type)) return ' '.join(buf) class TestCommonTreeNodeStream(unittest.TestCase): """Test case for the CommonTreeNodeStream class.""" def testPushPop(self): # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) # stream has 9 real + 8 nav nodes # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r1.addChild(CommonTree(CommonToken(103))) r0.addChild(r1) r2 = CommonTree(CommonToken(104)) r2.addChild(CommonTree(CommonToken(105))) r0.addChild(r2) r3 = CommonTree(CommonToken(106)) r3.addChild(CommonTree(CommonToken(107))) r0.addChild(r3) r0.addChild(CommonTree(CommonToken(108))) r0.addChild(CommonTree(CommonToken(109))) stream = CommonTreeNodeStream(r0) expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3" found = str(stream) self.failUnlessEqual(expecting, found) # Assume we want to hit node 107 and then "call 102" then return indexOf102 = 2 indexOf107 = 12 for _ in range(indexOf107):# consume til 107 node stream.consume() # CALL 102 self.failUnlessEqual(107, stream.LT(1).getType()) stream.push(indexOf102) self.failUnlessEqual(102, stream.LT(1).getType()) stream.consume() # consume 102 self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # consume DN self.failUnlessEqual(103, stream.LT(1).getType()) stream.consume() # consume 103 
self.failUnlessEqual(UP, stream.LT(1).getType()) # RETURN stream.pop() self.failUnlessEqual(107, stream.LT(1).getType()) def testNestedPushPop(self): # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) # stream has 9 real + 8 nav nodes # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r1.addChild(CommonTree(CommonToken(103))) r0.addChild(r1) r2 = CommonTree(CommonToken(104)) r2.addChild(CommonTree(CommonToken(105))) r0.addChild(r2) r3 = CommonTree(CommonToken(106)) r3.addChild(CommonTree(CommonToken(107))) r0.addChild(r3) r0.addChild(CommonTree(CommonToken(108))) r0.addChild(CommonTree(CommonToken(109))) stream = CommonTreeNodeStream(r0) # Assume we want to hit node 107 and then "call 102", which # calls 104, then return indexOf102 = 2 indexOf107 = 12 for _ in range(indexOf107): # consume til 107 node stream.consume() self.failUnlessEqual(107, stream.LT(1).getType()) # CALL 102 stream.push(indexOf102) self.failUnlessEqual(102, stream.LT(1).getType()) stream.consume() # consume 102 self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # consume DN self.failUnlessEqual(103, stream.LT(1).getType()) stream.consume() # consume 103 # CALL 104 indexOf104 = 6 stream.push(indexOf104) self.failUnlessEqual(104, stream.LT(1).getType()) stream.consume() # consume 102 self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # consume DN self.failUnlessEqual(105, stream.LT(1).getType()) stream.consume() # consume 103 self.failUnlessEqual(UP, stream.LT(1).getType()) # RETURN (to UP node in 102 subtree) stream.pop() self.failUnlessEqual(UP, stream.LT(1).getType()) # RETURN (to empty stack) stream.pop() self.failUnlessEqual(107, stream.LT(1).getType()) def testPushPopFromEOF(self): # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109) # stream has 9 real + 8 nav nodes # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r1.addChild(CommonTree(CommonToken(103))) r0.addChild(r1) r2 = CommonTree(CommonToken(104)) r2.addChild(CommonTree(CommonToken(105))) r0.addChild(r2) r3 = CommonTree(CommonToken(106)) r3.addChild(CommonTree(CommonToken(107))) r0.addChild(r3) r0.addChild(CommonTree(CommonToken(108))) r0.addChild(CommonTree(CommonToken(109))) stream = CommonTreeNodeStream(r0) while stream.LA(1) != EOF: stream.consume() indexOf102 = 2 indexOf104 = 6 self.failUnlessEqual(EOF, stream.LT(1).getType()) # CALL 102 stream.push(indexOf102) self.failUnlessEqual(102, stream.LT(1).getType()) stream.consume() # consume 102 self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # consume DN self.failUnlessEqual(103, stream.LT(1).getType()) stream.consume() # consume 103 self.failUnlessEqual(UP, stream.LT(1).getType()) # RETURN (to empty stack) stream.pop() self.failUnlessEqual(EOF, stream.LT(1).getType()) # CALL 104 stream.push(indexOf104) self.failUnlessEqual(104, stream.LT(1).getType()) stream.consume() # consume 102 self.failUnlessEqual(DOWN, stream.LT(1).getType()) stream.consume() # consume DN self.failUnlessEqual(105, stream.LT(1).getType()) stream.consume() # consume 103 self.failUnlessEqual(UP, stream.LT(1).getType()) # RETURN (to empty stack) stream.pop() self.failUnlessEqual(EOF, stream.LT(1).getType()) class TestCommonTree(unittest.TestCase): """Test case for the CommonTree class.""" def setUp(self): """Setup test fixure""" self.adaptor = CommonTreeAdaptor() def testSingleNode(self): t = 
CommonTree(CommonToken(101)) self.failUnless(t.parent is None) self.failUnlessEqual(-1, t.childIndex) def test4Nodes(self): # ^(101 ^(102 103) 104) r0 = CommonTree(CommonToken(101)) r0.addChild(CommonTree(CommonToken(102))) r0.getChild(0).addChild(CommonTree(CommonToken(103))) r0.addChild(CommonTree(CommonToken(104))) self.failUnless(r0.parent is None) self.failUnlessEqual(-1, r0.childIndex) def testList(self): # ^(nil 101 102 103) r0 = CommonTree(None) c0=CommonTree(CommonToken(101)) r0.addChild(c0) c1=CommonTree(CommonToken(102)) r0.addChild(c1) c2=CommonTree(CommonToken(103)) r0.addChild(c2) self.failUnless(r0.parent is None) self.failUnlessEqual(-1, r0.childIndex) self.failUnlessEqual(r0, c0.parent) self.failUnlessEqual(0, c0.childIndex) self.failUnlessEqual(r0, c1.parent) self.failUnlessEqual(1, c1.childIndex) self.failUnlessEqual(r0, c2.parent) self.failUnlessEqual(2, c2.childIndex) def testList2(self): # Add child ^(nil 101 102 103) to root 5 # should pull 101 102 103 directly to become 5's child list root = CommonTree(CommonToken(5)) # child tree r0 = CommonTree(None) c0=CommonTree(CommonToken(101)) r0.addChild(c0) c1=CommonTree(CommonToken(102)) r0.addChild(c1) c2=CommonTree(CommonToken(103)) r0.addChild(c2) root.addChild(r0) self.failUnless(root.parent is None) self.failUnlessEqual(-1, root.childIndex) # check children of root all point at root self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(0, c0.childIndex) self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(1, c1.childIndex) self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(2, c2.childIndex) def testAddListToExistChildren(self): # Add child ^(nil 101 102 103) to root ^(5 6) # should add 101 102 103 to end of 5's child list root = CommonTree(CommonToken(5)) root.addChild(CommonTree(CommonToken(6))) # child tree r0 = CommonTree(None) c0=CommonTree(CommonToken(101)) r0.addChild(c0) c1=CommonTree(CommonToken(102)) r0.addChild(c1) c2=CommonTree(CommonToken(103)) r0.addChild(c2) root.addChild(r0) self.failUnless(root.parent is None) self.failUnlessEqual(-1, root.childIndex) # check children of root all point at root self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(1, c0.childIndex) self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(2, c1.childIndex) self.failUnlessEqual(root, c0.parent) self.failUnlessEqual(3, c2.childIndex) def testDupTree(self): # ^(101 ^(102 103 ^(106 107) ) 104 105) r0 = CommonTree(CommonToken(101)) r1 = CommonTree(CommonToken(102)) r0.addChild(r1) r1.addChild(CommonTree(CommonToken(103))) r2 = CommonTree(CommonToken(106)) r2.addChild(CommonTree(CommonToken(107))) r1.addChild(r2) r0.addChild(CommonTree(CommonToken(104))) r0.addChild(CommonTree(CommonToken(105))) dup = self.adaptor.dupTree(r0) self.failUnless(dup.parent is None) self.failUnlessEqual(-1, dup.childIndex) dup.sanityCheckParentAndChildIndexes() def testBecomeRoot(self): # 5 becomes root of ^(nil 101 102 103) newRoot = CommonTree(CommonToken(5)) oldRoot = CommonTree(None) oldRoot.addChild(CommonTree(CommonToken(101))) oldRoot.addChild(CommonTree(CommonToken(102))) oldRoot.addChild(CommonTree(CommonToken(103))) self.adaptor.becomeRoot(newRoot, oldRoot) newRoot.sanityCheckParentAndChildIndexes() def testBecomeRoot2(self): # 5 becomes root of ^(101 102 103) newRoot = CommonTree(CommonToken(5)) oldRoot = CommonTree(CommonToken(101)) oldRoot.addChild(CommonTree(CommonToken(102))) oldRoot.addChild(CommonTree(CommonToken(103))) self.adaptor.becomeRoot(newRoot, oldRoot) newRoot.sanityCheckParentAndChildIndexes() 
def testBecomeRoot3(self): # ^(nil 5) becomes root of ^(nil 101 102 103) newRoot = CommonTree(None) newRoot.addChild(CommonTree(CommonToken(5))) oldRoot = CommonTree(None) oldRoot.addChild(CommonTree(CommonToken(101))) oldRoot.addChild(CommonTree(CommonToken(102))) oldRoot.addChild(CommonTree(CommonToken(103))) self.adaptor.becomeRoot(newRoot, oldRoot) newRoot.sanityCheckParentAndChildIndexes() def testBecomeRoot5(self): # ^(nil 5) becomes root of ^(101 102 103) newRoot = CommonTree(None) newRoot.addChild(CommonTree(CommonToken(5))) oldRoot = CommonTree(CommonToken(101)) oldRoot.addChild(CommonTree(CommonToken(102))) oldRoot.addChild(CommonTree(CommonToken(103))) self.adaptor.becomeRoot(newRoot, oldRoot) newRoot.sanityCheckParentAndChildIndexes() def testBecomeRoot6(self): # emulates construction of ^(5 6) root_0 = self.adaptor.nil() root_1 = self.adaptor.nil() root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1) self.adaptor.addChild(root_1, CommonTree(CommonToken(6))) self.adaptor.addChild(root_0, root_1) root_0.sanityCheckParentAndChildIndexes() # Test replaceChildren def testReplaceWithNoChildren(self): t = CommonTree(CommonToken(101)) newChild = CommonTree(CommonToken(5)) error = False try: t.replaceChildren(0, 0, newChild) except IndexError: error = True self.failUnless(error) def testReplaceWithOneChildren(self): # assume token type 99 and use text t = CommonTree(CommonToken(99, text="a")) c0 = CommonTree(CommonToken(99, text="b")) t.addChild(c0) newChild = CommonTree(CommonToken(99, text="c")) t.replaceChildren(0, 0, newChild) expecting = "(a c)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceInMiddle(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1 t.addChild(CommonTree(CommonToken(99, text="d"))) newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(1, 1, newChild) expecting = "(a b x d)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceAtLeft(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0 t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(0, 0, newChild) expecting = "(a x c d)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceAtRight(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2 newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(2, 2, newChild) expecting = "(a b c x)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceOneWithTwoAtLeft(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChildren = self.adaptor.nil() newChildren.addChild(CommonTree(CommonToken(99, text="x"))) newChildren.addChild(CommonTree(CommonToken(99, text="y"))) t.replaceChildren(0, 0, newChildren) expecting = "(a x y c d)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceOneWithTwoAtRight(self): t = 
CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChildren = self.adaptor.nil() newChildren.addChild(CommonTree(CommonToken(99, text="x"))) newChildren.addChild(CommonTree(CommonToken(99, text="y"))) t.replaceChildren(2, 2, newChildren) expecting = "(a b c x y)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceOneWithTwoInMiddle(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChildren = self.adaptor.nil() newChildren.addChild(CommonTree(CommonToken(99, text="x"))) newChildren.addChild(CommonTree(CommonToken(99, text="y"))) t.replaceChildren(1, 1, newChildren) expecting = "(a b x y d)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceTwoWithOneAtLeft(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(0, 1, newChild) expecting = "(a x d)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceTwoWithOneAtRight(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(1, 2, newChild) expecting = "(a b x)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceAllWithOne(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChild = CommonTree(CommonToken(99, text="x")) t.replaceChildren(0, 2, newChild) expecting = "(a x)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() def testReplaceAllWithTwo(self): t = CommonTree(CommonToken(99, text="a")) t.addChild(CommonTree(CommonToken(99, text="b"))) t.addChild(CommonTree(CommonToken(99, text="c"))) t.addChild(CommonTree(CommonToken(99, text="d"))) newChildren = self.adaptor.nil() newChildren.addChild(CommonTree(CommonToken(99, text="x"))) newChildren.addChild(CommonTree(CommonToken(99, text="y"))) t.replaceChildren(0, 2, newChildren) expecting = "(a x y)" self.failUnlessEqual(expecting, t.toStringTree()) t.sanityCheckParentAndChildIndexes() class TestTreeContext(unittest.TestCase): """Test the TreeParser.inContext() method""" tokenNames = [ "", "", "", "", "VEC", "ASSIGN", "PRINT", "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" ] def testSimpleParent(self): tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") self.assertEquals(expecting, found) def testNoParent(self): tree = "(PRINT (MULT 
ID[x] (VEC INT[1] INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(%x:PRINT (MULT ID (VEC INT INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = False found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") self.assertEquals(expecting, found) def testParentWithWildcard(self): tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") self.assertEquals(expecting, found) def testWildcardAtStartIgnored(self): tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "...VEC") self.assertEquals(expecting, found) def testWildcardInBetween(self): tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT...VEC") self.assertEquals(expecting, found) def testLotsOfWildcards(self): tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "... PRINT ... 
VEC ...") self.assertEquals(expecting, found) def testDeep(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...") self.assertEquals(expecting, found) def testDeepAndFindRoot(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ...") self.assertEquals(expecting, found) def testDeepAndFindRoot2(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... VEC ...") self.assertEquals(expecting, found) def testChain(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = True found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT MULT VEC MULT") self.assertEquals(expecting, found) ## TEST INVALID CONTEXTS def testNotParent(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = False found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC") self.assertEquals(expecting, found) def testMismatch(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = False ## missing MULT found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC MULT") self.assertEquals(expecting, found) def testMismatch2(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = False found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC ...") self.assertEquals(expecting, found) def testMismatch3(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, 
"(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") expecting = False found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ... VEC MULT") self.assertEquals(expecting, found) def testDoubleEtc(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") try: TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... ... VEC") self.fail() except ValueError, exc: expecting = "invalid syntax: ... ..." found = str(exc) self.assertEquals(expecting, found) def testDotDot(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) labels = {} valid = wiz.parse( t, "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))", labels) self.assertTrue(valid) node = labels.get("x") try: TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT .. VEC") self.fail() except ValueError, exc: expecting = "invalid syntax: .." found = str(exc) self.assertEquals(expecting, found) class TestTreeVisitor(unittest.TestCase): """Test of the TreeVisitor class.""" tokenNames = [ "", "", "", "", "VEC", "ASSIGN", "PRINT", "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'" ] def testTreeVisitor(self): tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))" adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokenNames) t = wiz.create(tree) found = [] def pre(t): found.append("pre(%s)" % t) return t def post(t): found.append("post(%s)" % t) return t visitor = TreeVisitor(adaptor) visitor.visit(t, pre, post) expecting = [ "pre(PRINT)", "pre(MULT)", "pre(x)", "post(x)", "pre(VEC)", "pre(MULT)", "pre(9)", "post(9)", "pre(1)", "post(1)", "post(MULT)", "pre(2)", "post(2)", "pre(3)", "post(3)", "post(VEC)", "post(MULT)", "post(PRINT)" ] self.assertEquals(expecting, found) class TestTreeIterator(unittest.TestCase): tokens = [ "", "", "", "", "A", "B", "C", "D", "E", "F", "G" ] def testNode(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("A") it = TreeIterator(t) expecting = "A EOF" found = self.toString(it) self.assertEquals(expecting, found) def testFlatAB(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(nil A B)") it = TreeIterator(t) expecting = "nil DOWN A B UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def testAB(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(A B)") it = TreeIterator(t) expecting = "A DOWN B UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def testABC(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(A B C)") it = TreeIterator(t) expecting = "A DOWN B C UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def testVerticalList(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(A (B C))") it = TreeIterator(t) expecting = "A DOWN B DOWN C UP UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def testComplex(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(A (B (C D E) F) G)") it = TreeIterator(t) expecting = "A 
DOWN B DOWN C DOWN D E UP F UP G UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def testReset(self): adaptor = CommonTreeAdaptor() wiz = TreeWizard(adaptor, self.tokens) t = wiz.create("(A (B (C D E) F) G)") it = TreeIterator(t) expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" found = self.toString(it) self.assertEquals(expecting, found) it.reset() expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF" found = self.toString(it) self.assertEquals(expecting, found) def toString(self, it): buf = [] for n in it: buf.append(str(n)) return ' '.join(buf) if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/testrecognizers.py0000644000175000017500000000305312653072152020617 0ustar zigozigoimport sys import unittest import antlr3 class TestBaseRecognizer(unittest.TestCase): """Tests for BaseRecognizer class""" def testGetRuleInvocationStack(self): """BaseRecognizer._getRuleInvocationStack()""" rules = antlr3.BaseRecognizer._getRuleInvocationStack(__name__) self.failUnlessEqual( rules, ['testGetRuleInvocationStack'] ) class TestTokenSource(unittest.TestCase): """Test case for the antlr3.TokenSource class""" def testIteratorInterface(self): """TokenSource.next()""" class TrivialToken(object): def __init__(self, type): self.type = type class TestSource(antlr3.TokenSource): def __init__(self): self.tokens = [ TrivialToken(1), TrivialToken(2), TrivialToken(3), TrivialToken(4), TrivialToken(antlr3.EOF), ] def nextToken(self): return self.tokens.pop(0) src = TestSource() tokens = [] for token in src: tokens.append(token.type) self.failUnlessEqual(tokens, [1, 2, 3, 4]) class TestLexer(unittest.TestCase): def testInit(self): """Lexer.__init__()""" class TLexer(antlr3.Lexer): api_version = 'HEAD' stream = antlr3.StringStream('foo') TLexer(stream) if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
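# A minimal hedged sketch of the iterator protocol that TestTokenSource
# verifies above: any TokenSource whose nextToken() eventually returns an
# EOF token can be drained with a plain for-loop.  Illustrative only; the
# helper name is not part of the original module.
def _demoTokenSourceIteration():
    class OneTwoSource(antlr3.TokenSource):
        def __init__(self):
            self.types = [1, 2, antlr3.EOF]
        def nextToken(self):
            # hand out one token per call, ending with EOF
            return antlr3.CommonToken(type=self.types.pop(0))
    # iteration stops at the EOF token, so this returns [1, 2]
    return [token.type for token in OneTwoSource()]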
""" dfa = antlr3.DFA( self.recog, 1, eot=[], eof=[], min=[], max=[], accept=[], special=[], transition=[] ) def testUnpack(self): """DFA.unpack()""" self.failUnlessEqual( antlr3.DFA.unpack( u"\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff" u"\32\6\4\uffff\1\6\1\uffff\32\6" ), [ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 ] ) if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/testdottreegen.py0000644000175000017500000000334112653072152020425 0ustar zigozigo# -*- coding: utf-8 -*- import os import unittest from StringIO import StringIO import textwrap import stringtemplate3 from antlr3.dottreegen import toDOT from antlr3.treewizard import TreeWizard from antlr3.tree import CommonTreeAdaptor class TestToDOT(unittest.TestCase): """Test case for the toDOT function.""" def setUp(self): self.adaptor = CommonTreeAdaptor() self.tokens = [ "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" ] self.wiz = TreeWizard(self.adaptor, self.tokens) def testNone(self): """toDOT()""" treeST = stringtemplate3.StringTemplate( template=( "digraph {\n" + " $nodes$\n" + " $edges$\n" + "}\n") ) edgeST = stringtemplate3.StringTemplate( template="$parent$ -> $child$\n" ) tree = self.wiz.create("(A B (B C C) (B (C D D)))") st = toDOT(tree, self.adaptor, treeST, edgeST) result = st.toString() expected = textwrap.dedent( '''\ digraph { n0 [label="A"]; n1 [label="B"]; n2 [label="B"]; n3 [label="C"]; n4 [label="C"]; n5 [label="B"]; n6 [label="C"]; n7 [label="D"]; n8 [label="D"]; n0 -> n1 n0 -> n2 n2 -> n3 n2 -> n4 n0 -> n5 n5 -> n6 n6 -> n7 n6 -> n8 } ''' ) self.assertEqual(result, expected) if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/testbase.py0000644000175000017500000000161712653072152017203 0ustar zigozigoimport unittest class BrokenTest(unittest.TestCase.failureException): def __repr__(self): name, reason = self.args return '%s: %s: %s works now' % ( (self.__class__.__name__, name, reason)) def broken(reason, *exceptions): '''Indicates a failing (or erroneous) test case fails that should succeed. 
If the test fails with an exception, list the exception type in args''' def wrapper(test_method): def replacement(*args, **kwargs): try: test_method(*args, **kwargs) except exceptions or unittest.TestCase.failureException: pass else: raise BrokenTest(test_method.__name__, reason) replacement.__doc__ = test_method.__doc__ replacement.__name__ = 'XXX_' + test_method.__name__ replacement.todo = reason return replacement return wrapper python-antlr3-3.5.2/unittests/testtreewizard.py0000644000175000017500000005075312653072152020456 0ustar zigozigo# -*- coding: utf-8 -*- import os import unittest from StringIO import StringIO from antlr3.tree import CommonTreeAdaptor, CommonTree, INVALID_TOKEN_TYPE from antlr3.treewizard import TreeWizard, computeTokenTypes, \ TreePatternLexer, EOF, ID, BEGIN, END, PERCENT, COLON, DOT, ARG, \ TreePatternParser, \ TreePattern, WildcardTreePattern, TreePatternTreeAdaptor class TestComputeTokenTypes(unittest.TestCase): """Test case for the computeTokenTypes function.""" def testNone(self): """computeTokenTypes(None) -> {}""" typeMap = computeTokenTypes(None) self.failUnless(isinstance(typeMap, dict)) self.failUnlessEqual(typeMap, {}) def testList(self): """computeTokenTypes(['a', 'b']) -> { 'a': 0, 'b': 1 }""" typeMap = computeTokenTypes(['a', 'b']) self.failUnless(isinstance(typeMap, dict)) self.failUnlessEqual(typeMap, { 'a': 0, 'b': 1 }) class TestTreePatternLexer(unittest.TestCase): """Test case for the TreePatternLexer class.""" def testBegin(self): """TreePatternLexer(): '('""" lexer = TreePatternLexer('(') type = lexer.nextToken() self.failUnlessEqual(type, BEGIN) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testEnd(self): """TreePatternLexer(): ')'""" lexer = TreePatternLexer(')') type = lexer.nextToken() self.failUnlessEqual(type, END) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testPercent(self): """TreePatternLexer(): '%'""" lexer = TreePatternLexer('%') type = lexer.nextToken() self.failUnlessEqual(type, PERCENT) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testDot(self): """TreePatternLexer(): '.'""" lexer = TreePatternLexer('.') type = lexer.nextToken() self.failUnlessEqual(type, DOT) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testColon(self): """TreePatternLexer(): ':'""" lexer = TreePatternLexer(':') type = lexer.nextToken() self.failUnlessEqual(type, COLON) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testEOF(self): """TreePatternLexer(): EOF""" lexer = TreePatternLexer(' \n \r \t ') type = lexer.nextToken() self.failUnlessEqual(type, EOF) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, False) def testID(self): """TreePatternLexer(): ID""" lexer = TreePatternLexer('_foo12_bar') type = lexer.nextToken() self.failUnlessEqual(type, ID) self.failUnlessEqual(lexer.sval, '_foo12_bar') self.failUnlessEqual(lexer.error, False) def testARG(self): """TreePatternLexer(): ARG""" lexer = TreePatternLexer('[ \\]bla\\n]') type = lexer.nextToken() self.failUnlessEqual(type, ARG) self.failUnlessEqual(lexer.sval, ' ]bla\\n') self.failUnlessEqual(lexer.error, False) def testError(self): """TreePatternLexer(): error""" lexer = TreePatternLexer('1') type = lexer.nextToken() self.failUnlessEqual(type, EOF) self.failUnlessEqual(lexer.sval, '') self.failUnlessEqual(lexer.error, True) class TestTreePatternParser(unittest.TestCase): """Test case for the 
TreePatternParser class.""" def setUp(self): """Setup text fixure We need a tree adaptor, use CommonTreeAdaptor. And a constant list of token names. """ self.adaptor = CommonTreeAdaptor() self.tokens = [ "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" ] self.wizard = TreeWizard(self.adaptor, tokenNames=self.tokens) def testSingleNode(self): """TreePatternParser: 'ID'""" lexer = TreePatternLexer('ID') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(isinstance(tree, CommonTree)) self.failUnlessEqual(tree.getType(), 10) self.failUnlessEqual(tree.getText(), 'ID') def testSingleNodeWithArg(self): """TreePatternParser: 'ID[foo]'""" lexer = TreePatternLexer('ID[foo]') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(isinstance(tree, CommonTree)) self.failUnlessEqual(tree.getType(), 10) self.failUnlessEqual(tree.getText(), 'foo') def testSingleLevelTree(self): """TreePatternParser: '(A B)'""" lexer = TreePatternLexer('(A B)') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(isinstance(tree, CommonTree)) self.failUnlessEqual(tree.getType(), 5) self.failUnlessEqual(tree.getText(), 'A') self.failUnlessEqual(tree.getChildCount(), 1) self.failUnlessEqual(tree.getChild(0).getType(), 6) self.failUnlessEqual(tree.getChild(0).getText(), 'B') def testNil(self): """TreePatternParser: 'nil'""" lexer = TreePatternLexer('nil') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(isinstance(tree, CommonTree)) self.failUnlessEqual(tree.getType(), 0) self.failUnlessEqual(tree.getText(), None) def testWildcard(self): """TreePatternParser: '(.)'""" lexer = TreePatternLexer('(.)') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(isinstance(tree, WildcardTreePattern)) def testLabel(self): """TreePatternParser: '(%a:A)'""" lexer = TreePatternLexer('(%a:A)') parser = TreePatternParser(lexer, self.wizard, TreePatternTreeAdaptor()) tree = parser.pattern() self.failUnless(isinstance(tree, TreePattern)) self.failUnlessEqual(tree.label, 'a') def testError1(self): """TreePatternParser: ')'""" lexer = TreePatternLexer(')') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(tree is None) def testError2(self): """TreePatternParser: '()'""" lexer = TreePatternLexer('()') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(tree is None) def testError3(self): """TreePatternParser: '(A ])'""" lexer = TreePatternLexer('(A ])') parser = TreePatternParser(lexer, self.wizard, self.adaptor) tree = parser.pattern() self.failUnless(tree is None) class TestTreeWizard(unittest.TestCase): """Test case for the TreeWizard class.""" def setUp(self): """Setup text fixure We need a tree adaptor, use CommonTreeAdaptor. And a constant list of token names. 
""" self.adaptor = CommonTreeAdaptor() self.tokens = [ "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR" ] def testInit(self): """TreeWizard.__init__()""" wiz = TreeWizard( self.adaptor, tokenNames=['a', 'b'] ) self.failUnless(wiz.adaptor is self.adaptor) self.failUnlessEqual( wiz.tokenNameToTypeMap, { 'a': 0, 'b': 1 } ) def testGetTokenType(self): """TreeWizard.getTokenType()""" wiz = TreeWizard( self.adaptor, tokenNames=self.tokens ) self.failUnlessEqual( wiz.getTokenType('A'), 5 ) self.failUnlessEqual( wiz.getTokenType('VAR'), 11 ) self.failUnlessEqual( wiz.getTokenType('invalid'), INVALID_TOKEN_TYPE ) def testSingleNode(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("ID") found = t.toStringTree() expecting = "ID" self.failUnlessEqual(expecting, found) def testSingleNodeWithArg(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("ID[foo]") found = t.toStringTree() expecting = "foo" self.failUnlessEqual(expecting, found) def testSingleNodeTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A)") found = t.toStringTree() expecting = "A" self.failUnlessEqual(expecting, found) def testSingleLevelTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C D)") found = t.toStringTree() expecting = "(A B C D)" self.failUnlessEqual(expecting, found) def testListTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(nil A B C)") found = t.toStringTree() expecting = "A B C" self.failUnlessEqual(expecting, found) def testInvalidListTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("A B C") self.failUnless(t is None) def testDoubleLevelTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A (B C) (B D) E)") found = t.toStringTree() expecting = "(A (B C) (B D) E)" self.failUnlessEqual(expecting, found) def __simplifyIndexMap(self, indexMap): return dict( # stringify nodes for easy comparing (ttype, [str(node) for node in nodes]) for ttype, nodes in indexMap.items() ) def testSingleNodeIndex(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("ID") indexMap = wiz.index(tree) found = self.__simplifyIndexMap(indexMap) expecting = { 10: ["ID"] } self.failUnlessEqual(expecting, found) def testNoRepeatsIndex(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B C D)") indexMap = wiz.index(tree) found = self.__simplifyIndexMap(indexMap) expecting = { 8:['D'], 6:['B'], 7:['C'], 5:['A'] } self.failUnlessEqual(expecting, found) def testRepeatsIndex(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") indexMap = wiz.index(tree) found = self.__simplifyIndexMap(indexMap) expecting = { 8: ['D', 'D'], 6: ['B', 'B', 'B'], 7: ['C'], 5: ['A', 'A'] } self.failUnlessEqual(expecting, found) def testNoRepeatsVisit(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B C D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append(str(node)) wiz.visit(tree, wiz.getTokenType("B"), visitor) expecting = ['B'] self.failUnlessEqual(expecting, elements) def testNoRepeatsVisit2(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append(str(node)) wiz.visit(tree, wiz.getTokenType("C"), visitor) expecting = ['C'] self.failUnlessEqual(expecting, elements) def testRepeatsVisit(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") 
elements = [] def visitor(node, parent, childIndex, labels): elements.append(str(node)) wiz.visit(tree, wiz.getTokenType("B"), visitor) expecting = ['B', 'B', 'B'] self.failUnlessEqual(expecting, elements) def testRepeatsVisit2(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append(str(node)) wiz.visit(tree, wiz.getTokenType("A"), visitor) expecting = ['A', 'A'] self.failUnlessEqual(expecting, elements) def testRepeatsVisitWithContext(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append('%s@%s[%d]' % (node, parent, childIndex)) wiz.visit(tree, wiz.getTokenType("B"), visitor) expecting = ['B@A[0]', 'B@A[1]', 'B@A[2]'] self.failUnlessEqual(expecting, elements) def testRepeatsVisitWithNullParentAndContext(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B (A C B) B D D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append( '%s@%s[%d]' % (node, ['nil', parent][parent is not None], childIndex) ) wiz.visit(tree, wiz.getTokenType("A"), visitor) expecting = ['A@nil[0]', 'A@A[1]'] self.failUnlessEqual(expecting, elements) def testVisitPattern(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B C (A B) D)") elements = [] def visitor(node, parent, childIndex, labels): elements.append( str(node) ) wiz.visit(tree, '(A B)', visitor) expecting = ['A'] # shouldn't match overall root, just (A B) self.failUnlessEqual(expecting, elements) def testVisitPatternMultiple(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B C (A B) (D (A B)))") elements = [] def visitor(node, parent, childIndex, labels): elements.append( '%s@%s[%d]' % (node, ['nil', parent][parent is not None], childIndex) ) wiz.visit(tree, '(A B)', visitor) expecting = ['A@A[2]', 'A@D[0]'] self.failUnlessEqual(expecting, elements) def testVisitPatternMultipleWithLabels(self): wiz = TreeWizard(self.adaptor, self.tokens) tree = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") elements = [] def visitor(node, parent, childIndex, labels): elements.append( '%s@%s[%d]%s&%s' % (node, ['nil', parent][parent is not None], childIndex, labels['a'], labels['b'], ) ) wiz.visit(tree, '(%a:A %b:B)', visitor) expecting = ['foo@A[2]foo&bar', 'big@D[0]big&dog'] self.failUnlessEqual(expecting, elements) def testParse(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C)") valid = wiz.parse(t, "(A B C)") self.failUnless(valid) def testParseSingleNode(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("A") valid = wiz.parse(t, "A") self.failUnless(valid) def testParseSingleNodeFails(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("A") valid = wiz.parse(t, "B") self.failUnless(not valid) def testParseFlatTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(nil A B C)") valid = wiz.parse(t, "(nil A B C)") self.failUnless(valid) def testParseFlatTreeFails(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(nil A B C)") valid = wiz.parse(t, "(nil A B)") self.failUnless(not valid) def testParseFlatTreeFails2(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(nil A B C)") valid = wiz.parse(t, "(nil A B A)") self.failUnless(not valid) def testWildcard(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C)") valid = 
wiz.parse(t, "(A . .)") self.failUnless(valid) def testParseWithText(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B[foo] C[bar])") # C pattern has no text arg so despite [bar] in t, no need # to match text--check structure only. valid = wiz.parse(t, "(A B[foo] C)") self.failUnless(valid) def testParseWithText2(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B[T__32] (C (D E[a])))") # C pattern has no text arg so despite [bar] in t, no need # to match text--check structure only. valid = wiz.parse(t, "(A B[foo] C)") self.assertEquals("(A T__32 (C (D a)))", t.toStringTree()) def testParseWithTextFails(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C)") valid = wiz.parse(t, "(A[foo] B C)") self.failUnless(not valid) # fails def testParseLabels(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C)") labels = {} valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels) self.failUnless(valid) self.failUnlessEqual("A", str(labels["a"])) self.failUnlessEqual("B", str(labels["b"])) self.failUnlessEqual("C", str(labels["c"])) def testParseWithWildcardLabels(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C)") labels = {} valid = wiz.parse(t, "(A %b:. %c:.)", labels) self.failUnless(valid) self.failUnlessEqual("B", str(labels["b"])) self.failUnlessEqual("C", str(labels["c"])) def testParseLabelsAndTestText(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B[foo] C)") labels = {} valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels) self.failUnless(valid) self.failUnlessEqual("A", str(labels["a"])) self.failUnlessEqual("foo", str(labels["b"])) self.failUnlessEqual("C", str(labels["c"])) def testParseLabelsInNestedTree(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A (B C) (D E))") labels = {} valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels) self.failUnless(valid) self.failUnlessEqual("A", str(labels["a"])) self.failUnlessEqual("B", str(labels["b"])) self.failUnlessEqual("C", str(labels["c"])) self.failUnlessEqual("D", str(labels["d"])) self.failUnlessEqual("E", str(labels["e"])) def testEquals(self): wiz = TreeWizard(self.adaptor, self.tokens) t1 = wiz.create("(A B C)") t2 = wiz.create("(A B C)") same = wiz.equals(t1, t2) self.failUnless(same) def testEqualsWithText(self): wiz = TreeWizard(self.adaptor, self.tokens) t1 = wiz.create("(A B[foo] C)") t2 = wiz.create("(A B[foo] C)") same = wiz.equals(t1, t2) self.failUnless(same) def testEqualsWithMismatchedText(self): wiz = TreeWizard(self.adaptor, self.tokens) t1 = wiz.create("(A B[foo] C)") t2 = wiz.create("(A B C)") same = wiz.equals(t1, t2) self.failUnless(not same) def testEqualsWithMismatchedList(self): wiz = TreeWizard(self.adaptor, self.tokens) t1 = wiz.create("(A B C)") t2 = wiz.create("(A B A)") same = wiz.equals(t1, t2) self.failUnless(not same) def testEqualsWithMismatchedListLength(self): wiz = TreeWizard(self.adaptor, self.tokens) t1 = wiz.create("(A B C)") t2 = wiz.create("(A B)") same = wiz.equals(t1, t2) self.failUnless(not same) def testFindPattern(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") subtrees = wiz.find(t, "(A B)") found = [str(node) for node in subtrees] expecting = ['foo', 'big'] self.failUnlessEqual(expecting, found) def testFindTokenType(self): wiz = TreeWizard(self.adaptor, self.tokens) t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))") subtrees = wiz.find(t, wiz.getTokenType('A')) 
found = [str(node) for node in subtrees] expecting = ['A', 'foo', 'big'] self.failUnlessEqual(expecting, found) if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/teststreams.input10000644000175000017500000000000712653072152020527 0ustar zigozigofoo barpython-antlr3-3.5.2/unittests/testexceptions.py0000644000175000017500000000527112653072152020452 0ustar zigozigoimport unittest import antlr3 import testbase class TestRecognitionException(unittest.TestCase): """Tests for the antlr3.RecognitionException class""" def testInitNone(self): """RecognitionException.__init__()""" exc = antlr3.RecognitionException() class TestEarlyExitException(unittest.TestCase): """Tests for the antlr3.EarlyExitException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """EarlyExitException.__init__()""" exc = antlr3.EarlyExitException() class TestFailedPredicateException(unittest.TestCase): """Tests for the antlr3.FailedPredicateException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """FailedPredicateException.__init__()""" exc = antlr3.FailedPredicateException() class TestMismatchedNotSetException(unittest.TestCase): """Tests for the antlr3.MismatchedNotSetException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """MismatchedNotSetException.__init__()""" exc = antlr3.MismatchedNotSetException() class TestMismatchedRangeException(unittest.TestCase): """Tests for the antlr3.MismatchedRangeException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """MismatchedRangeException.__init__()""" exc = antlr3.MismatchedRangeException() class TestMismatchedSetException(unittest.TestCase): """Tests for the antlr3.MismatchedSetException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """MismatchedSetException.__init__()""" exc = antlr3.MismatchedSetException() class TestMismatchedTokenException(unittest.TestCase): """Tests for the antlr3.MismatchedTokenException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """MismatchedTokenException.__init__()""" exc = antlr3.MismatchedTokenException() class TestMismatchedTreeNodeException(unittest.TestCase): """Tests for the antlr3.MismatchedTreeNodeException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """MismatchedTreeNodeException.__init__()""" exc = antlr3.MismatchedTreeNodeException() class TestNoViableAltException(unittest.TestCase): """Tests for the antlr3.NoViableAltException class""" @testbase.broken("FIXME", Exception) def testInitNone(self): """NoViableAltException.__init__()""" exc = antlr3.NoViableAltException() if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/teststreams.py0000644000175000017500000004162112653072152017746 0ustar zigozigo# -*- coding: utf-8 -*- import os import unittest from StringIO import StringIO import antlr3 class TestStringStream(unittest.TestCase): """Test case for the StringStream class.""" def testSize(self): """StringStream.size()""" stream = antlr3.StringStream('foo') self.failUnlessEqual(stream.size(), 3) def testIndex(self): """StringStream.index()""" stream = antlr3.StringStream('foo') self.failUnlessEqual(stream.index(), 0) def testConsume(self): """StringStream.consume()""" stream = antlr3.StringStream('foo\nbar') stream.consume() # f self.failUnlessEqual(stream.index(), 1) self.failUnlessEqual(stream.charPositionInLine, 1) 
self.failUnlessEqual(stream.line, 1) stream.consume() # o self.failUnlessEqual(stream.index(), 2) self.failUnlessEqual(stream.charPositionInLine, 2) self.failUnlessEqual(stream.line, 1) stream.consume() # o self.failUnlessEqual(stream.index(), 3) self.failUnlessEqual(stream.charPositionInLine, 3) self.failUnlessEqual(stream.line, 1) stream.consume() # \n self.failUnlessEqual(stream.index(), 4) self.failUnlessEqual(stream.charPositionInLine, 0) self.failUnlessEqual(stream.line, 2) stream.consume() # b self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.line, 2) stream.consume() # a self.failUnlessEqual(stream.index(), 6) self.failUnlessEqual(stream.charPositionInLine, 2) self.failUnlessEqual(stream.line, 2) stream.consume() # r self.failUnlessEqual(stream.index(), 7) self.failUnlessEqual(stream.charPositionInLine, 3) self.failUnlessEqual(stream.line, 2) stream.consume() # EOF self.failUnlessEqual(stream.index(), 7) self.failUnlessEqual(stream.charPositionInLine, 3) self.failUnlessEqual(stream.line, 2) stream.consume() # EOF self.failUnlessEqual(stream.index(), 7) self.failUnlessEqual(stream.charPositionInLine, 3) self.failUnlessEqual(stream.line, 2) def testReset(self): """StringStream.reset()""" stream = antlr3.StringStream('foo') stream.consume() stream.consume() stream.reset() self.failUnlessEqual(stream.index(), 0) self.failUnlessEqual(stream.line, 1) self.failUnlessEqual(stream.charPositionInLine, 0) self.failUnlessEqual(stream.LT(1), 'f') def testLA(self): """StringStream.LA()""" stream = antlr3.StringStream('foo') self.failUnlessEqual(stream.LT(1), 'f') self.failUnlessEqual(stream.LT(2), 'o') self.failUnlessEqual(stream.LT(3), 'o') stream.consume() stream.consume() self.failUnlessEqual(stream.LT(1), 'o') self.failUnlessEqual(stream.LT(2), antlr3.EOF) self.failUnlessEqual(stream.LT(3), antlr3.EOF) def testSubstring(self): """StringStream.substring()""" stream = antlr3.StringStream('foobar') self.failUnlessEqual(stream.substring(0, 0), 'f') self.failUnlessEqual(stream.substring(0, 1), 'fo') self.failUnlessEqual(stream.substring(0, 5), 'foobar') self.failUnlessEqual(stream.substring(3, 5), 'bar') def testSeekForward(self): """StringStream.seek(): forward""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) self.failUnlessEqual(stream.index(), 4) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 0) self.failUnlessEqual(stream.LT(1), 'b') ## # not yet implemented ## def testSeekBackward(self): ## """StringStream.seek(): backward""" ## stream = antlr3.StringStream('foo\nbar') ## stream.seek(4) ## stream.seek(1) ## self.failUnlessEqual(stream.index(), 1) ## self.failUnlessEqual(stream.line, 1) ## self.failUnlessEqual(stream.charPositionInLine, 1) ## self.failUnlessEqual(stream.LA(1), 'o') def testMark(self): """StringStream.mark()""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) marker = stream.mark() self.failUnlessEqual(marker, 1) self.failUnlessEqual(stream.markDepth, 1) stream.consume() marker = stream.mark() self.failUnlessEqual(marker, 2) self.failUnlessEqual(stream.markDepth, 2) def testReleaseLast(self): """StringStream.release(): last marker""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.release() self.failUnlessEqual(stream.markDepth, 1) # release same marker again, nothing has changed stream.release() self.failUnlessEqual(stream.markDepth, 1) def testReleaseNested(self): 
"""StringStream.release(): nested""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.release(marker2) self.failUnlessEqual(stream.markDepth, 1) def testRewindLast(self): """StringStream.rewind(): last marker""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) marker = stream.mark() stream.consume() stream.consume() stream.rewind() self.failUnlessEqual(stream.markDepth, 0) self.failUnlessEqual(stream.index(), 4) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 0) self.failUnlessEqual(stream.LT(1), 'b') def testRewindNested(self): """StringStream.rewind(): nested""" stream = antlr3.StringStream('foo\nbar') stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.rewind(marker2) self.failUnlessEqual(stream.markDepth, 1) self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.LT(1), 'a') class TestFileStream(unittest.TestCase): """Test case for the FileStream class.""" def testNoEncoding(self): path = os.path.join(os.path.dirname(__file__), 'teststreams.input1') stream = antlr3.FileStream(path) stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.rewind(marker2) self.failUnlessEqual(stream.markDepth, 1) self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.LT(1), 'a') self.failUnlessEqual(stream.LA(1), ord('a')) def testEncoded(self): path = os.path.join(os.path.dirname(__file__), 'teststreams.input2') stream = antlr3.FileStream(path, 'utf-8') stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.rewind(marker2) self.failUnlessEqual(stream.markDepth, 1) self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.LT(1), u'ä') self.failUnlessEqual(stream.LA(1), ord(u'ä')) class TestInputStream(unittest.TestCase): """Test case for the InputStream class.""" def testNoEncoding(self): file = StringIO('foo\nbar') stream = antlr3.InputStream(file) stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.rewind(marker2) self.failUnlessEqual(stream.markDepth, 1) self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.LT(1), 'a') self.failUnlessEqual(stream.LA(1), ord('a')) def testEncoded(self): file = StringIO(u'foo\nbär'.encode('utf-8')) stream = antlr3.InputStream(file, 'utf-8') stream.seek(4) marker1 = stream.mark() stream.consume() marker2 = stream.mark() stream.consume() marker3 = stream.mark() stream.rewind(marker2) self.failUnlessEqual(stream.markDepth, 1) self.failUnlessEqual(stream.index(), 5) self.failUnlessEqual(stream.line, 2) self.failUnlessEqual(stream.charPositionInLine, 1) self.failUnlessEqual(stream.LT(1), u'ä') self.failUnlessEqual(stream.LA(1), ord(u'ä')) class TestCommonTokenStream(unittest.TestCase): """Test case for the StringStream class.""" def setUp(self): """Setup test fixure The constructor of CommonTokenStream needs a token source. 
This is a simple mock class providing just the nextToken() method. """ class MockSource(object): def __init__(self): self.tokens = [] def makeEOFToken(self): return antlr3.CommonToken(type=antlr3.EOF) def nextToken(self): try: return self.tokens.pop(0) except IndexError: return None self.source = MockSource() def testInit(self): """CommonTokenStream.__init__()""" stream = antlr3.CommonTokenStream(self.source) self.failUnlessEqual(stream.index(), -1) def testSetTokenSource(self): """CommonTokenStream.setTokenSource()""" stream = antlr3.CommonTokenStream(None) stream.setTokenSource(self.source) self.failUnlessEqual(stream.index(), -1) self.failUnlessEqual(stream.channel, antlr3.DEFAULT_CHANNEL) def testLTEmptySource(self): """CommonTokenStream.LT(): EOF (empty source)""" stream = antlr3.CommonTokenStream(self.source) lt1 = stream.LT(1) self.failUnlessEqual(lt1.type, antlr3.EOF) def testLT1(self): """CommonTokenStream.LT(1)""" self.source.tokens.append( antlr3.CommonToken(type=12) ) stream = antlr3.CommonTokenStream(self.source) lt1 = stream.LT(1) self.failUnlessEqual(lt1.type, 12) def testLT1WithHidden(self): """CommonTokenStream.LT(1): with hidden tokens""" self.source.tokens.append( antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) stream = antlr3.CommonTokenStream(self.source) lt1 = stream.LT(1) self.failUnlessEqual(lt1.type, 13) def testLT2BeyondEnd(self): """CommonTokenStream.LT(2): beyond end""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL) ) stream = antlr3.CommonTokenStream(self.source) lt1 = stream.LT(2) self.failUnlessEqual(lt1.type, antlr3.EOF) # not yet implemented def testLTNegative(self): """CommonTokenStream.LT(-1): look back""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) stream = antlr3.CommonTokenStream(self.source) stream.fillBuffer() stream.consume() lt1 = stream.LT(-1) self.failUnlessEqual(lt1.type, 12) def testLB1(self): """CommonTokenStream.LB(1)""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) stream = antlr3.CommonTokenStream(self.source) stream.fillBuffer() stream.consume() self.failUnlessEqual(stream.LB(1).type, 12) def testLTZero(self): """CommonTokenStream.LT(0)""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) stream = antlr3.CommonTokenStream(self.source) lt1 = stream.LT(0) self.failUnless(lt1 is None) def testLBBeyondBegin(self): """CommonTokenStream.LB(-1): beyond begin""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) ) self.source.tokens.append( antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) stream = antlr3.CommonTokenStream(self.source) self.failUnless(stream.LB(1) is None) stream.consume() stream.consume() self.failUnless(stream.LB(3) is None) def testFillBuffer(self): """CommonTokenStream.fillBuffer()""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) self.source.tokens.append( antlr3.CommonToken(type=14) ) self.source.tokens.append( antlr3.CommonToken(type=antlr3.EOF) ) stream = antlr3.CommonTokenStream(self.source) stream.fillBuffer() 
self.failUnlessEqual(len(stream.tokens), 3) self.failUnlessEqual(stream.tokens[0].type, 12) self.failUnlessEqual(stream.tokens[1].type, 13) self.failUnlessEqual(stream.tokens[2].type, 14) def testConsume(self): """CommonTokenStream.consume()""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) self.source.tokens.append( antlr3.CommonToken(type=antlr3.EOF) ) stream = antlr3.CommonTokenStream(self.source) self.failUnlessEqual(stream.LA(1), 12) stream.consume() self.failUnlessEqual(stream.LA(1), 13) stream.consume() self.failUnlessEqual(stream.LA(1), antlr3.EOF) stream.consume() self.failUnlessEqual(stream.LA(1), antlr3.EOF) def testSeek(self): """CommonTokenStream.seek()""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) self.source.tokens.append( antlr3.CommonToken(type=antlr3.EOF) ) stream = antlr3.CommonTokenStream(self.source) self.failUnlessEqual(stream.LA(1), 12) stream.seek(2) self.failUnlessEqual(stream.LA(1), antlr3.EOF) stream.seek(0) self.failUnlessEqual(stream.LA(1), 12) def testMarkRewind(self): """CommonTokenStream.mark()/rewind()""" self.source.tokens.append( antlr3.CommonToken(type=12) ) self.source.tokens.append( antlr3.CommonToken(type=13) ) self.source.tokens.append( antlr3.CommonToken(type=antlr3.EOF) ) stream = antlr3.CommonTokenStream(self.source) stream.fillBuffer() stream.consume() marker = stream.mark() stream.consume() stream.rewind(marker) self.failUnlessEqual(stream.LA(1), 13) def testToString(self): """CommonTokenStream.toString()""" self.source.tokens.append( antlr3.CommonToken(type=12, text="foo") ) self.source.tokens.append( antlr3.CommonToken(type=13, text="bar") ) self.source.tokens.append( antlr3.CommonToken(type=14, text="gnurz") ) self.source.tokens.append( antlr3.CommonToken(type=15, text="blarz") ) stream = antlr3.CommonTokenStream(self.source) assert stream.toString() == "foobargnurzblarz" assert stream.toString(1, 2) == "bargnurz" assert stream.toString(stream.tokens[1], stream.tokens[-2]) == "bargnurz" if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/testantlr3.py0000644000175000017500000000017712653072152017474 0ustar zigozigo import unittest import antlr3 if __name__ == "__main__": unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) python-antlr3-3.5.2/unittests/teststreams.input20000644000175000017500000000001012653072152020522 0ustar zigozigofoo bärpython-antlr3-3.5.2/README0000644000175000017500000000573112653072315013637 0ustar zigozigo1) ABOUT ======== This is the Python package 'antlr3', which is required to use parsers created by the ANTLR3 tool. See for more information about ANTLR3. 2) STATUS ========= The Python target for ANTLR3 is still in beta. Documentation is lacking, some bits of the code are not yet done, and some functionality has not been tested yet. Also, the API might change a bit - it currently mimics the Java implementation, but it may be made a bit more pythonic here and there. WARNING: Currently the runtime library for V3.1 is not compatible with recognizers generated by ANTLR V3.0.x. If you are an application developer, then the suggested way to solve this is to package the correct runtime with your application. Installing the runtime in the global site-packages directory may not be a good idea.
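   As an illustrative sketch of that suggestion (names are placeholders:
   'myapp' is hypothetical, and the distribution name is taken from the
   egg shipped under dist/), an application could pin the exact runtime
   version its parsers were generated against in its own setup.py:

     from setuptools import setup

     setup(
         name='myapp',
         packages=['myapp'],
         install_requires=['antlr_python_runtime == 3.4'],
     )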
It is still undetermined whether a future release of the V3.1 runtime will be compatible with V3.0.x recognizers, or whether future runtimes V3.2+ will be compatible with V3.1 recognizers. Sorry for the inconvenience. 3) DOWNLOAD =========== This runtime is part of the ANTLR distribution. The latest version can be found at . If you are interested in the latest, most bleeding edge version, have a look at the perforce depot at . There are tarballs ready to download, so you don't have to install the perforce client. 4) INSTALLATION =============== Just like any other Python package: $ python setup.py install See for more information. 5) DOCUMENTATION ================ Documentation (as far as it exists) can be found in the wiki 6) REPORTING BUGS ================= Please send bug reports to the ANTLR mailing list or . Existing bugs may appear someday in the bugtracker: 7) HACKING ========== Only the runtime package can be found here. There are also some StringTemplate files in 'src/org/antlr/codegen/templates/Python/' and some Java code in 'src/org/antlr/codegen/PythonTarget.java' (of the main ANTLR3 source distribution). If there are no directories 'tests' and 'unittests' in 'runtime/Python', you should fetch the latest ANTLR3 version from the perforce depot. See section DOWNLOAD. You'll need Java and Ant in order to compile and use the tool. Be sure to set up your CLASSPATH properly. (FIXME: is there some generic information, how to build it yourself? I should point to it to avoid duplication.) You can then use the commands $ python setup.py unittest $ python setup.py functest to ensure that changes do not break existing behaviour. Please send patches to . For larger code contributions you'll have to sign the "Developer's Certificate of Origin", which can be found on or use the feedback form at . python-antlr3-3.5.2/AUTHORS0000644000175000017500000000022312653072152014015 0ustar zigozigoBenjamin Niemann : Main developer of Python target. Clinton Roy : AST templates and runtime. python-antlr3-3.5.2/LICENSE0000644000175000017500000000262012653072152013755 0ustar zigozigo[The "BSD licence"] Copyright (c) 2003-2006 Terence Parr All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
python-antlr3-3.5.2/dist/0000755000175000017500000000000012653072152013713 5ustar zigozigo
python-antlr3-3.5.2/dist/antlr_python_runtime-3.4-py2.4.egg [binary egg archive, contents omitted]
python-antlr3-3.5.2/dist/antlr_python_runtime-3.4.zip [binary zip archive, contents omitted]

Lr ybh –ȟDO;)^5Gtp,.Ȏ]kr.(VÇTFN\yŸLi9V#%bX ijlh8{k]!Q%f@A'2"r0l[%[D?+>g7G /kaBf9Oi5+3gދ+M`xmDai@W!s fIb<*s Ztq jMZg4td0|b{TPD/TSIwj!,=P/$O y~UW҃V?ŏ 9Y7Q&AllIkj?ӎX;6";wAn zŭN,H(}Ϧ9?܎{uggs|Fmr)->¥L+`i'5SgW0 9x@E-8[Ϲ}k'U AvV0Ϝ{b~4.<żJ,_1e,Δ`[4ɎOÉtSҹ\ V!+׹ n-J4R6Jʆjk6d NWӴN&{cMrx7=B2irp_?!)[F4t'dl5tF)CL|ԒHglN*Ͻȕɝѧl n*tg[,үO^[wHlķz/n"=u%⮱>X2]8s#v3JZ㋬S$4wړPBT⹺@ o=z{)~DN)%wۢ2OWgv}~~4~L2a-pӓJ 1UTw+v7F)ZxGlKO#\jh‚:XSk[܎0}SLy~bWt.4e[* #(TۣJw%^BF'P8ݖ2UbH|R/`8a?ȭ@q%; ih[T}sUx?ᙰcTj`>D4zL]d'q>-;N hVϴ.ٲIRʫ/H-v "QtG_o+ MRlܦIVoe"2Y9Q< ^>‹9E\"Bϖ9Eyy%K;;ZCNf+UTPhdthTY?ca39+K1K;É󜺨b?z}4Ő'[ɋPpUh a0$nxTgM9b2US-, Ts(mQMzڗ E-W''N1Kl (! S^\HgcaHy~ DruG=F '-)@PlNe$ycfS7;}t?uOὙQa@x}=v{RxC؃mm{Iɝ\@`y\r$m"/C91~txtONՓ''f-~{/HT S>| \!F$8HkW?CeQ;RоqYK'1}y+|C@wT=z/+,S/&* [iUT&#,MJHIYbVeBhx(ӡ'LN% )"ΪvoSɁF)VS:r< 95 pt6>TG7O% 1e6 NEzbxdr7LF({P]s 9Fj<ʈJmKfX%@;\:{b~Œ~d"EhͰcFfISx+.vRN4?48zN 5-* v{Ho*%PK毭wY<ԇ>/ #I [^&h&H]uJI]EZq]x[1UA2Ean@{ZZ1s}]BCʁb#g'$u:Y~DZ~JaB* &7Jk>w'dyՋ4j}tH&d ,MO-zKZU9LĜfAɯu7ه%0}6+@bj)l&ޔrLB!z*O=0C%ը(/2VGY6??\ʢɜ_sY5F0( 6ǜ׍u,hףY1+]lG:ɖ6~aMͺ-[`6c\چ9L{\Ro*~C;ݬθ0> X+ۦ[:#CqBէP?'=hL:pXjMd쐄TMQ_PeGl`7jO9,.$p{Omolіg5?3}WPOMﻞ@ܙصR"A6(ƇOy/![ zDӊ,HÛ@D.d (5[BswuXd2J=iVPvFC-31Q{O_&8mE,OsfSæѠr,9QDпhM(ۛHM"dZ 7_U%RH#_K \iaah-柆pϑrϭ{V9htԩ{9{ٶո"1KXq#{x7 82Ml)-&$H(e RDOK0HVpݞjm`y{7s6wC_n}c AnNlwm">/ú %vmO)4lD9?7[dBr*\%:|oZ5؋tr#Ss)}!f}=S' T\ +Pb*g& q,F!G|iqFTsg\JNCikDJd}Iq%rm*PHC1jjZO_A9󟡞oaEG 6y1\8F\-[Kic5kj@:q9mR2M]c` Op?}ۧ@{QA9:ͪlwI!>n{k +`$Vt?r}JZ" tui#08Mךw$€n% ȫe+D.m`)(?HJjVZd7 ̔ަc|& kEKKK6taҝ:[GnJΑm׼TƓLi=?܏ig}F"CUZY5A|bLӥrNO{C*.b>@rGT_uΓK.qJRI^Ir/z qL@}N)UIE0u w%C=:gUܘw jԳS~WɺCzK+"3>A}y-tGH  - [UUì *"f|#gl.ҽ\'N4f~^`a:6Nlk܊st/2TPL[F 0U_?-)6?/?#ˠyX_-_邤~&b !M|1 `Z2s=[e|K"D. fYt[m֣%%."M:[]bD,NV29QNyUT Y2HI io3(;"f*0j,A`)QG,%>-Ttze,K<85&>NŊ^0V@x}^Es[ŐO"|&Ɗ8?.%4ѵڋazq*6-+JW{Qx(,F+OFwYewH@2eua}Ljz4葠&]f&lEk"tYaՁ-<10p:E[Tr Ƕ~^tZC7k:m?bО0rɆkU'~GU|,O[6fδ5#ڒeU=m>uUtPs!mm(Xu@+#n֤9ϝb{NJUz䍍NQ 9^D6Q.VreDZjjL]%dcV ժ@bj彺bE6#Lܩ᪝# m AwQzkDk#\m 0bRr&U15=mk`/E&T5 UX 2Lho2IwO j;^&ttqKP3cd }_ցVVR{&UJo2n+3~/tyѹ,ݝ E7v` ItWTz+躽 Cjjmi\asHG#x[!LؓÓ'#Vc^UFsM!z@(x߬V!^Dj+SMtuTR^ҝbջYq1)sjϸQ<DDpLuIspn7qbBxD{ҏ1=~ f9Z\'G^s7 6iE~{!bV&ՇlSqkF+(#`;C[Dl݈nqU]FwrENNw{qCE={֛4yqq9/;$Xhts,;>s; W.Ou?QN^㢞Dd \=n#E}?_K2rXH`2 QM"SZaTs6{D~eu ƽԶ Y-ݴ&- lKc^ Z`ӝO$džUJ9mQdk0l[ PQ*{,Ga\ f?6˰!I)ay@{T*RfeT|Q_&a4<4 ru_ 19iu-l7&piu>ac1nK$l/_LCؿLx.xkM (TE(9qwc8ǣljUU^\WZ"/jС.BI_ھ<~N]y?<~]!G{w!6鸰i_J/3?ٗ*Mi#Qydݴ]^T"sb,Q5 iW_ņ% تUtD:\w7\\`,_n \yA (ɅR|Bu;*cL+; u{My땹-s/$n:P$5!,Nk-@|ǖ7*k9cj^ϔ\j>X+OYƘuX vmYK[N=rYv9SWQt.gy/X)ȟEk6Z6*!jwrn %WZ2o3ꌕTXa):v< 8DO:29d[\>u^HUA6F2Et(U󞂦ȹ5YOų(wRMҜH%l/yjwK%^}^OpVUQZ$l5{~\]6$aGk$d[@;d@BLaݢn 2J46?oj%c,XF~h)l\{)u@]΢}j$ƎN WI ֝[čNsoJ?? 2x[ ֏@ hI:+h?;/|PZ [ +Or΋g'F(i;Dv+X<Ū`vW/l,M[G25V .)R|(Sqx xc6+=5×]'7~5ӜGQRe{uWhM>&ْ_Ho)u˲lcX sPmC~C7y,GJǐJllVS2_/0f"Ns8cA6|mLdǒ>QVAEA[ JKo#A`Tcun:qJ,l- g6d*Ts< $!ŸK?)\,ZTK42#y:2ƣJƒ[ Z\l8&Ѧ}A> L AZ4Ux3=vARog PK&q>Q.V^.Ű*antlr_python_runtime-3.4/antlr3/streams.py}kSIW@ Dl؍g0txlI*ARƚU!=;{U 2O,Lf᧝'uv_O| ߫t΢Buf]uQ06]e85Ua% (GyEӲD\yDif%AV4[=u*gZdΣE4 DO&̖QQs/~) LQrfi2S@2,^ ^JY:e^LE4>IZz ʁh1ij>B0,`92:9h%`x he^a!RxePYĹ%9u'30!KXMFlT<̀')@^ExE.HPjOyy(nqAGT g"3Bɐ9f ~7ex:8Uu~Hޞ:>?_ލ㱺wgCPF͆qO Oޟ'bΆoh6z7uFN~/_φ_i799VGz~b N㓳)}x/||v9~}6`0hp2A7qLzjnp2_ǣ_{0{urq>{hcJ "v0p~2P?]\"ax0ex2TgH7xhOip8!hx>FM] y O8aZW bԇs(hx2qU~:48? a8taucl0aa4q\ ~cN ߨ_47t'? ᙫaYQ-WiV;gư9ᅒg~$Ey{?\"siGބi>z14?v@|Mvͣ","Is % g:ܯۼ0g9 ӈC~ jg,ym$vhPr'~gEiEBHYdQ)zW qyr4t,roA2j24 HP(g@7#bXJ] gV"Qw٤rP&<2;dAs:O lv=;^=9Oa+2 \QT0YܢY~B"W< <@@B %J\ѡ;}F8]uqrE9(x: ? 
&xќ设ΐq[z-JgIܸuaÞE@E&dC5 2K *m2"bCSb3d7!~C֙B­G@I2t:-,lU|@ Ңo)iݮf3c+1|4y9r(EQ~?KchHoT؟G y6) *m[Q1(skwTz9Y|pj_i7@FN̰dBK$BCڱ uZ4n9 >$ ,.x$l%PVd1C_eLRmE"{FЫKFV6l< iS] m:@aX$K ǧ5E`-'@d#Dp.0A>a e^'b`R dr rAvE8 Ve,g0QsT 2YppR b.4jZ"K%յ}IP2M?&EfZ1OI(`shU0Ii }4JWØbai d'QuM?\6Nd4Tyu9UzD)jV4 [n30d#-5aӜu;Xs $eopiz C 9̿ƃ$gځ.nlƐ(,wBpz`,Pe>ͣ\iW]r,0X͛ea-quVi"/z؅?/Ģ$xV'6ff 9< (M:lktvp?q+^%Yd9 ?dY۳#{ڱ/*P$|%l&0@ *WL[@R!Ggd,C@]~|FG%R Lqa(5]gM]}s/l(@X"#Pk$Λ :Qd245Hĵ48Y6/s0C@Ί<($W7o.@,31xW`ˉe *? ؓhIr'K<..*0Ћ\x4ODm qD}ZA iyuo?e#RbyX:  F >+P0Km G`ЪS#HAq$vD EKƃfHqxb66AgA:1ksg\ozHQf02GhHE(scc`mL ;%B~prbcd] mp#i)' ab߷#=&NUDhrc9U63$DpkU`-֨ {>ܴjwV+0jG:\'*?s1 <|ug-W&' 8+txo{ p(X%1UR,(+Pذ"? ]X`D~'sDvN?VDD=JTrz)JSt~׃&a`Ф*ihBkȎ9wAG~؀!ȍ[>m|O2_qbR΢PEU^=U14Ҿ$h<{ca/j\rn}.ѱ&o!f-t2<Yo`SI.(+%1E媖)"a܁'_` G}1\`q39nY)Ek8T _!s,ԦDt'VMm gБTEpzlĢd8zi8]qm~N^]115<^M̔m*>9u,UqF5-V}䖎m Ƶl9t]YM{])S[McK(w{ B ^ڧ{dʁt G9?kEn{~;:=C9n{Lę >\rBAL^COa3uu+s V ךcODI7F[՛2:N.²+rQ^H=3Fs#q#^ĥc| B_,1^뢱K SW ٵMk *%5L5Ґ'սfǑo{8 e\+>mχ7jĥmBYNU]i͇JlIya劥. qKY%'HXu0)u&<9z+-tA-r'yI`M+;R p* ljNt`c*L9G>nz2)?@RzH(emMBW/MAuz ^H" EYlu!g(:ppRrT#\8J*왱2tLb +:zǵ] _1P(fl(iƷGmJ…H H| >tyx!=8E#Ž%]dvp+c-F- T&akN_9q q_qe\@z@[^8MZ &kt* $ $dX&:: ,˝K.tN\k[#0HؓssdgC .Y9@X+hP-^RqblMN^ VyBs:kQ]byCݬvк[Q+rlkxh5M3p9j%iܛ:v1DԺ 6:PWH9瞤ҙE˹{#|s.@СcpSj^ 9Y: }BwG\"%pt-umYOڡ&YY)+cդi#'7baKmϬh~t?ZtG&֧9apݐ.bt^M'}6ց5")#gf`Ҥ޿ Ǒk:Vt'B;^ʊ^xwSh6hB\ݻ9\6;S#ך4oT)f(rK(ǜ&x(C}7DLY-UIy*Aѷs$gG|\A~· #fb/Ä Ҟ2iXlncVewx C \9 _·f0'K>;Qyͅ#TMR:96<. 9<7NvK@t}GMKfd^%Do=iJL Jە&8*ƨrɉc!$(NbQ(g¯"s(&8e7w-3sT}xY>i1Ά}4^AxxJzb'pfDrשj#[AoQ)lMmO0BK,sšr`anc4qZ6{$ aQ [~^Wdd:. q(~?:mPY*.Ծs : VͼYi=^uC\St vRuܿDtˌW;%1':ib0ĎODښ? -dX}A><ĜMtpd/MŋBH "Rf[PCQם6~l, `v`T+:6!oLŲhMW`I*rJM:mYNjLmMskD cπnlB&17p[C،wQ~6e(܎2v΢g/Őf9 4yXThӡr}E׈TԪQ,ǡiG.v9̳ )̈́D<1JV }<&5 #*B{vuL-gӿ'ȨķPgD3t%%,/i//7,|1O?=}SOUksPYꊨu(#%_cՃ}1Cfk]:U~7oZYO#t=n?3O k˪0;|diÄRͨ^''k\t_,٨5IZݣƒ4άZQXh}-58A/6_fV/Y»guf_m$?C@2 ]zaVX4B|$]E].ҽ,Z4_0?9(븜Wq%Ѫzj#_~O\-0U.70WpFGUq(^S},׍I/8/T9K0Ν2T[ r1^-Czy;WT)ɅلzEp w% gv%X!tV:ın!2'ں51wua otE{ C;@{ݡ5ݖIi`-ٵ>$b{YeUu5~z 3 B8JEs@D|F30T2D`"/,yJ\rO{,ԭ@m@n#'12Y /`ʢ+}>0r7}!/"ʰIuiy,0;ki2}7yֵWG:@"G[k{Z]z;Ρ# m.=JgĤ =ѡVQM>u Moڗo1?2*cpV_ʌ&dYMEĶ~+mD- ̿(;`lPSMH_z7/u^>U;v, ޏ0 r{44J#0F6]{zL[rfHƄEVې DeD٦#ʯU_CѷC"u^ jfd_rQ6Xt'ZX>%a4 '.N` ?Un{>nWDvyWHF'W ;xLjwFo/Ϗ`rrtnFj_Cp 7t4OxNL 9P%OruLv:ͱF?;Dupe`0h8\}]ɦu$0#KOe0[ $hh"z]}|mA>ȮN^iULdƱ:16%.nΗ(PըQ8]<`<9"imFI}Lp V{gM\`(ոqţ|Y$Pڳ;7$곏LayLj9ȍ"*vgm 2:On|w Z_`:jjсʦ J:Xl2ţST^"Ū}v/Fݖ?Dլi&aa~YD0:h~-&-Ƙ?lYφ:Q = -h{ŭ5aTЗw&y ~0/B ەBRHvs U'GOZקetesa,9fbcP;X.[n,[R5xu;~rvKdBMcV ȟoj6;"0@0ѫs4\-jVDkǀǓW* |V#c }jt&VUb<N\˃7Ρ&l5XU[z;5S7/xrtCʡljxd+^b i2G~;TkBY`~;B|] d[fY0y%lwTQӲـv} 9UT8cZ27ѰQԂNEZK1Fn%iU0#P]:ސ#vbt(,n&P]am,g@:iԸV^dk5GU[p<{V.tӣ:H#W*%ZU~)P=Bk[)ͮOW smmAYWSYsECnuީď+UMje;\W5QC,ؖ39U) `_:,i ?%bOH7(]SÐ9klh鼛xDYo1Y_y;ia? bg4qPm$PKi}oIv=:϶G!u FM}XdQ}L'.U*gJ imbةGG?OeS3Rӧ>Q],etEI n[U :$L:_tB!FY?r!l{mlf{]5/1 "WVn=0n^젰iWHzOZWzu ݑd7a`y r+P8Ku``A"%1m5bчqa_fDCc5k@rXKx-p}ig=$-sE6 ٚFrFBv]KYz1 yXg<ݚi1<FemoYD²K_üЛX RA8ma,fQ/-;˜4G(>G "'ٷ0cϨHPa|>jMȣGڐ* L1|A$BsaܜMx0r__rPUg˞ΉeZVdOG ej9{@N\=5uf*[?Qk-V~ݢ~=1,4`VA:q Sa7:8bz\H:5y .)antlr_python_runtime-3.4/antlr3/tokens.pykoFs+ZFM[ppDD'Rqp)reBq%eofv)-Pyf퇽wỵ}t+f 6X ~gArס#*p/B.#2`(IEJNj29DyB@C! 8#fi9(L&$#B@PfiYJޥ >Ө?dO[et@$tl&S|WrR1E2J8- ā\)'ˊ&VgiYPv $g)beL h5DƋ˨h fQ)TeJl)k A}_ׅk!1.ˡ0"eCe~77BBo7!9 \p^3s?sƅKwQhx9StT{C.^ϖCvܳ1D]uBb<1j99\zp8 A+T.r'`xMIl.<\PpRpwB )!Iޛw<+?Zh? 
Iő' gsp|bހsa/*7Dy1R zސ;wo^&0G|s9 |21LXΧ$V9i&*p4 _O973[QĠy#t) rXB `(Me)\ Lk,#`h4a.U"d_l "jza0[>;jPwBVt'(bN>smmmgb6F~H4D"^ s2;[:<(vMYo1RW72MUE)2#/j0Ygs̫߳_p46+Hf<ؙ0VA3 hDcUcCe "{!D(KJiBh 4a$ ( 1u9J+0\"on<-on}e{ь(fL f0g972KXպdخZ+zzu#ΟX3xP\ZP}jKUd݊y-mnA& ; B ՚)`bqR:hnym켰 *+ۊcdidyFCo돹!/ٴ2e |v~Qӑ&'e._+xq#ŒWjQs2h'97@}4e]1*duush;¨\ T2G!KR" .[u3vn|ag5 sEHpbTTzo=>X f-JhZN-OI)mkӖKƀvǞ6knb>`b;"A5t^3w.+56JOԲ9`ў؜n)7ӁWɡV!$f aEaS5Mju*ȕ84vDr<߶,ҶKt'oUV[҂ݬh\r8uZ(9=J^)QQڄ#8Q:9Yge˙*9Qqvi>~GfXئމ}jؘ܋+;<9oۿiDo@(R0.*;/T*.eRundS,=L,atZPrm"LrcڲȔ2XgWzH)]*6~ʡ]zGO}Vbq(L ye(/JCȸ1YJL>'antlr_python_runtime-3.4/antlr3/tree.py}kSW˯聓B 'un \8 x|nMw w{`3L,{^{^ɟ6Βh7ʲ?]Uy6INoϿIE,7{ bml oZ$hҼt:M|^VM2uɤ!T7beyZYU7667776:/OQVlpqϛ{rXΗU~}$Q7zwO2*|'9K ^;)kuRe0]6S8 W =E%yeU^0{}$\4 %i ʪY48W]>M2f:-1αQ `Yּ޼ꤜ 1X ›L;|$>(n^̘ ~gB0&llwIYPӀ50/2bz Q*MV6 ~Eʾ'ME 3y/`7%fU&Y1.=qˡYd /eR0. T2ĺ4#I=F"2Gԩ9 FYAgß/ |>;?ypt|+<8Q㟏OOo+scVt48?>uqЀ%gLJp0_{?.%x;"ˇ|/Ϗ`?\ q68>ypx|=EPI./qZG: ˋhp2<>?<NOO``yz ?>=;FX{/?>Xr1<`9̬599c|~28 . ;^qO`f]2x< Oxj8@ilEӌ粸Q({{ WG9OQ>b|9͋5jW^ml3r*(ʥFW|/\_N`Ao~>x;88<';ͫlT^~Rٹw5—9aY:MEsA8/5mVw٬,LǙ64Bb8C\O8V?o wԻnեD;)q?~]iwY:>Cml0pMfW9g踪ʪ +, s@YT}x9x4I{$F&9Si2¾kwdfH7}լ4+~8]UȱeUwzu|M"o>~tSei!n7H 3y545Ni[6ͫ0 䉢v>cV:?rQot*;CzI(~8DI:>?a`l!ypHG 23ha" =-۷ sM Xf*\9ޓ{Hc6)/*tciIk`T{&afDwBE5{'߅.z|yƔC1^LRk؆{Q "aǗ$o1A/8| SeD GLKn\npt9-^JVEz_kQ'>.\,&96; Im—氄(gcǑ%rYIG FqgKlWVθ f~pagr3%&9pPP"2K M A`ȹ U2ɫr# 3@H̀ N0P&oQs)<ʲhl{9Y[zޫ3@q¹~ydq_pl=j|I㋬t oLG hM?ny@ $PU,SBQL۵<+9A$ߚ7t[ = g]`.{UЗ|J~>[@q$k6&dDc ТeTl|ߺ+!EQ.RC^b8f7bj=)[YH-(cAtIp;KSZ5ـEltԔC&)EKd=J@  x4!0yJ. cu(Kwh^CM yx MRb3( ԬjCoGh1YUc~wOM?Y˰anQ:At kԌ W$m&|7C䔥,&}qo.- @3~*<*Y$#MqqGx(mi*=ahFþS g7FL!Oo4묬 w;uz?MdZ `'T(4ط+ϹmͮՅb>%l43`|ԟ2cMxmSh]6]EMoY3_Y:y^fͥ UuYM9,3@Nʤ1lN;ܸ;C%f/4'벻tԢB/DX衵!~S"!J2wI7F%j]ap$;1QcjhC5’^܈^ή)|.=lQu "y}:<:QKz^>KC*d" *rFG+]J;\.YH8mQ3r d1@ C΀Nnt)2:ok8򧮂'm@.W@78+2GQ]d'6b:r̒%f]2Cӥ}f.N:L #LQrD}7@~:QF>UBvc~'bm (p at3lc_'&)5o8q顉łYxiz+ZR䳇 .ahۜN_*V|Af&%G E1N+k,JChΖH>|o%I5h3,)|}ϛ^(?ʸGqD܊n%9墙)zo3Smk{0ts³E`Q Ifg!;єަW dFH6H >=RZ|g89OУy[7Q ;!zcxFzmr8r^ٵ'Ә"X b! WI9 0=1{Xx{Uh/'v@0ŶKYmI^+A9/H1' z+KdVu]`+bZ$fHOFOR)|՞^R{9Ņ2z~[[emlmoKB)l]g(;^/p} 9FM=w,pNl8VׁoNwm&T2-n8>Wh|uZ!E7h[g/]v0*rrϸ㽄[I<K+ eoq<юrϚ8OL-63AziU%߀# Sڹ?~J?䎒 x3uO?3+z+)E_Zm?U34^u~Ѐvb )wsE=x:jl]ddqڤ}|z5Ltji<Ψziί×7&oлNicoޔȁ :qSNOgȧV<~p'De[tҡ$'hAFcT˷Xj_v\{8pΏ3+"1@IIܨr䪧T99;a҄:h%IҢ(QjTlzK׍:G i$и7>h?9=;L+3zj4f컕0Duir"֋'$-쏁 cO,ɚÅ6%1EdJTVy}۲A[ɛ;~ ruUi.%}Z7Jk'SӅo.tĸwYR -ψMzzvez"f]Ec>1 &X3ak; cͪbLOKy5NOBfUo\9҉@W q v-#pgɺe/osBw]xțM6]nN:#Aw^ }[5KMnVA?aJV OXG 6_%m73F\؀ѳv؛ѹ=+6Df-+u\^H8et?n/Ǐ%{Ŵf mOKa5UrO[ƗN-PNɣ)]O QTov՜{,i9z5x.qűg8_29*Urr3pT<ݩv" H0=ݮ t$ SFI԰}$:Fّd:}{- $CrT'T:S-o?ZFϖI'L''& tV%L_1 %GU;:I[J~3ˆweڷJƳo'c$ՏqάM8YH%eB9'71viFli/XKԐ3{"Sm.bl:m^%X4rׂ*X 1Ny:xTA- 0:([T /f|mʞX[m9s"I+ȶԺ!C:M3*^c]%X2DnfUKKA[VL\ -h\WI4e=kH,Pqe{5;->-I^,< :RڅZmS6݄"uA+7WnQR%)X{o8ߌ.-;DX'De5N5w$Dbn.9ܷL0g[}{bگ|'M$r=}oyɟ (Tk[3JFCm+VQ,s^ 4krZmwyg1t>&f[C[`rX.TIdJ*-rg3'q jp4Q%)$_RBW(&LfXP^z7Ii% K !64KS U6kR&AsDBuf i.5u\ ˄Vhx))>$Hu@Pbgq_;*'NYJQ:6Ԛ!V. R#FȌ`AXգF1k㍖o' ?UaLź,uISȿ /I)Lc~$3, z#!-ы YEBwkaGkǩѷE`ʦɺ̶X ++<4{̙sg#Q>^) uWrH)êeŠF`]I#+jaT7}_sOk~50{jޗ߼Y{K$v+Un~*ՙ42˨YchVMhMdkI?YTԍX'`svQeGqOH(ٰ̆%u{61l(DY:5kT{mu7tm]ӹc}%BLCLk)Am[l'1B4b`GFaS7ee*9W3~B7+UX8FrSv^i/2h- s=qpt. 
TX,a} d,u4 9ELo1K&lr,)&[ǔxdl̀EfɷeXt|ԓGaR<9Ɣ![:r]v;hxHwCX:s@̼DSďUDXDw?8o{ŝW6QHLi5[Oȁ"d$P:4dq" \۫")6|¥\hktu=q% ŭ >E.Hk/w0~ʢ0!]Ism.~s20nx Q졟5LPaܻ\tdoI*pd7A("KՖ) UŒ]kE(*Kn|4\{ߵKh)%; +1r/={㰤}zlf*hMDX)j9IBs,)T:@4ZrJʋa_++_kXa\ Am5!tT՟@96SM\dN vsPq|۽md-4*^aӕ!}D[K6\c{٭Ccz_.?r xWO)3/c ֱddn![!v= HGns㰖$fGeY_6W$Rm?93,$/N*^♙,Th6Vo$/!Ӽϟk"Ztfu-HsU?ZB ?vhSg5=& nuz/z|vOYã"h8!1flТb!RfVAd{&xfRKmVͱE^zK%JTR0mOꧣl}otnnٷӃu+` XG{~P[?c)% 0_Km &FV~[9w,ZwAvh>I \DeG eA"w|Epn"ufゝEs_Zq\Fd1WLg'ZXr(+ 3n[>ab/slD HMBl_E˨"xH;Ok) q]<|Ӕ ]R/*"?DʲŴammb Kc1=+k%T(obP"xqT2>Fv{Qjr@Q:*R+aU:ouHl v|(#Dg(&s|orM+ּJ#)~,t?J=V5;Nϔb -2]aB1mJ7 6^ɁFiT171o7͹0*CPJ* [a},r2%'-~S-PhGJaո)"r@Ez_mţܿHKv֚e9X4سK5&DMVhDᏸgY]{W8Gvo,~;k]-g|G}4)Q0u s̸CLRXܸ Ke:R⻬?vGI9C[[ *;C; y:Jǥ1w0璯a<&mzM9v>7`#UrZ-nN9FYppc2H: $WKwF~%9sآ|1)mSZNDb*˕&ی}XM*RDXꒂ'gJ;UpYWex6~rOTu~i Ж~HL 4sQ]tܯ2_=8UQzu? ķEP,-F+39NrḶʆR;_&1*M=rnO=ifANmoc|5<ʧl";Gݣ <O8Z\&b ~),[ʷO;4F zyCOtFRhJH \-fsX/zZLj:}hܡdNΗXJN }6aۼ(F͒b>֗O>(BR *,'RH)M\f++]G*lۼ5XRz^4x#^IDtG«J@uqw^,ȥ)Øo#hWN6g W"eQaѐ~swСtԔ0MzR(kL(R\*K7C,D[L9^+Q. E P;$SQ7Og]B!.Y, RoC nC]NI:2Wy"]tj;]E_a2탆Yڳ{Xr}cw<"x0t8 v:_7/('ØO6;S1x5Asj!lk-Š9шjE^„w&IMd>HLi diP WʬD7Uek|/7YZZ7]мRT+bPT:u^O޼4RG:8TI s<>6dW3)J~8<*$#L KQt=On_01 !4 cM+N&o -"Eex,Rgy?ق Չλ6ސT̉ 4givdD^qWy,[řIfOAOx^:n;1ݶ$z&d[z,[Ʋwr2(@樽w*w.ȵxQ ygTJRSuޥrw"i'40 q."Ј&Q)`@ނ3fh>()&:(.^;Qz%\pD,x58IE3 cu7}_GB9>e{Lv@w 8N'7,oxC\vMr׭e9G3Ixc $xh=)q^Dr ܦT`(^q`ںXM6뗋?jӢcSbqwB5S -a5P Φޱ8qܝbd?{pVw?ȔO1tM b $Z > VP^)2& gU҆:/LRxH;n.pI-Vn+s^|+z4I 7epGQU N&˳P%<Ń#GSEFdgC:>m(NC*܁+O$h}f"m#\=+≊Hv2q֞izljHHg2<47 Gh0Hgc mmUvmpfXB[B4Ke"mD!x5YP>k^cذ]apU/j "0SZvhCNmlz}h}p-;Fd@٨. Ys 2OUqB巧@NۄMe,*yu'*"ޫYj؅r3 {y]) 6*#kn3)$̗nėUÅxn!˛1,'yBW(e]5,Bi]͟0 q!%']GjQD.啯ԑ{oZpZj.}=g>f22I GJi{3O=%>p1ݕqF>Ĝ?A:8[{HǸstk`TD^f e3&SK~3tVhLÏn-oӮ.Y[ODŽG1d4qprA/LOfY GuA)ޢ $ӤQK5Rwo*XF-2D2;EMvWR쩼LMY;fvIW򐩠!7&NYP`Spӣ 頷:˥Wpk)ni"]h*7E B~N@zRM NȊ{b͔0b^ⱪE$XNElImfҳ驵'~|4G@ݞk"z5'B??1؆0sMk}(XS ՕsP"ݕ#U|ĎX qR%E5/-;O[ 'JL)3fO*: 0#]bR0CN˾uQvh=8(-;]Ӽs,vEj4;¬%XIK>dХ&Vpn5}@3X̙;¹jlbepмJ7 MD-ΣjE<.ө' f1#@@~m6T/âtgjkM?ݕn2:?5fC4Z<_C߽|sVaH|%xK@d}C˚ P:*õ}Ql@LVsHbm %4eS6U<ޛ*U~:yn~mq,>!RI+W5VRKn}`T Rf:%KnsyYG sTh|7mTvDW߀ ɞEeAuUŕu3I}R\+MEVO ssbHGa9{!c 7oehMlEӿ&tBTx[- NR:\Ϙƒk*Äk5{GN1k!.}n'#KN@%HJ"X.x˳:!*3ьJ4VXb Q!cA'wܝ0! YO F>]4Oze؟ԦMyTtly?>ƈwnVYP97 g@( q\,5GL r.jZoV>e.4iUW'zt\KP ~V*$ikJrBᥘ"W;F*NIJ @An]3S6d~Q W:`yle.4oI&Hyt& ӒOeUCTҤ}71*NJnbRD|/ѡrhf"v|HORh@)>m۴Jl95J_@N~-{ԴHcs8=eÒhyd ~}-6Ƒrô !kd(,kI|"y i CSv:'Gc vu,;]6`CzÄdġ 3s+y{](HL br7v0WFd dX~Rg9R7ʿa2{,3 AN0˥/)):7exKqi\]8V(`>bQxeCA r6\t.fc#΀XڢcK K-I/X#Lht^Bu48G`U0T4[9MzID͍Tt ޶cM#13AԵ6xF-+%( q:Q[t䯋l lMv I`lb u3ߑj}O<(] X/F/z8U$ppiR`qS$lA'`ί1]yǫoI<ְg ǍW)&BJFe1w\<OnI0Sx/(i!/;klbtG@{e@עH]%ثR!&8yRyJ&+=q#3tT٣D2ɢP A9i)A4>5ܬ-&W axmODֽNMU,R0P4^uN&"qZ۲SԑQNs)P츔 d/D#>TWi)0Jϫ)52aNi"2ƹE3:)2𾵗E##?`#rکZ?9c3]_W!M ߈ m5uҎmId 6J_WNɖi@EIA! =`U#ʳ,v:LrN&wNByJ{%<@JЈ[P"CDp؟ O+Q AyJ"εLjꊮ[Kbݠ 8+*%-G"+=ᚖo1H`MvZ]͋Z#GrwOԉP}ޞ*[YD-I{QeUQ`j%#M1 \˾ eGőq7?G-antlr_python_runtime-3.4/antlr3/treewizard.py;wȑ?< Ǟl67 bp@wӆh-$VL._UuKxwsܼ]K]UM^g]~ġޅ?0cw)l3Y_x8c`xV$v=7ި',4<;q"M6"WGC;w~>37Zy|?|tSsM|A|QXw0/KsxK_Gg'źWh^78'fp9&6r`"2b[6UɕÆ# 0g&ekt.qtO|=Omg5]v;vj˫h}{t B+>ZCMλщtOspP@@ɠ&Vj7N_dE̚>* $ݻ[H<:8sXl4`{l0BQdN6$ @&6:x|uأaA@gI!2LFOeAns Q|3P&9:Y+Zg,|?BGĚ;]@Ya+~e|ڭFܹ6?HIKB?f }5~jˈUFC:-ޞA\qUw! 5 T|N30cvPa_jC1 FZO GCu\I'1Q=ƷHxvJN+ЌќԞ @ ECNXUV N z.~lh(+1BRX邇&Pc8W_l}+Haڞ4ZH|hơ [8#JK?f`z-`6kϐ~ƍB&vDxi+deZV]+v.0eI5Kai֫QCۅͫH ѫS݅jw!i=v;vqH*,~6>W0P`2]HSP X@~yI_8.3B@a8G?}*̯YN ?ۖ%uÍ*r%sIҳ %q}62-rAgD*B oէz*$4ԝ<CHSCQc BV(o rƒɕF "UrS? YPe (l~KD[+.ttAKF@[)B#"vژZ. &;QL~Cz&NB[uI a+%Sr/pk  L$h$ VyǥGIJ*WP])bةf>}ʨ ^)# Ytv 5j*Y}. 
8z֦ioznP<%PKe[퐈j~W9~}hrͺ,ϘK%#z6 <cu&BL'̛ 9uj)PEyEUKb ;X5GC$ ν`3][+T0Xݵ}$ō\NEhi\<BǏW.y}Wa|S?^XXp=KH^rVV`zŅ^keQB62Pt U=%ƛ{/xmqavU ecYh|԰1[zca%;8ѝ%H*tڍ*lվXv9$7 $X?GѽVJ`K0v?XT5c@~7MVÒEo2x5idBC dN#:ol)O@&'"jlxyȧCҬwݑZA] U !N/ Im(\Nw2A G(Vsu>vc wxρC7H"EciPJdi rJl:1`mCaIj5 4鵒c]?sVQ*_v F'Rċ`u jY#rY:G kPJiXqg%%Ji`C`[@|EQ4Q91)E8bg8w)wT#n/C|^",5=7b[]ې[UǗ*.-+' Ub@CWGi+3ChnWeoGs9?(ؐ*U̳gXz+aQS.E }c6agH^9!_.1xB^bƏQ.2jRt:a=HI?asR|dL}FK[>~3\My]s`aAwn8&)tb D{y9{r8Y ~>_H9"ۮ"1O,C%x): ZB=D:vj8xTu4+=:>QhSg+qE5d! JrC5zhJRxR%8 ĤxݦiE(W1r_1h0 Zwy) `{1S/26&֢޴OJXPQ. v  5׊uk::]/濕b:ʲ[Cmh/>T?]]SuZI`PhN .}' r 4\7k-:`-ŸJQS'=@Vp 55M[1R \7sx2!Кf?U9Ј47d¥ND/i,< `Z$J\^jyj I^6[PvGڬ ԴJQzּErJ.`GBgЧǮ-KgVfU4pI#ɻP=PꦈtOC$$)RjFFT=( o3夝i-(KmG,]J :0Lq%~* W e-V-o3l,šc5D7U;63|*:n4^nt|,mWuf8ٱ/,$zn\_8En0SKŠ0#>djMk!}<]a=/θ7h&ԤX{lS]G{aH!1e,{'6Tգ\ \g4 xb 6D~HEnWSvBt"DDUt0V 0˩LZpFRT$iRg3<.'2-#7ڕT!ٔ:Xr 3 n]l譤Hf0x.+pi}qeɯ- 'KZ5 2@ۥ\TKC-] Jʢ>zWD HsA+*j ŊJH*T8gSԭԼ͠&ȝXGRYʼAWV* %]%N%.b2 ZU}4Z[?>_rKO44hv'lx7!w(WrbԂi,ƣ^KKvDP,dvt%اFs/߭ק2'Ta lfsfK(Ō  gfBQ5eBiR řbHv72 {SW;Fw@yT;90vFh:GwcH ,|8<p "S* GCus0ZzKjxhPʈŊ0Zy@%X ɛtܠ$S]!Q7Bh<$xxリiI]$Ygw4W(hHt +0"qvImšlAÃ"=Ҧh]%p|^L)G=/!):c#Tks S)="ޘbJAcY;Vh PhpM×mND61Fa9u".H(SᎊLV?'GCs4m,@ϥ} y5xۘglu'DO|\n&WC~3lv shm?(^ u%)j]7m.R:FWHDK8% ر_7%`#t(+ƎF PK>y'A?antlr_python_runtime-3.4/antlr_python_runtime.egg-info/PKG-INFOuN0 y ?MAz@mp6iš=٨vy%`A߫ = =g"6 Γrȋ_vߑa[zlH`|g}֑\mv~л#!B.70pz-Ժ%U˜\wBq}6;卜vE߫Ɔ U֍X5 2N0g/ ($kYUT=JKY?PK>–Bantlr_python_runtime-3.4/antlr_python_runtime.egg-info/SOURCES.txtAn0E a "jڨ%bX[n2#eOZ6aOɏVRIb'!CR#3?aYku{ @;&dJ \r0) TS j?#b!kWH'|_zXoO2Kantlr_python_runtime-3.4/antlr_python_runtime.egg-info/dependency_links.txtPK>: Dantlr_python_runtime-3.4/antlr_python_runtime.egg-info/top_level.txtK+)2PKRy>w*Ex antlr_python_runtime-3.4/AUTHORSPKRy>{2 antlr_python_runtime-3.4/LICENSEPKRy>J'%$ antlr_python_runtime-3.4/MANIFEST.inPKRy>y  [antlr_python_runtime-3.4/READMEPKRy>}jC0 b!$ 3 antlr_python_runtime-3.4/ez_setup.pyPK۶>lfx#! antlr_python_runtime-3.4/setup.pyPK>z!antlr_python_runtime-3.4/PKG-INFOPK>DV2;"!antlr_python_runtime-3.4/setup.cfgPK>[׬+ !antlr_python_runtime-3.4/antlr3/__init__.pyPKRy>]) *antlr_python_runtime-3.4/antlr3/compat.pyPKRy>^?, .antlr_python_runtime-3.4/antlr3/constants.pyPK鱳>]`+$( .3antlr_python_runtime-3.4/antlr3/debug.pyPKRy>U76 & Wantlr_python_runtime-3.4/antlr3/dfa.pyPKRy>Q"O D- bantlr_python_runtime-3.4/antlr3/dottreegen.pyPKRy>ZVͽ1- lantlr_python_runtime-3.4/antlr3/exceptions.pyPKRy>y) {antlr_python_runtime-3.4/antlr3/extras.pyPKj>81#' antlr_python_runtime-3.4/antlr3/main.pyPKK^>碌7. antlr_python_runtime-3.4/antlr3/recognizers.pyPK&q>Q.V^.Ű* ,antlr_python_runtime-3.4/antlr3/streams.pyPKy>y .) antlr_python_runtime-3.4/antlr3/tokens.pyPKx>L>' antlr_python_runtime-3.4/antlr3/tree.pyPKV>7?G- Hantlr_python_runtime-3.4/antlr3/treewizard.pyPK>y'A?,]antlr_python_runtime-3.4/antlr_python_runtime.egg-info/PKG-INFOPK>–B^antlr_python_runtime-3.4/antlr_python_runtime.egg-info/SOURCES.txtPK>2K_antlr_python_runtime-3.4/antlr_python_runtime.egg-info/dependency_links.txtPK>: D `antlr_python_runtime-3.4/antlr_python_runtime.egg-info/top_level.txtPK `python-antlr3-3.5.2/dist/antlr_python_runtime-3.4.tar.gz0000644000175000017500000022510312653072152021633 0ustar zigozigoلMantlr_python_runtime-3.4.tar\{sFO1%ו)/ e"BT:Yԑ\N;""!~DݦJd3ǐ09/8:O( fծ}yf=~ovvvwmmmL&B<]T4}u7?? 
;?w_uykn[7֓Ɔv.WrֹE蜌0gtby82Da(|>LC*m`|)dD|"OH8F*IT"34^, JqWgJJb72ԥG0?]"~9yYG%Ǚ QDPˁi'cbW"#÷+o'#'(S$,qFv~ =C_Q I]`3"+əJI Tڢ)vh$̦ T)1`B$OCFYZ%ׁ/ hW<1/'I|'#֍!C;)&ZKU3[TXĚpW#+[S*nҰ`!P7I $\W01CW{߰X-< Y@i}B&fb_+^d7J^VM Avd5 Հ؞)6=1[8F*c_'I[My| GV1[cJ2Ml&iL. mXp:i"f+li0.RƒA.OX޶rbi$|A:$R$ЛΐLfV>g7jKkrUq,бCĚ8hTvl8KWܻ+ |-3ʮ@ish*呺;1GP@ħ PMպơzI- ,uЋ_o??fLE6h<>!)f290שLCΔ*S'xeO$_"q5N 9^M~D ݎt.)͸Cdlت.(jgZoT&z+,պB%a8kH{ھ^nhm9CGP?7]$}c#$^"ҏ7֛WbN8:H \+ua"Sӛ+ЍYѓ 8YO"<} Hy庢\Mb d40?О9l_])% bmu<8(ÔX0 jk T8:Ax` Q|~JL Հ\ { EhN#2r)N7-&]W Xt$( _Hs{7:!|Gxg0ώ;qz68A)z%{'*z{'#19>9:聜qOCG]o;84@q[ O{};^O%OIStUcUɒ>2/Lo򻣳9qK8/h` WO2U ;Qnb~(I٥n 16ak6gp#k?ooݻfݟ'[)|JR)=t ^mkljmӘ&GJ1yk2 +rvIM'O@cQuc|)Hi^#z;eH ]巁K1֍UQqnsv<:s;[(xCq$C1"SP\>z] 7Pؤ7jFE~-*`z?ɩv缇,TVO%'bc 7h9sPK sv ;xvmOwsum[O>E>u߻oo흧פkro9$%,N+Չ#f?V#e쑲GM113GM-1$74_Ldױ(mm{o{Iņff*Z5K?GR1[Bk*NISL/_^7Vw嚄8iVbBoCfn - Ҁ+c*jHa_ l:4jYæ[ȲK_.yDa;յ-QmQ5I,!tA&1;TeL1\>SiiJ.kER}O:z`QI# 1 nD3O! ʳ")B0"Ml$ .S 2M˰~Tij᯳RxeeJ,hޢoF2 CEh5mkJѸDڝ}C.KX ~=Wu:+|<ԝ:` ,x0~e۱RODwu Ym*kbz[7mwVxPcʅlJϢ[ P#AQ+njHlD9UkkFL\0GpL(d~kqh' _wlTcU;!w@ :jo405kt%Ɯ}AIyBbR.-,yQk۶_sw{_W=! M::Ya~d9M-Kw_W٨[]oVoւ]!nլ4CVVBys~D&cґDb#dcyv8TI@ ͛yCUZ@ /fKVPs nn_V*u4i:aelbUZ|яuVuwDk3F2TzF&AK캇\A-}ڞJnw@cZ@5ts%-U{-9`q0nR#w)L1RH&rl V|z&&Yݯc!47@駴.HMn} 39r,̬z,7` գdBoY X.b/)]]UJmJj%5=(TJM%X=ׯ8]nZ3#eb`s:-N22Oj%)B2J;J[Mx^ %^tr,8ςHѹZj`)qYM[nWc?77OPyyS^@G3dO⑒`0!fOeZ/tt@צ/a$Az]},Q1%hйV-cyG"4&ቅ׀F9xLSÌM E$:AI[K.jV!<+0p*hh(_^e@W:kdVi1}ֲo-Q6sU@Azbp--;uw=* $ Bk*Ʀ7<.ǟuYDa^N;M'|VblwQQo[:1sK`Vs=]Qa_ K>kSUwv0qJUjӁK?9S#$ *u`Mtjڵ#;^/$;a!#5|8yVMt#|R- K"YJح\D=cU֤QhBP;|I10}yR (2_#QR,dsٯ b`H&/6R=je\~wnUu[`gfc.Nd55 kIxwPVh8 Ougt9z%(2Ij:N_FM$,{SݢMqzDO%Tp!BnUr1"m#r?:'& }c JC|z@އr\_!+>i%ʵٵ_t| (uJke[bɆr:''`>߭`&ʆpfpI`‘*Pgbߛ,hP \锌Xvl*"%x< O;iS?귑Xn"&/Amv yjrNz&N8;+Ic#HeG ]ٓnE1ǘNIt9(<f|kf<0%7*&tz.e;y,%AE)r~1ga\22 |3fN=5yQ1TɅu1/X2fthdZ}l ͙3yc`UBE;YnlCwS"&!Tfz\य़G&.-.1LEc!ϰaaC^XblysV;IS1+DcCa8e6T! Ehv\[*sYhGGfdθl30ўmy9%<<:-m]= Aom עKy* .ﰚv?r ~gqf.h|T=O{j.zIKћz|[Ә(ѓPVͿUJ=6QoݭJ[:ͷ]GuK4K'h2F!l IvmQ%k >QC :* (DB LƶeX)l-dh 3$C ("M  |b.t! %F5hxlG !ȋ9u"]5Ԃ1w̞Vkzt0̊%3OQNFctRdp$@ϝd`93P4Pgɍ-npΒZ"ip:K%}Ijst1,vYL$ze?hEg'dq>B*U_!brA| t)ǃϛ)Z 9 b߳ sWyW;.I{B81z4 1Hܚ\{c z8&hPVxDĝH t ϼɥ聆 O+U؁ gSUOs[ϵTcшz{80(w|FwIOj?LeWA-jp &B.׿z?Ȼf&uq$WFAd Ĩ)܅D ɹﰹMy+Pޘp^^J:.ŀ6.ȭ-OZ`VemO2H0Ia12.N;pn?z9L_lsPA'aFn6]i 9.HMN nt؊ؗ?j;ģJ ~=h+O׷WEcLY>4Y~2D:5~99& 9a%D'iE$+ћ?'u]SmRUk(}oί /[^{6n_a?Fq5 fɥ/u q|q&}@4iG 1Ur!h`)ol+EW/#\cSܚ>usoHS-\[T 8~)GP]d.hpؽxי+2bK9>8Dar枡Gb2RB0.XS.S»)̳{qׇG _;͉rP+sqZM\ai dl!+fʀTd" Rze yÊc9ho (Y 6< Sa'Q1;VgMB1eY墇tZ'@^U_7Xo' @M1(, ;A+Ni/wp2JQne@:(aZAo1xpyծk lj9>?WNz'ǽNY:n ukq S[Oc#5vIU;eZQw\*U_aqU"kS\ Ugx`>׬p% pyWj7W~/| n._ Qh> eu/.=(U]qa0-yL-xqw ?ջKpuOkz 8XZKo1 dIi[?mTi ܫi{RޟfXrg @44 HlQ3'h|Qi *DdGl">i δ"i.20+R,[o6Ve5cQ*h7UҺ7k m}se䭷zXX,+y~kk?g6ܴI{j1|==%) 65&bqɂv{8UQ@Oo_VN*hdsRx6Aw\Rńۋ\I>Ar */Dmz5A~n=lKLrɓMdx'iڽOv6,ߥ+8;6ggRy;lҳc5*9{^M[AS?NG_ #d{٢vN?@{V7~%ᅑJK_ii4a8D \ yQQ;OnϺI C%6_~2w*^åաe]ͨʃCw;"n5f0د0ػ4X lu$~;8i, dhN XXIv{zJ2Wѓ KwWQy<| 2A Ko޹)Bܴʍ=yI5ڡ6/CYTydڒTgaQ2(*,{J !./ztnfIB{qZbCoC:E t~#BC4j DVa|f%ȏ c@&~`;؆mWfʞʬTC\MqMNgp?~$~}lbo77ܠoͯz_} o|m|cE}E%/ `0:5F4/y7tLj8ދ&|fӛk ѓg gG`$ҨK%:1qA@ b #NMDXr8tEO~əy*hz1EGbΌ%GC!60L:<#L>9#9ǰ3N< N 8O3Fp~eREǰOB2 sGrAH"= .%LsXH=CK@Sa\vhE.~߻tzV΋k\PO!n*31m<*hlc8:>|9yϯhdQs _E;{/QOz4z/A+GpN??Љ^ Ac5^~ :|P/%f'zs4EO^Ha{Fzǝ}==΋3v8pjH;n9v{}ΫzQ˟>4({r{98'^6:oEHq3ءΡ <3:9jD? 
$q{D0-liAD?؃Ϗ|0xpǠ=Psz?Clq ?58zB5~EO}<}ܗAQϻ|ѠS"S8[Y'`Mx8'#frT?@8P T"UI\+أEC*qKwJ9ˊُ[x'*!pzĭsdq@RDvtt!eP5ٍ@4iyi~VOs(Ѕsgn@]50PAbP8c&QH4p 9F&L{ MFH鶡Ґ-s)Q\ PÏsc| hYØ"nA5LD*TTKfc`U-Nƙ @F6^UBG) |WԲ&y8}b>y8b{zE{wjDuB7}8b`J^6@;Ą׹UWP/68Y(iQ tkyWՎv4_~'mB>K-30NMv|; fq.5Qc*¡E胨wM*vOpcb oIK jx?_(哫`VkYǹ4k,=6يgq3qI!xy+AwH;xbU,hkͥ EN-@ .Nf~su՛gn#=Ph5QG9鷭S >-'!y'm+/+\(S>=wϡa!3 % M&r@̇\l r@LRN5ǨiNYsbDFʭIFS['_뛍Y9y7ra<޶&8HO1sc;'3xB V9Hv=Ѧ1l>f":j(Tςantl6c sTΚQUOFM3UJbbe3sa87AЬ. ҳ+K! 1eRnkr[5V`,ujmmC!I2#_%W 9Zc3=v֤oCAI| >y745rDpK^DZ_2GIbKSS\g#Q$߿&o$9╀F2lZ?l7>?+0z+?Oq_[s٬a4H3h^9Bz[e#ŋ0ns @ρ>"" +o{K!lƈT# (sJzgxQgY/wACEjC΍ ]8?BAu#l#@N_!$A iKAkT-LԳ .v1] l D7C||-ʻ;FD]Nhav5G)/‹]&xMo k!k3(N":)6qh8>K0? kfSt׎@Ha|1~k*RmPT#d&*N CTRw9J^/{bm(*2`&ZHo[a'ٖUmVr4=`Lˏv%7=3;ÃSݢQ iGX UDʛsgjQR:6YAtG+DntBFq&ͯ`i6σ]-4v?@Zc*z,s?, GŔeA[Vߛ ;Vg`U i5s7;^횥zq󷥇(DqY:r9#g~d6d3qM/\yg Ik&V3Bcj:ݒE`JL*`ugL, N|k0Z5isp͉r WeDX 5oB$aIUs"7jNZWC8 A459{WNV;h%/|5t]@Oɬ9Fgʼn-.Sn|4nnl|.7xp<^|v~v~v~v~v^怗\RsF40No^LqtNo,(.ЫTX+wzT[Sy l݌B,Epu*w˥ϯS.hTx^[RVR9TU)"zq)Möǰl,f㮒x !F ̋FeFj9͓Hr2d(8[b8Hn #Zn"Hr!^`yPOkA ZZ4pgvT1,mVdEBtMYjE/>ԛ[ZJbOQ;T/w~}&bGK%qj.ئ=ya vCy:<1*A` f5 oE9) (n ?&A@oIYpOku=AmSQ^ ?L AsP;GqMv̲Ps1&Uy6*/l^-0:61!wyl7K'o?~V PJ):OOa2+94ṅޙk_jΙgm'`O gUW˄[n'lW%}4flͣJ*0zs!.Nڄf_G-8{9ah 4PvichN<6s,%umCF >Qwi6Tn,#@kZq-n6:XP(S޶342V2 :Q ]FFth@s/+(N4>@eʝ ".q':}Q?:VʗV@6YpX,vU*IPo|GfBKy=B %7S=ʝ` g E1]Xw*d7* W' `@@dnuUT f/e|OP_/gc 뿼G=5!J˷5V>ykf+oeӿ=ųMCzV, l:`!f]-CaAJSf4tMGg2W?Ե[FLG#G|nN1;9 ~^ȩ^jawT(^% l<͠O~s$IH>?l?'^_wPýACP%YSgۗ/Đ Pe+ } /l~ <} ?>,v}>]Ůb',Mn~̦);2W|EQe:T:>=8Cθ4{5kId7]~1%O]u*1&e9QLd%QMfo偑cߐ.1G0zhxV3}3a-R0&٩~A6}p9wE`-{1ճ.?9w>c!?z %5YsYpZ‚AL.)SˁynJNQl!{Ʈd}VW/ ñr)5{^(dڒwp-Na4Y}PקsYpm;|),8|Z,J˪"dj:yqt=,rF9GV>Ԭ*]m S7W\)qJmdM57ooGkk+ZImi{u |PjنC L:mpr5IX5cPƮMf*2l>ns1@ ˤZdKDv nk_OCsz)z.V5[^?w05%Yު}:$Zkj" yrsc8M$7y <ӕmZ3T׼Owg+1_}hoa k`ݪj;4(dEUZ*h`R>+잰nEwIx7jEkNV$d*R:I9;3ng߃]~zp k6]`@ݶ#KuDћ"D>,' cdW^]I|L5ͯF }T gj,*QT_q 3QgsƸHಂmڲx\' =U RxJ5|j-ۑkUipyij^[nX 5lwx&[FH>M$K$-Aެ;R nϓ^&> o.˜UԅNINonp[a(`&uMkAa%ib-X;Ȗ /9nJ-#K~Η `ZK.e_.I 6zͷr>}->>9s_>}rW:QxeA0a?'4=}˅Dz/wNkzE~q_v_Y+[Y|AǦ&<eR \ڳJsCS 1V|dsM[=d_Tcq֥4*DXo X @2oHէw c,wD_tlnʙ.nhْ0%,ab(ѫޫӣ˝>ʬ@@\6=(%[bnwHHrq-ʊh\8n@H%p+`A7ljZ ETYEz#kI?]W0&_rFZ ,"ic>wi>rs"hh'-(T,iY R."1>cD`EA UgE2 <{nfmkUMr2 rƫΎiZ4Ƽ|J\2u,/K4E4בW8'ϐ8I6(!s& ,%m@w4a` #]hUr^*C~'.q߫a^p2^Sylɰ؎v&E=T}\/jp 27o@f f:."#xlSg MBcjbVjn'&=Ee%37jT+4&h㥖~nyf<ϩINb+ #޶@Awɀ|F-8z2dA3>'<%wk҂o2U}۽jD)UflTo^%J7:n߮IV}[r4SήWJta:{Ԍ_+{LؔrBF"/.lbgi.|duq|v-ֳYJ9d}5!hܮP!ZA3pR⒍6x $~cfhe ?Gdy\-Mhq%9yY0QFg#W|r}7I'3gK 8"I"v`]x:y9? 
VJԵ3 =?OhF6.~~̌Xpډ <( <5 ’AȞDO3(5{tp ,&@g;kHz uBڢ9Q=]~1g|0)ׂ"bdtk89m⎴㦊s# ߁FdjGaB(ܪ%aݤRROSj٠a^}ض &2GȔ&Q0pno_́ 3wsQrI]|c5dVn>Asމ_wRG7O4W /[-T{c#\ս䢣5Mvb{|anE=vFdc8JX8 -wZgB`Ț-v.xܞċʤ&TD6(o s ˻luY8rhf_)Z][vݧr]x6s 'Y R ўYqJ@j :rϢշ׹i7R\Z\r 6EϏ`7XKÉ4Sbeұ\ Z!+Z&@h9(lT/I'dՀL9HS(Β:QϲC)*)/هAH*\~xQ¶ FJ oNHY_kW|C"WR*mjMsO'것ӧ yf*Ңs"}8zpDŸzoa)hj--~#t?-c%3sDoDv"-O^:&5lIa&D6%7 jW`(X5&{A(PӔ]cRSmhTs6_MsGSŁPJ8`1A{C| 'ؘT*<3"sm7#\d~ïP)haa:}̕3klk^o :s[竈t7Ybty3%3&j4bz7;݌ >@85AkaO:YuLH'Č:\gQ/89E"`\K[H lD٪xN'E^0Vo?.LR)/3j#aα~!~ ;yzK"Fz]Ӝ͍Ah##F%u]ʟhC2.;PSqdȤ>=Q|@>Ϝ$dTH֠Ne]c^6y*0tO-F ]P2瑵{1k.uC&G #Xed''lyPvl#i;eQ`\+T rkڝD{h*i,)D}5IOf5'- `g4-`W.3[$moyiϏ>Щ.-8 lr,+T'rGUx?:`_UaCа2gd削ehD#M/L mEi72Yxhޟv^5.su &.qgLSMf]^dj U6I›e9zAwsn,1I_Ѕn!};tƨQYV'%,~pwo6R$pV Y_jbpv#=zN:/jwqm@3:]GϢ7kYV=Zkѧm Fk2!|&ܖ-\>vT{xAhen!gAf؏)g3"l#1ng4nc<$ 3> mңC|L=?L.]`7o Җ?P4a௡p2DȾ:*fOkg?CFKHͽ_I[,OaqUpS?xUVQ?UOC/"ʙ51e> deC ]vU2cQ gPӎ%LN@j-t=7!"Nv?'&:c$N;:hnwLڷz`x dp7 F(khI]sHKk#baHյQ;z4f,u;-9g`v`DpI_QaÌ>) ߱r-'^8UWom:T?2; 0hEHvXR..}P)H.0VS&od)Kr*BZpy8b۩@o0h4Aum 锄.sT=類,K,9'8SA2Ean@unsŐϵt  t1/mF3NbAkEP"}Hpu{vORi *tR1v"IklNSJ)JAӄ b,.Qqt\CXJM~=Pʛsӏ~Ngťwcq&SM<0h/tTE1}1q7^ua|wx~}:xҥv#d'/,'Ɉ"CkHa !`Pαg=1]D?]Ϩؒ@\hB̶N % r.ܽ6x#gdc[-3F%M2ceBBisa$Y{o@99~e<^tdEs|bO_nE@:A](:XԌ(>#5Ab4U9xҠ=fޔkI0tv{(\ޒtpK8\2vߗhy;w|$j>m ˵]+!F/Ks~o^c'I.tÛ߃\ސn,HLd`,"^|]ҹQlzLbLĞ FirVmؔ̕ Q[/q J%Ef_r<6Om'& hH"2ZboJ eE˯D>tX|i#ឡ{[Vi%PQSm.O߷Kq8ڪW+&}dzW)t}Vxsb&&4H&i\Z-qRQU9'*l3v =eums\PQ {݉2OU [ˮc*GST "`ʖaL6D`_QG;#X;L[]螽f5p_J׻{F $"(\RZyh6ahAHesB'@CBa[ 舜,φEWcHѿkqF$ȸJ5CY+;k:7VXqM8rTB -}2n+jýC énPh8`W0(,Is< %b`KMohzكZ]hW:QX*Yb֪Z+U*& )1\${ـ3,~[mr=p=8n ,~k,p+"\r :A1lu,ld2C5m -}\J!=Ò<|% !8IPuC#EBL0 PZ"s2DNNDZY4[ͧ]y2[1SbY쥚ht1yѫ,d| ":4N7`hUZG812xyC9,I8#9βp0t`hh lKl+M>wN`\p̋a&x׃Qcu!o>ԯцRzA8>.%ÄTkѵ3go1Aދ?F-4 k3}Լ2Ds~6ϋ glk.duڈwKƂcԊ++RTnϋ}`HLRI-bCLvÛ/!{w yj Ȟ,̉[ 4g` 9'^]]]^^vLl*SxvA5{Sqox6!@ogxԵɉDZpfc5/sd`˸h7f^5\2~b)!ܥ5i1m~ F=?OJFtpS[];*%9 < tC֎cM_pЧx어Xy^ȂL1Zk3kqګv4d2j@+(Z#`}h1m< C~!"B6\7!Iuch>.|1LB~y)GUg:$w[@l̋AD#*ݜh2^,<_3OB +.4Հ5m xZYBjD '>K!^8$!҆W ]ף^ud6E 'l<@Ar'6RH[P+|4`1Y`o8=E_T jnqA1 ȅBBYQ&蹁ĥLU6R0o2K2 3J^1ʴf ] NJ782- 9cj_?9sK{s!r|m16똭KH[=6> ̎_Ekw9S)f"(s_^Xn.&`lz.6 cBJTcЅi;53G(< KGU!5C/415<TU0OnU-**gғ~L66gǷߣu0p F ucHlՔ%vVU4opU9dT|qO.$m{s?8q࠷_9)x).FDU-p/x v*Eݶq} m]@"%dNtԷ*!%⋶&Ϗ'#*Z钹CeSHbCYO:ar+ֆ\ԣg7.8٬fTo6wyVlmE$tVڮ vh EXd抋*bޓYemVeme0?_3n{[a9::h) %RgkPPU.zhU =(Bn*CV3t/b0INYJZc^ Ȯc*_rkH0&J*u:LJpX Ђhar:܈ۓ9|%>5wbp9[@ -`:EtW*Տbaq,Cf$RQr֨ &6/nBNO9(eoG'H>D{`\""4Ɖ?c6@.;-E̦LYO֜ +]ta8O7y80CR֕y: WLF;% Y)̐:԰VΆkwOTzJP+׆ B1#VP凕x[/u^ë8~n $)uZWQ{ 0P]ّԶ7C&/+|K'GÓQ[S(1gJDm+J2TZ@~]눗:`.^>b܏PZgTC{O>Ei \v\v{V~Uץp3C_V:MXSU_W;-sJaΞ>,ad°>;ct$W g-7T&r]H{):v EOpR9BmR,飺0>eS f^ Hb6gUNA], i֫p,JMm$Z*%QtXrWoE,2Vџi{񆍻b sxuX+OVH}8[ 〪"0g! ^ eҗTƿ&Dt'˻JR!}Fy4iʜq =dE࿒M3x}^N7pJ&RnifMo꫍/6l<+}s |cӍO6/66}W_D_XQ4[\2" SSX]|0hD%"ތ!m"*Fb4y8)SO3N< N CR?H 54ʆsV=65zJf)"1[qz8Lg;y9#II~ rH/ήP0wePX&d˵~^\qE 7DŽI󨠱督^?>:ۋ^_G1|D?>GGL>1^NdvkUu΋~ ?8^hv;GQ1&?B(~ w^`8;/{"h'jb#Dǯ{} |>=<8 <_F{;v~G-THQ||x z{HwSw"_F'=`:F0O$ zGG'Ãv@G<< ~DN=s QG݁~ z\z=k W^Oh&021܏Z2a7t? 
yWrrQ  :3,6#Y& F#p<fQ9;AeQ7xhgq4,–`h]GGD-D/tIn~Il2n\&E7f/-s{.WB0NOPdpmM !ۦi5+Rqp}T))xŘt&iNt\*5Aax-]'Wֹr0xwoy[^Hۦ[|U, *g&Mju5U<[=Hbdts &V%a^>gQSε2K2?Av_ah⌀o O3A'Ӥm .ޛxs.S{a9K~C Me cφvm2D?Oc);1AjwFHk"TCj|6]8a(96L'/b,)(9f$dQk %CDE_%dnM .x2Yǃc!"a yvlٰܱiK]7l$"2MYlGkmH$6q&\-j״?xymq.xg8[D$֩Rv\ px> X&3.d1q\S1S-'3եXa'WJMGQC0,˃ +F#&E o kE{/wl}O8RE o%$lڑyܯP-#Ux461)sGnC2;खcʯ TG,ZG|{Uij]qc'GۥM[r)*k BdF)4MW6nl߳,{?]1)>%g& c3@Py F=7edh i[%‰^c'I7Wuc4^h(2 pp +2-u4FooĮ蓶WÕ̢q`y.QH]krc|>/H.#u$5$ йh},8#P%:xu<хi9J!.viRPnw@{ҌkA0HK!׸N(` 5 hޒ1E0:<~D46Ōp29sI;-]ϗ;?5݅Q9?[VUh1k;A6qF4W)q2BQi6]B:Tb r$]a K5_",h:r #yx (6(;iA4 2&ͭ#W-2p7bU~Ɠ'O%cysc3 m`$>f0-6NaE8x9@ Ab-P'C LU, jS> e%۠*[!xduΫg`j1NS=YV3}cl mL*G8L&$oxK&0{rmQ4p<}~8 MKD9Dk}6N[6M "TgЫ=;H5^Ԧ2-ۘs# 1[ȼL(ӻhywQf ZzWÌ I=wP YyέT4@0W׀*hcP0`MQqv^O2V\YA1&C|{ұ-hDy Y&FHQ-J2g9* |&2WNQWM)`_JANٿ\;تߛB܂tdi p/4MEr9`oS-B 89eJJ21)lpǚ$SsFiШ`Muǵ¥qy/3U)r}Xq mQ$SS60xxxF-d2J K4霍~;&0V3:rJa?}68Vd0؅>Sn)OmuXˉONd+Iڤ"ς.l٤j;@-rǑ@m'QP#cS|!gDa}Nm~xߛCGM+Œt ((v=r+XtǨ'6appB_{/\IdL!:ƅbk+NEs~2z[W@ܵ-T:ϭ=>$fmBp!#Aȯ\GQKT6Eld;p3TR7Fr:%88Yft ܼ~S5)8-uyU OwW<ڏ/&]fkq˺斴 -gjQ~4&8 W`ў%db( j#f-"Y7Oa\=ȧZxh@(s] ~le`\˄h'2΀E[+.RA ߠ(SyI!)!+Ez)*]:La؈Zd=2+ҮsI1{!;斚:4*[mAgeaGC/z8`/EoU9Q 6U)~rPگ^)*ɹ2e}n/!("]"A^e.Zw%tSz{˛`TC7Ҁfދ~g?|r|!m$U53.665j/1AܠCbBR`hJ>\@y6#)(늏ЪA֣ZpX+;҃^ڼ̗XDwSY*apwA%7dLbl [Y)y8$Ȫ\aJPõY͞@_1lmvlQҝz={@'~N^1J52uα}Ny=q'vqN-GUiM,`.E_p^fV*ozgOmǸ(2GkLbķ{/Pu 7>,R-z<,d\#W0;=C9ZF;% Y+Kvz{B]Z;g:XPzOm:ЂO+ ~8wKQW.~1P$|0IZ&bh$x N&3(1-l`t2xR୬0^a[M{gjk:h/缅 8jW#lSv㑡>H ?̆ʹ,P\ue BY3%l H+wuYͪg jQj6j)[,/mn ٞA()\a$&)?mΦ!;x%$sJޜ$aj3/}Ou|UЪKMb4 C j%-t;XӴ 1qOݹ+PPqg:{ ^,v)~yq=5w&Ln*E-8A.}=DsN1D.IsBXoՎ'F󊡘pn>ͥ24/0ωZ4 Ȋxnu~(C6m%F^$Ʉ Rڲnϖn&V}A,℡W$evxOgp7~GT>6כ<㋖$$n24%2)dIs8.MuoTJDJR!2rg'z ÙM  Q> 'JCrHPİPr1GY8e5O-C 1zbōcZIMt/^ߌ6X^*]*ru ed'=u[LEnJ(CY=}>[P-JpbCɭ9Bۄ8bg(d˙rWdyK6*.X)~:mPX*|{{gh]\jI6_ P3X/?w*c]Љ(KbZf b'\=+67[EPc7>JN't1oCUh[0c&c }ʇۍ[&x!Sl@wZF =/wQcEkn?B6v^d(u;.߅3}ܮ$K3 D; <}JUڔNjE[J@YE rvRpN3?cI v[])?32e~ m8Zn,Mqn%84V*-NӢ ~X}+DW*G(az".E]``egBv%1Oz山dr{$IJ[0.H-B]s9 yAMr&{.V}+Wٞy;` toW^K/, S7BuڰvH"|*DZSk)l/}12OMXSKTWZLܡ_GR_JE̲:t;r1gF M7g4 faRGfH~C)x.X2{UDϹkO-%5vfIdS eN}J~z ?aii3~0χg͇yi57QC8 8=u<=%(J>ƨsW!Of N[ᦾn^[zh17x |{vXZ {§c, ,:j6Rn $rQg'SD!O=vorѺY~0rPW2m) <(qpfm䳷sc`[V,0V%\v<}sNgpćt\.Qw[(t*E| H2{E=byQ6+ǥJqM|ˮo! FwL VM4$`&x-R8k:Y_XF'{/9Km0 Ť)|`o6t'IG#'2Jx  `6Q gP82ar LM5Ƨs"˻e>HyĚXrv!zL1!2¡hGOZ`nۺ&kIؠŽiMg ~jqWkWS 39BD "tצy;B0d։w*+vs}[{oYJŭFhSI vBxdj%AXɉ@M. 33Frcض/"_ɜB;9'piyz5dvJk7n2M7oxvt['hHdkٍU+ߡ`D7LkҗͶ~3Z\+f#pYƬl.vWwC2X$А۳KD7YvvˣSK CjgQ+Vfnpy Y8>JknZ:]0@I&{\<:UM9R5T}nQWB#QV^T'^~XX<z,FSl^`SiQ95b.>Hz #Ipbl`*-Am֘o7zѦ1L^=7yK{fUAr7TѸRm4lBۈ,WJ{q ۞#oD#zR?숉v̬ :@OE< %z>?/udf>^@֔L\HR]Y>Q'GfՇQ Ci|ai$) /6k@1qrEoP8~ B+A튃 ?y_f'Ed@P?.D+*ΰYM]ɍ6ځҀ3OuTdbT)/>ͨkbDZs6?yC4𠪤pS*MJVJ__ؼ!b.0eQ^UJu1Z/O8JF"+ʑ W% [vʖ\hau ;&7yXp2!LcSbUAf@}q!8#_~éYI6Qs(.lI,nGtQ 4~= zlcrrT2[җM%֮?Ea^MB[TXOm=iTyjJ=jtBIp#T;-fQp>jn3o/^欆$6678wN8|q.]  
cp>*H|r"j \Kl זbCha!h֘i8՝ŷ)Fr[NUf5{qr5HCCP<4.AFkơ?fZA>d uv/&k{qF$rS`jRlJFf'ٖQb^t$UTwM1&g&Zۚ ˕3Ng;pGx'DA#kEiB՘2E.TDo](& Tܝ?FG;'*H1 ϯ(1U!<3"E΄W) !jC0 |yf s3v?c'pU.p (?3K Q2bo'-S"L Q&;ѻ+~Vـ更6)*C>hM>nT@z4{HwLMSGQ!`Y7as'緥$On^}W]%'[ ye7zjIKN+.,c{ !sGJVCUW6r(LLɇE&_Om;j/!UgQ@de(J6+BիR[}-ST+MN'CeۚX]x3Uk04۵o$P$Qi;Kz*64-WM1aIjϣ/k[Aaެmaw̳.Ѡ$YKǐ,RB0%:A]s,B)uWCIҚc>|F-kN)n-)fd\&b <'Vm@DCj~pݍ P (6 I5in=[WOfC|`z;n!ASD!o?m*2L1|F*$4敟t#ru^؛&]LhWq.5EC lj 7~;51 4`$1P9RD-'%ƜVxF-vt!yr建pB93+}x{[k˞9 8<Vn&M`Ӱl~qhW*#?w2xv::r3|?~xX{7c|O6n~}'|_}_퓧O6~ qGtnsqF d,>wEBW "eJ{|܄ϣlzC}'__ QlaOz,@%#:O=L'  ,H:C;\ /aeS4ZTx] ʈ!vkIk2 ,Y0+ŒaJb7yc ӸԀ}r̀:0 "#h4#tdnh p^{@dܑ\ aq[7 S.dL /a5g( [޲+\w5R,p^\p (nx$yTXqt|rQ/_E/~/{ãw^[v_ޟ^#,%~^V{Ǡ~D/N a/m^e`B}0Ѡ{s>9z}x # wwz{H~ w^`8;/{"h'jb#Dǯ{} |>=<8 <_F{;v~G-THQ||x z{HwSw"_F'=`:F0O$ zGG'Ãv@G<< ~DN=s QG݁~ z\z=k>u +qM`dc;vQeS/zeovXyAVR W;| ig'׽OP,|$sth} խ~t,RƕT`6"8}-M1pd*&': ͍?ZtbxaL,h%@q}WR,~,nro,&>~s v+KK'+P{6d M] sxMĻ;lfpiʿ͌!:k).ҟl,,cd=阚Qr ҙCkKہQ}%C07]7$J6_ɫ!E G>>@''DS}4[Z+& KH7 88Ɩ[p1qːû$撿K2^m5",e_ @ϣg5~QT*Bfc.]zCO7/.A#Uc>-էf pS{ HSSZVSOCs0?胪Yb\yEEl`mRvzT;ɨz!̭<[X&lJ)xXC6-;R)Y*wXx"h!aSfgoh%)A=!)Fߓ;ad+O:ev8A@L&fN=|01-?x=oMnvg\gR冼%$Nb0)onc޽0b偅/a} R=iFݙ⣴*hPPN 7-Uߨh!\#&2֧ru't/~zf~g*;||=lj-+Ù0 d u`"KÝ|I$$\W "W\%į]\* k3A'EXPELL^huOR/9tHqu 8\ͬ]$ӢrŬe}T+z>lGKl&Zn K'Ɋf@}Akn+;Xo]xn%P0&WoaW3,ڪwR9xO dT@Ƞ*(Tb=?ax;FhF1+#.cou4?f{3yv³<[4 $Z~VtِFO˘*p(0M0lVrs)灡W0KLqL ]b;L^4|߻ ,oS҇`[،HtY5%&55^qꡆB;eEpƂZBي_ AQW+gL '|N>wM>A sY,Z(L.:EdXNA4#ܝb{ĊFCXfɒ םhTg0nE'GNE]_njʫhC2Qrp[k\p}3t_Y1yŴշ'U7AS3.엟$ ŅY2&GNùڍg#E7vv#(dϙ_İ "$I4a#ShW I:-OaǝcJh^&u7;BѮ3!a+>CPJ>?#H.(+jYI,%pDSxZ~+n>>0b o{l j7DɥrY8j fq1nG0toV"x0iql3׻,cl*؉ɇ錂*dR;#+ta#OzQagO xLS"BgLDMIhb]^$("TbG}$9jlPORs4\C%f$0._y+{ք |JL$lc9͐oSS7mqk0vSA !@=r`wh:$-δ0G4 /=p`j߶YWӶ3$UkljN89٘o=*ཚŎ 8{D ¸8h`DvM|=B,J.+O$!xX7Y,zinH`<<jưc1~&\atLT>m u4XM[T9k@1:H)sB -Ԁ8I2C .1$=t{Ypn"QAnp򫛂#ՏrQGq:|Jh D)V{4?H]- T\&Ђ$ C ֆTQ!v{_ǔ]*)JBy\WpSALam6B3u&QfH6A$-F)1wlkF-h[xiCWc Cg"%ZS3SUƒY",{NKVhD}oRJpg=#("*6kV]Y<-ψkN_{E8$_A;@IkO9ZVZS$1cܞ3Ʉ@\GkIw/ym_D/KG瘡pV5~w]:q7yPץIApvۍ|Ӄh7E@hͣ!!|~LVswM( &-3&`SuПY|ui˅B'_e&{>RIJWb@o&?C}>7zTܛO$Aj]Fl9nrx`A9Q9˻œW 3wmzG 2.Ǻ7y4r6,guqmbVzj}k 6x"Vq.;XzC&܁9i0C.<&pd+wض7 aɜ,m]ai G/ DͦaNvi2Az'<ܔaK0 k >UR :EI~yƎw|>V$.=Lm @<!pZӂqRL g6gR7T3pODkE}jr/ۄ۾t*};|8LGXY?wcE/ML I:4;--0xa$1D<6xNy(g>csW&zrWt$31_&.*wDL4}]6?f?؊nX7fq?:g12+fg:kw0dDa%-Li7xמ51<^/؝,eOkN:<3z%lIKEW7F .)=…ygZmɳhئYlϸ@Ok&__}|kƅe#Yfcq+enxG#+Y gΉ?o;ghhq ){'++Clmx?S'^iA$lQVXBr *|쎓IBC멳lU^66oCU.AJ T[ߧj*sg&Hʺ%aN7%d.%d1qU%˲F%;Ǐ*ģ)I'lr}K.mc~5 :]dIʔga=5 3A5j6LUPQhQ_ L,vn\ky+3n΢޽sȧ.5ԁPWΚp|W/ xV5 "`MԬe[X&P&k:)57&dd4f;ۚMIWmL׉"N|2d]P>:BϤZƼhGņduKn#D@~fFeT\?K^>D않U:kyގh#WL yk쵀qAg*Idc  ӷq\#6ƒk kE9┣ߵQlK&ܨ!yՈvHԿJah,U2]䵑צ< ;JO8F?Sc%0@\t~5$ W>vbHN ˝c9j bnrColdn ttd{t་?oil\vFkfjHj7;Um5c,~`wcQ'U߷k1#b -OMhM.U/liQVd?9?w8,ᅠVoU>G1T!>w=^~?$MKXpǧ6\YqA?h{=̛Z'Z$BkowOOB' wNC)0tO0SM5(ʿ1zSnZU& vNJ+7l@7o{9? 9ᨭQ\] )~^+:^ 3a zQ $O(ڠ]gl17/\Am8ّ7(aur$r:8I:nv}Cw2HV(2 V謗w9)(UAZ`ƁH1*sR|&nr?8CL0sQ\*Im4)&ת0( NT<6?RDDZ  o|JjlgRM}I37nE q5yJ0 2y|@a}\…6J# F͔ hzUʳwQ"BS~4o(;k]h泰'4 .r4%hJՈXV]L J61݆VA|@c+*ZO[\T9ؿ9΀De" %a^Wl7p v=׮ˢZot9F$%uM v\U}*׌ (@I ˭I] Է~2uTcn2_Q_<,GA<@U5+$^\8L-A*CK#Gv4=T=/ zYb-!Z/. 
vkIWyGBV;E9, 6b̌l|D`Wlz} Mҕ3qvYp,xC$,lŸR3mYcQLÓ'$8 {g4ۡ'tx}jNytrj#QĦW-W cv upbR; wnxq&FށGWI7ȫo+fb=Z|+jx;A^< a!\ N*9I+Kz5e}' 5Bǒ*{Y,:\ fT7GaZ@g=3?|[.Vϲ>J֧!A܈'yT] w^G$6dXs/t~IZ^&w&K=t{}oktHZ.m~BHVp6bYzaԲ7vvjGHyQ{MGfʱp 6+.9tо5qB F +1q_=JעhLSVY4'+,V1[Uwz}b,)q Xd+:^'OF"5[wL[<ۨ-vm҈)~5ͮDz$)GɁr_#A,{) jܧi\&J75h֮.h-n}5m\*HXQw|yoi^+5As|nܱN @q,jMtY;-y3ʱzΩ·璻J_uZP;O1tqR9W?DO8'GK׎2#Z$F ~3ŒwWyoݔ.rp(YM8 |$-S:R~ Viβ䑼i)D+B9ZbhZu{g5%#(R=lNklFL<2N7/{yl EhbfZ"n m{nꬕ-1ymmg bcp7H>C*2G]^MV1 p~}Jձ2h>"_ -O-]WCjAT}x~#]c]vȮ-.-&>{{ T}ʮ d Mģu~B\0]q#/"g o 1ZRid1CשŒ]52&͠{`e&xGerƳ:d=4XnGesyRk#{JvDýg̋YPǚتe&2K@l5nXD[fݕee%Ag#Fj˯a+6 ;AIrS4_5whnLoboNE-Á0<#řbVxDy#܄A7_چf-ˌEBF'$*^]w;sͤ!oGK ]UL9+Tt&Ɛrߺm91V\am+$F_ySfl85נS-*K}e;6Fz6sjc ߒI6#؇p``>ci;϶췷ñcO[ؔfa[Jq~)AWݭ;s2٥_@@De_J ]|QM8dX0ɂ* ֺxzk6g*u u]Cjulʌv]&xxm*.f|)gƯV`g' Mn$L-h>5|{<&*l*2(kBqӈ&U~j]+搖٥K8N23t8(+>$jPv7c.:BRb CxOܐ+DT #FJXTESBA(-uV)xZpE`OiX&3c[ 'HzCv( 8/!2(!آP0R^.J *yUԎW,g B%/fv_EY!T gcHCU-V%? i+ZaL7^~[?y,u0@CnUaQue@93VF¶&D*vP\RˋI̿b[ᩯI`eAiU'WC"GZ7ԗTRTӫU~ ֆ>ӍkX2{wmmz|zGkV-hз7U xDke"^6`J1\nIFʽ"-I^I*DȮ< +~>(rG52ZKrH(o1v;|XJXBA :R[~3(R)Z[TvlkM@ x]xq1$06Fg# MxLqqilLy`"oL 3xL S|.>m-}t EAQƀm㞻c2ubaہYVR'tUc?klZe16L ӿ&xwlPZVv?~ mڋA If%RBqIM5PwNԄIuf 'n΢lŭ*O&  }~K~\-8]4gQ9' :Ĭ;HCOϛ6Cy*'tM$B 8Ito7M#m/2Rӽ/0ˤ2 ASm;b v=sc#GM<%IQ 5MSa11`cwcR MvpTJ@ItvzmKoQeY,مV񹙲ֹH+U9}~\I''2172ܧf% S`H%9L˘_hK/kS@y1k>cbkm+=wT RZf;d9T쏭r`w8p4kK4K WX Px:]9Py~mu !n1o+舲:uYf9ĻV/I6fa`[26<_L~ҕ "R]=mG&{FR~$R+zNkE)ضSb1É"% _#lBp nzd] ߖg~x3~5$빨^WB%'خ|ƺX=]ͥi* -YoW.XkbyW^)>yi'+DTTA/.co-*i_Dl`礨K㒊t~ϺuZE%jJ^D %WָC6n]&ѯ;[խ:`rjhX 1LJCG"z*=-uO>F(~L/,V*␥ncx(.r[T }ۊ.;F;blIjXZ \Fh>5@ȃHW':Q@9 ")4w߻E4MҨLGXV5&^4jT{+18gΐOћOAԒgKE;K2Og?ĮƲ`H tA}>d${|{ҩMn(X$q aY˜$0šd_V.۶湓MtK(oU1(Qa8+@q{a<a@16*2+a >F+ 3TrŬb k3 F ?e՚X$%ǨtOuΏS)gD*WP1di!`Or1ex獋b1x16lʕPaB\bno]"'c?YMʎx}d 72p" 6ELXQxO/SqqitZ5'p@c^]R$P$:!Nwn+B+sK?5JxyhA tY\o2 f| @y/!ERp, .lvLv<t`eM6{y9Je=C8OS];OE[(1ǕcDT9X8d^J]|A 5rMqکq/Re ǠO! pG6'`V L aT:</1876.Czl˜3RT]6a~|9w>(01\4)q9[-0^J#}? z5G~vpzݿdwqέKcCL`Dǰm@ !9bˍ%w }8ߘDHWQ0%O^,*7«e3^nd6`ϩa هhVh ,Q!]tϒP9Eц#~+vf5p1G/ %]l7DhةTUzOGT{;=z/=B ?7r?a;ۧ/lFlӂ>򳈤(Qѐj,?ϯ0_*\̽ˌ-Eԇ[̀|'{`7X6ɐ%ٴ!c<,޽K)I"2eKR mEዅ. J 3? kĻ].0cRڏoyѺ$;N+ nԛQ?2_ 8̨+\RQC U1S_NSE_/z[E)y+LO+&/I$nRaN 5 092kCQsd $r9jB~UҠ!IL&>iG'KB݉O?1ZE5g\]Z' s~w{_ݲ(J~wO$(<*:2cݢRܓ>\FF= 9ȍ%R[7aQDFI7㰍@@!ԩ-kPڤ"yj @'bINj=(s1Bc? Y&8뛠 rsJbd0B_\v/J;քd~|=5RVE`AKCpԄkXe&Hadxq\u,6G#W\U ?7[&ŨgwQio%?g]!)Y< Roø tdre}cY: WoD:>(նڤ>>zl '0i_ɬb C by~xz2}$Vp>WF/0R]B{kV,V(9X)Mxz Hj%AbN(g4Opcj[A]Ti`ijoŅeY뇢Z3h7o`xնGgB p EAn 1հԼ(J=&Ucï_w%42F!;:l-o۷ 2qB ,z;2=8ɋ䪛͋m'&M8-`ˌTR"m`viȁ4ݩGMt-n~,9CFZed0LH$ե]= W07^)2&2Jfո &gE:q:6%~OF4KhLڂ*Vh.spHnBrpCjHU;qxxN^m 2\5jqϕE]3Qlh_GBַ05wCVyWO h&G"k#\?*⎆H2(WS<T!q9|M=Q2.&f@LAP)'4q{Ia`ʸMh+j, FJRUBAQ<B9 ȑ@xvf3:&3Y $$fCIz]5YSh<gt``[p7}ㄜk^iUC]q;BKo'(ǒ>!/e}d :1TgF*>>)[ Ϯ\d⤄#Awdo ={/8M >(%xØD bI?%y0Z{ic]MS % Txzknwsi+ ֥b*V-f$f# $ _!ӫRH^237P% Y1K! $70v\fof8K | :U isK”VޑrrSoJU$$. S," JP8TѠic3wIp _ SFs=RҚrNF(h1;C[玈8+XP.+R~.餺h0RڥTidu-A jra<qZ|,oN՘n"Ѐ /J^)rW^evᾪ!S{HWcH]%\~L+'+1[H )ށcܚIm |' ![N :Cq̯եT X Lc]yI)T.)>ĸ}Xrg\Oq#A9bП0@tt Ck 8Gxd,S#XE")WhAK%g S-cߟl{a:GR)IJ2~ЉCiqnˌFF;9͍@YL_V ATPSb a>Pg& !gn<)Ƴ ]U[ ؉Z8#Xte]Sj:W;ݥhST3vT4,P^+Q w:cUt12Wrnxu\X,^)ueu~\ 7l,2֚05-K#hB'O4F*hc'48ǾI%V6٨vw'o_̀.Yq{^k̚MRbkV;xIO^QSǪ J]9pڍՏk-9}#(j#_72YZW rLU9{J)%Ʀ\4qlUO &(S#W {!~|(2:WԲv}|O˝8olCPTi~ hW+fi9 XHA)u9":5N[P牎W=HZf ! 
^9$)~z{nj.QGҩ ά*7+<`߇FsHȅ8adl-=&kp 'by`mV{̮c`,"={d܍gpoPېرһpAuK>HJԴy[ZjpT'$Z`JgQ*M23Ewe/5FLCvk#ǫU Y*kEKBxc[ܸ ]^~gILr1ZS8Ld S "vm=2q,k=w&֘zz1څ\@+2D;תȷ>oS o###h%3,4ʀ*E2Z]bT\c&d;j-> *!ƭܰu9+Vpȁ:+*UVL&$XNOSd0r?@ AdکZ7Kb|ccV(*pզa F3Mhldv bQg*$7vD bR!L= ]b *WKWw, X"eV Rkh*D1 NL?KoRi/X&8HI Pvy#x쏺(OąxC0Qg.R+-҆!aǾدaJa2 5H)\b^.&Tn| 5(C+Qu~[0f6@6b㕃kKvI|f7hځiʦJzIS/!(b:]SZdǹ<983K#<{lN$Buyyl*ryʨ؋Yrc3 "PΩdd)&vTTpEcU]p*I5C( riϕvL,*¦ G%LjN ^N`8R^өf?~x:k<u7wc|O6n~}<_tۍo7s~E[`3drwskNԚHvF$OrF ^hD" 9(?c`#]._ǣ4xtN~xpv rG#d$?`-irEsXB7v^Md$VԨ|yps~n4{>` 7 c[ {{|܄ϣlz"a;|G4 Efc99gQsV 6R0Y:hoO =9O^A/p ~C$万PDTqxЎ~<H܁8aZ/0҂݉~!`n$ਿ;ЏA#ktaC`b;?{mX1>Чaš8 ~ ckj}D9VNI asU5 :mǺzWg "0R\B`I9= $LWoUo}׿3r,T+uuVhzv;;:ȡ0Y :SwIWNAfPVyUqһ_R vۖ׫-qZ$LlC۩:\ biڝIFѝ IIn#ޙ4m/ iZc˥8a#iZx֮ssٚ~*yl*[b~Q .N}k27-5c1C SdݔWOnO$/΋Ym U nF/KF߹ɔ!%fT@U0R R#M4na)i7էx$,a($,oUɁ[ȜdP\DXjH)&Z) ~a@O4rNrڳCͶ!*0\|OX.)#o;D/#j>lORQ3'ZnBJa'إ$W$= Ĩ2f dhDxsvi3..V`ffO!þڕGSM U's; .)w{ (+%Nlm2T2nѸ+n&Pw-OƅkP>-f| vjnWJHjj!bpJIݕ3MZ7  n yuYvfU^mX/e>V7S:DQүR):Y^iWك;qA8B76;̷T̈HZ D]J4aN Li+wr)A TiF3rlO(ZVR-ogJw!4 Z,Ayaڨ|VXq9.ߌ'`"MsKͨaC#1 rO[},\1 m M(/+H';{sا@aCrWë #b}؃'-TkƤ烨f_aK] ]F@01d9ȶ_*rJY9TV9)D># g3̥B ,Dq1 ,t_1W>j{(s/x_uWˋ@0Dɮ]SnkR\0y-JQ n^f Q  /3Fq]"Hd3.貮iӪ4W`˅l/W)EK*$WpU<ǚ}Ri :2 3<]@ - :9Aއy!#'(яz5\>^cTaƩ\c,:>~/W91(djpoU?8;;4N;$H9p\B@UlFA' M],>Jd-Y2uDߕݻqu!&GY(4y|DhA&_ 1^~B]@D*֟-FlUZ!<0t\k ZԔf8 H P4 w%AG0=["%Lg DFے#n_dR2,73@˜|ڼGpYx:{Shȍ̂D U5W'⚯xvA%.5 wEzI+HnS 3}qēyh@#9? E.]x Zr^"…G8pRiHs#%r]y\\Z%6nW'D+ّ5`s4J:2DB>@^nJ -qXCmg:"}t?p|ÿƘA߫6lDG jI%]A!"'h숼8Ne૕ ʙz, @@._Y@2k_E-#˲Vߧɭʸɼ'o$ֲ7A5&yfJDﭖRU.DN|[r@8bNee'~7:%Q$ϔ>)J)0틏R JzDgCj${O70 5.ehYwO~r\\Bq^AgH4J^b=-{- Dťha-@SjǐCe&&`4xTf?YI>NDf6KnrQ#$cS0UJЀ ;r认IOLrN\ˉ:J`x5h?*c 2jiHL%ECMipZo'cxĊ*XNN0ػu鼝~J1"R ZOVqHDeNi]?Duybx,u&9ӧJWl*M Mډu(3r!%;~~ Y$oM"bôZ6w dpSR35ʫLG64uCvAk#C'J.^pٹ8tq3o1Eٹ~]Of*0Eg\:yޚj`ͽQ=gTq´;y,2p3g[QS\J($W'LoQ#?.U,ơbvM J,s;@R!jj苏[ 6s'Wʜ MʼnA֎*~؂cW}KԚ̨:mawޝetdhkc:z7Ȏ1 [ɍ^aCtʎ<.EӛVQq!^{|=eˬvn[{)_/ c;wy-\V[͎دye7q 0@lm00z2_ sB,b x\}7?KԮpO`B*]O~4(l-!|u__ͽWx}]޺{W= z/{C?ê` WC@8}@0F^?k2kЂ"֝{χ?R k5=ȫ PdPN_;.tn> 25p-#Z,]wv3a"ʘ%-˫6?~\5 z0  o0]İўNEd11F "rxayďe`0^^X(}?6Ucl+~Nfg'?sn!d% GPҡ v2c`_`#f SL7Ft2^-ז۔}#xʶ:`цŋ!2%NS+/RZlGL7NOiv^c.)YbjxtjAh@虳[bOmq| 'A‹v1};a1Ji=1:)X€43L[KHX >=FqsJ<1|y\O}o/:>IWjvJ' ĔXnIa,ص'&y8+SMݙ]Z0k6&$MA4waD$⃰'5Fv@LQ!l}]"Iq'̅ц`:̂V gϟnG?wاy`/miZ-#"1qLJJ.6 3hK@_{fCLh1i[O}ݚ5pkû?-| 1L+|?-Ѳv G 0!>Hg&' > } \a Cz$7r\gƒfKr'5GqgdƔzH9 H1[᩷ɔo`928ĹfMp\,ӐskBG6Av!\I"2,)\]` lOtQ@O` ,'qO!٥0aO>~'ת>;@rP@֖9HgJMxj>y,GR?-&!VfYco`RO`I|PYU9H6SAd!C +;ƻ暿X~am)2ܒSa~ ͗%%y2UZ9&O}֤_-'WyEJf(ySgeg l`}:&+.?$|Ύ?i{~ov[}׃w@\BDH+tUgi%=bH'Q+ΠۄF="AqB҈g4`lď LJzFhB0 rZ}XnrGbG4@CEx5ZFiLa ޯVҚ +E4&Ot1?-ϑڴQ֊d=Dgw`Zb'GIM)shGc21E>Vl>HB>ssp3gCZ'd4i^M/Eֵ5k/S\y\4.MK=[;"j@; @iJӸ'7-,uM*w\]CSWhC4 ܗ%WNb!e{mmq\,)GFG|ha1ܦ U&A:IgDc 4H7 *H0y{T`:P5BCͮeU_.Vр1 5(NъҺ}-*%IGB)#q@LFA>g^0 .j}/(NA(fl;&MTCKN]\Gjo7`bdzd UM19*F9Qdc!y I6:)#)BaYQ|W`8t=.H T 'z".Z)b3VATmk=xJ`lTst3͍mf#WflF'"|7ch=Fes,i#24od)%|056;!$[&ZګHڬ>O_yGfce̸GLԌih6]Xa{j7QqQK6/6zE9|_ WŜ o-(ASےWQ˕\sE}I +0R*ڡ}NH$vnͱwZ}5HR~+(a(FG"%! OVUٸ5\KCJY7Ų…K%Vƍr4xQ!ra 溰fIv tYk4+Y)s[v(ELK^}~(zPC"hm "lg9& >T+?T5_/kT*=CJS;BMx;ZKb`dVK@>_GLԬ .|< sD5]kxyCg4¥<9ax&>_4/ /!MhkJ= .4I&>DssO$K!zM^rM.<#]$zԈx؟ez\"ʩ]g1W\>@],O>V 0{ܿo22Z r{. 
i52â#l:fjnN9k⏔+$ V@feEBtu@} #u@72DWw"akSc3 5b4r2\htn7ߺHT'?%0vt4Q}|~@*=`nj(ٟ~rIF zTit [ fEв$Ŵ zWD(JeB]eIPQ+ .w6K 6@EHuJ8QT)GGODyFq䝌h²Ԣ);r> N,Hl|JirC"< ۏvbvJ4"8Og;XX#Փ OԅyW Չr R 5 fft;/kb5_d,r$c3u[u̦# P9QNW$g~ϥxFW{H#䟑| a?Z7IHzky-*  w i\S ڵ|U]|h_tgewPHG>,"1δS?ȼǛWD{ asJO[":/3hPĬtW-$|vk_?#f&:^ C⟪yœ,T5wT*Ů w/jz0_I'CUv18h]:P/m d<|а'ةܸ}!@UO/(M8Q&IrZ%# Y/)W'=%%~f* XEsx[ۥ B/J̌vkǕoyj"m @LZj9{cOteoDL= )E+ z }71Vpo6_j&ƀ/j3rX+O Y0% $( }s 7ܱ7𹶽Ŭ=_ȫL;vUV(Qj7dT<~`&k:Rj }#+1w҄IDkKakf\ p;cnkM:,`OA'AB tz[rboz 5 ZF37Lu &!.p _?78abhasaCJPr9:{v!-g9#:EQf􃛋w6Zr:Bz~@a KJ( 0U9fgsqB!V \kAƥY0I 7؅YRS&ExB(t> + q{\N}8WՏF!.jkt]X;4n:ppݞۃaQ^8!1"Z6oOAm@[EMpDOb~(äjG/F YDQ=[lws#T"Wvho}"]g)t7Hણ|dP<xa2_,m ǘsU "ކa'6z!i I'w\zcӎgac] Hl_xXON ܏~ށv`<3;1(TۆNQ2 ՗,cx!O/;+!g+#z ? GT gx*(] a[׬antlr3/__init__.pyXko_1pP8)duwBKtL@TJ6.WŚ/aY=3Rd/5ly93s//..CGImVw>ToȝG=]Ѧ&qmP^&]).VEC*˨骪[34n[RBD'Ԗ5jE]7΃.tZLVq>ZeiikM;U,jZ|h| i2.}Jʸ5kӲ7'W-,%Vee,ӇM1h*72d_U8].%UF-/O=!*lH=d[ ډP=u0`-l+MajS&X i,˴c i LTAexy.6®Uu&e-V3@R0*8O,,X#Mi, ^:; 廱vi. Ҧ+Fc糝lBƣ /UݼaYZ#||cx0`\ x l,Zaյ&eqpZgn|UEnA3t RJmS1\q]Ӂn{ѺIkCgO*dLS4VcK,55&59GkRDCB+{ 2]J*lWlgA7l8 {IV 5A2]8"fRdV=![',(+iJjո7)47I+f0 u\蘃eev}6Q5]I {_NC1tG[8*uaTq hom!]] c1YͻkDj \N4(^)04+C H@G(]RB `mY ȀԿ](!KZlwZ=^~Vl!LČ9KpȠk+t{"e X(5"4 '霭>瀩Xu,**.E)Q2ԔknY"ZAcVy89 m$(SsF1. 4>™"a$,4~s yp(L!+#,7QU U}ŵY`z#>n8kcx]S?cyas/m0,N|j@uʁc]+SoJ7_0鑻b={/ 0E1rchȟOf?8UDED3ޏ,ZDm{/ag~Etќ 'r,!,S?\ޛ2Z# lэsܛg$£x O"b4`lDқp724YC+,$M{S! =Y=[M*b1e`)O AC}Kcnr0XB_ e/nNbKE3>y>.F?ˠ1ZW{gGo>x~r>C^jDZW8#N?l]x@7{| J?_/GW,q|[=pǯ}NOV.˗w;dyǣΆ-#>|UU_H*K?W߂ryg3lو>&Ng$rs/PKRy>]antlr3/compat.pyTao6_qp?40`DdAPE7$Ѡٯ;ى8{NkUD}vt:fnM5_&s1֚pooxvτF( 2fqu4t.lp( 涩lk<ܡ?@Wf<Ӏz $jZӅ[۞*o?Y>vq_`e_OSWtQ-v3] .:bh݃#78#͖G'-cVKH }^LDB[l J 5R~,u!\SC@)LKf$8-]h^jrM\I4F+_Jh.Soǚ 3Qe\QAX L! *N# .3T%q-2MjkEs:<D(Jd!bď"`HE,y!> SgJ]" DJ(:Q>+fͪ+-u]yƒŵҜ^PJ"cJ%GdEQk-윖 ,|v| cU!K4<̓|?,Vmv/)g!ȟg/1%W;jP_PKRy>^?antlr3/constants.pyUMo6=׿b=4A~@dʒ*QbWh, o(CO D g޼8z*>R?4if7twژJgK_^C:-{X;>ftVӇw~{?=PZ=!6PzP3]fklkM{ʍi]to잺F CWJSCZ]ӱL/-h4MwoU][Nim+^uGBUW#p,Dxd0vN` 4.T(Q(>j/fk<+?ik5\pQ͡MM?c8#3q\N0)0>f$X4PK鱳>]`+$antlr3/debug.py=iwǑ_ =?6 swAH.Zѓhcf9H1 8~Yٖ꺺De…ޓ'B|]I:z]^^墽Ϟ} G1)6gAb~ jTf2ei8/0E/EI"K yX%:0IJOe  b#ur)6ir.C~8Q܆X$2NB~k{%24Nd -,bpE= wt8qt4)XOؠ?-4: '#ޜ.}= ~ ,{ wŻ7Cx>AyI3MFG3 9;q^q2|= )z7;0G)6009юNJ`viX?}:R"B;z#uKpIRXZ{x8ozZt6fr-i`H.cR aKM\=h.5dNb*{,`4M $/^}3Oi4iM6MWTNUfW^q+&x)h7K  s^;b`T "MVrS )i;F^4($# ޳AR."h4#L% w[)=aID߉pJ]2EVK5g=R3ܧj,2x&ycmi3w pF~ufp\#l_ηRu%ڹ|ܧ ~".!M1x br#`(e&\~uė vżEjqvu v:),T.r5H7>}qq v$;VxVHZsNX2kz[kƕ AץA4K-qJo g /{?p N/1.$f1.a9at,\O`A{[TcjU2P~%P$ERFXa|i Uԛɦ,U|-ʽʾgx ]|Q(bqvDf z󠈊%R Mrfpp`]nGf՟-ü!@)K0h.%w\%ɼD$L)E$3iP`rItYI1E23՚)s*0Q^K` ~^%/yi?NRx bXS_M,TSy|0  1MA ʥDSIAFf*\\a 8d P`kbN,2OEtC:e0ns1-!~KaNgPY."lzOBZr D̓e"v%"@#TAfq !1>YvaD?v-K롾&y)j17CQR9t*n<Y}9՘dVQ PΓ:gݠW iYUݸtFèC֩lV3 BE ~Qdzn@NvRX/{X!~qRK;A;FA3]2e2Pn@!#Qley%G~ڶW2 2Set3W w *% &^9-n90HHzIa޶%PwQ 5QciEeTm}b<U["d &k9O`CG;q$Ͽ#MM[tjZ>v|re~%3_S`:mMW,I#0<%3VNc.&`h38tশv#7%%M͉Н$:flh+oEÚ3eyV N,p34.͠qjbJLJHQ uΤeL E@@Iv ڣ%:YaiVgqX-" }Vg$b0=$V_ϭV\AΟET^*6/s{XhI`MpjYY>="•SuOggK| 5=K2"ĺϥY:n#50.#,pg ,1tGG&־MӔ+?En> yajN̛ѺʇU|iAJqDT;RlKAo1-]+R#,7D=V-c|Gm-Bt0 (9㯱1qWATAJ/@{`A&-_8\ @C6`q.:1T)/b_o` }# s x_[}~z韾O~k`* /pYZ ^ #B?quKO_no s"D﮴ wxuh{IaĀzIRӒŃ|cJy䉘 (G!(Sd :N#Nhm>t%7I`pdT˃ϻ d +lapߓKm<ӎwB[|=tTf]!NEхExB>9x&S p1P=?CQ;!UF q}a5EG9Kzn)8"L>0,@ `'0*xBܱ,9YS8U*pF})1TH B'wVWdFa~Ǧ#lcXS$czwfpm'~nv'`=b wPpiN)857͕0 '`8%Ɠ/1JLvdd6\];4޽fWgkك˧jΠk"^M.1z䶶Y|-?7_{eMG=փiz;5c:J> ۲_ ,|2k/E4NME)o4) ҘE;4םM?wCC>+4tgF eL͚&)"qpphe+ _f}ufR~E2yM¨'lp_C +~Kt4TJgKZćOB[/lddgtVx|UE|VLy_VNrɜQGoAƿt'$[03xllQ.!.A?ȐjT8G0=x +Jb2KqM|\js#/#Lb9 1L?@{>dzt2aϟ=kH*}ZU qlH˧U^9*x_`BW zSe 
JMeFVE v s[ǴzuuĽ8p'QMcf$ɞa'^}43=jYJ]S^wM[Z߃\}<3ۨ ؠ͌& U$׺rOVV CoT01SVᬫNO|1Mu7(2Oƀ/b2<+7CcI{{kܚ nyk \o熱*sD%rioq[yNutN͚kD7Mh5iTiHBa,2/.eߴf gGgf`8+x&;/'HG!"Yr\ݕm1+3![g543ծ[\ _35Dc^2 u7,+@Zש-җxkUTdW Dݍk6`G9EIaЫm*K:Rd%᜝-VqdYuԬ]sCC`˴ks~%YW4׾W ScȧPem@[Z]T3X:9,jJ}w݇yQ=#=L m ᷳml^}i{jƨ?}$9դ.t{5 x%&0{M-n⢠^&wwqոȖ"^a7W8@c+-"MQʰ&>rF垭ʎш*㪸7Jԭ~OIA>y3Et⥕ywC{?%aPnC{1]\Pn>%6 )9qVIvoyRT٪ 0kq25;Mgr1NV>rC4: UvߝgG'?ǣÓa`宒!!ۡ>܈Sm{_n&{^bh~brRiڗ|8΢}ls2H_X_UjeD>A+%YJRTi- ^Ρ3+*L/J 6`V<[olS6>y2ܰߩ[E ۭ/[֗/ouPԐtI04o1FÀoR%-PKRy>U76  antlr3/dfa.pyYFm,S)#\dՀƣ-fAqy]N#P Ւ`اtKB&Ne}ܯfiRy𿊅l6M"?/c_~n\`铷ԼXnbj2ߦחo^I0 RR2j-gu$gA`gAg$W@+ jKDEE [R$π$Jf<H(H#UdJ%`l)2|I d xHX$x9+d^2'3iI2F&k*IMYR( 'eGSh9C2J6 ,kNȈh?dZ`ۊ"I0ݩ\[օ`~{S,xDÚeUB2% >]`L8K @EsdmؠP>N:#6n+ 1{FnGl:dO~cs=Hί#g<hC2B"wOzE7CZ1ҽ3aqQӼuӻ76=#N&B;mx4D7رozzxQt pҷht]~p~u=hAzc_azSk_|:T4ݝ{2'7c&C+u?QȪazgk@%]"w9s6 ?@pk;GF̺ʶÝy#2Fn׫7d;YiA!6դaqPh& hiۑ{Kv-|C{W(x5Ȥh(q:Hig$hG_c @LCiSn¦rǭB9k4H iJ[L~ MFJN*a\D T'(ʘbiv%pIҗř,GyW($}iDM5& K%2[FKjP,bq%1c*Zml %anP=Iȉ历.51B| WpnUEG* ~kGS`dY[L$Q}U `t[+ß у!ԗ ~!H939_J+Mk^'A躆/ !' }X,c7 %bx߮u]ύE-N+<2 Uq y0z rY؁qhǿ$'7t %UB` $m̔vHE4/ G@3)+6zAә}ͦ,;My^f$Eг Lh2{ O֥x+U|M?7E`O&N67-P nTе Uߪi]L;l4vt娟9 .U`x,҅@)Ok|M͟pXJ:#sCo02(!Zn.N]PiX=*0m̯c쭴m=ǝLVӢ4{êM˲˝iXRͅ/3 զܤrN8=>ь_&]:cf>2Lő<&-?L$O)&LWVǡْS}y LdO~58*x&\ZSY :I_\ᬻUUqF>v?Rȅ\﮹l_>"n~g }^,0 BIpƉ77-B"kIY6[B7Ӗb n{$:hEn(G*_RFARHF2jkT`*dl ʰRRao:oA{UմVBVER0"n +7cX-Վ KҷFa> h,<9Ć"X5,ԬkQ"O Dantlr3/dottreegen.pymOH{~#ÊdRZ݉^5BNPwO/@vgfqr3 a/K6g3,Qzl)3$m2NC>kcHX   $#h00Ib8Ee D,x$ (i>a"dil4 h4`ga|O]cn9^6pr߽x| \4E0 E`Y=T](P870, +0ftXdmx PMrD40"B<]Bi"LDQ32FRDm [reLML&doI@ĉ@KI#*Oe@N:.+c-(DP**BA2lSX0ӐEYar)[V4;6)f(;GoIHqR% $#E"8(EBP0]bL#9TdK>A̐B'Ud j6;^ ȿo=w Wy0tsyw Ǝt38oȇЇsےx i v`:}ǿ<@P ,wȽz()*snr. W /ktj8iVQ]oD]]^_mrۨ=tpz֥un{Ь \Cѩ;ȷ|8a/N>AH>gX=˷$s$Ah9D]wt;A ._(=i5&[Hc녍.u]`Y+ sn6Wdz[#GF#בT|v<9!58stlHu/UTcDȊLy2U(YlbQ'IQ\Ea,N(8⇋l;::j4d&J=,I%8 N|?n4d'uGl&DNԡ,'Q(VQ=RȠ?CGKVq(R$hf\'9LJCO'D.x!O*c3ڊ/$ND6y-o|Fؓ;ۆ0=濾g ) ou@Q2ƳVOݔ_vzM6gKޑ1 h4|A;8B`;!?έEۅ<55y 1?aΩM]]t#D(j0$l__'_|#ď9'&bcd[CjScD2(#Ұg؟(؇ov+k@/$۷0ŷo͌GS\aIqTQi狱l%Y\& jEcD"&@RdKϿϰӼ<8w Y5 E@f U XҞy坎R$t 8]rGzSuV&Ջ983,ipZ=YOØ(֚ Eg7ŵ!8Ocm;PPK8險/cbqQdND%{#|ӥZ iV( <>g8(,XA/8 dFu0ɳ-0~H!`o/JGF2E u֮FPHm0iUEf)%ž߬UAޱ: !+ѮPm=ZAS *O g/.*T->3U;C4ۆIkl3ތ[Uꑡ3$J\xln x^zv&2j*bU] uO?=K]2p^Pa+ 5֣-Wj3`# s3ZnPQPW`IIv!9 64[F}g\)_T0J)p3WR-z֋*_Q 'R@*CJ5z.iL9N<0ڐ,EŁz(Suktm-7];qOO񬖞vQ1(;DF_@q7km@$-ku'<͇?B6OrA?|L_ҍvŬht2T8z@"7sq> (1LbL]d2ONȰqɔE%fEi[ٙ.k{R:-{)u{D,+\$-l9[4Uff""=#~?aJbO$< z@I b/7DJ5~Llɫ+$n} S:Z[T2Y^ta #ڵy0掌wPKRy>ZVͽ1antlr3/exceptions.pyZksS15 *gcMe$Cbޤ\oh"Ծ _ v@`|_N7U@`d L0aZu2J0AiA^vI ד6I»ܳ;WTKOqɔ } Qb "$_7W$ކ.Ŷ<%e_l7!f)a(i"W20.HMCbb̭p R/.@!|3 Xh8%74RIߴ~FD7I4bp1 q╣pcJÍD]^a ևmX5y.pZh1MԎ|I54؃Y߯>цcI}ȱ7`P$[b(5-YO$6ctXw0U@rd)4R <2&thXdŠf]%e[#r<#x%7I`B.J#bRX Po%&3jy|&_h%!eG^"Xn! ue-eJpɫ&RnnN.:Fӡc׶net/|2E Y6 \]3TQF.1^IV{ 5yr܎3+ǖfl!>U /\ ɬtI@kF+*U6!Od}T[Lʒ6][Q&ag?#DbFm&FDuLycQe-c\G3P*`߁ZH?8QIJEh0 kx0()ROj j__s>+)KJa_0 gӂ@63*8/'`^%Evj!;M)GSu4]O)}t)}de\X4})H?,Ղ.VL=5{]RE= tZE,uEV3KR<UV7r'{7hk>ޖ/0]Kcn̔L$Gi3Z0F~]4@%Il`u@WU>ZV>OvKޏvVeon*?ʙF'! G"f_RUv۪ `{qxO@:qB<`q Gkز`eN(&r,G@pElK0AG& jê4D[>3[J}bڶ' J퇗prHz:ͫ4ˣ 362 h> l9z_b󂢔|>i1 홠#jk? #U!I~4h=c:_yfdM3J(iDg6zQ:N(CX1UqH-d fOyEf* "8v㟞_U&]F '*,_iw`P~Σeӊ8RynJf|mK"W݂_xo|νP E^\PO#~!קcusb$Tb։: :)y}J8'gs{s]|Փ/ E ?ʌ%hWzngj%ɲx<#޹ >'iN鼻3{?RtWty0kARcla^՜^U~n5V~ՒުoU2q4[.A&f6L71GPk]R0kaIp9Dgr+*"ҙՍblٗu<--鵠\9iu+3pvvYP̛"Oꃐ[W W>ؠr[M˚6kN?zor<|w*PKRy>yantlr3/extras.pyU]o6|ׯX{@P$W Nh8Y4(:i;+9M.y84]ɄGTuuzN[|Y9*;o;AMA|e~Yn=P{4#ܮrv,&I|ޘ[kj{Myazyəͽ>G|FJ;eBPzrt3Ps2&Qot{b ~Jfm!T9nk }0 Ҁi[h mlWOvL {Vfz8qZ1K!' 
r;C@Wf<Ӏfj %@[>{/(Ųm3UۿX>Tq_`e3n*46 &<1[4 XY]cў\r o4 hQbo zwNí9M~d0٥ sA/F"Zd9}X>:)wG2EAYyL$6%1Lez5y(%Z*l:쒮E-0e"ݐR]MH0W2*0e/L! EZlLčH0I^+ (?nI2b)"UxM,-ğ%Hqx^5梜JR ʲB72dl%`Zq!9@` XQY"*R,=Ev 3x2KYP~w ^ fOv!0}ЦM)T.#: UD^41έ, # CjTYA80|ۏӡv$/)o$?ޅ<`]8?v5U߭GÍGL1]}͸vOj~G,Svxfn "3;ľ{xg*zIs9름9Vng#Nu_PKj>81#antlr3/main.pyYQo~D]e5@hʢ3@G+5K;F~3HJ"[q@|7Z9Y"e0{X:: n]ь%3'N->_>^1/ |&HƁ(f1(KxdaO,R)LF< !KD <@#QgsH{BgA<, .abIBX "1;$H)ER[0_xQ0K%z"$J-<O%FQAC D)m*,p9F~٤*(haބ h5Pgق%2(V/,DirR[u<ʦ$C&wI(Yq`I"Ka.&qV|s%QfD#(9&iY!a{9gZcl^Ŀp\wv]ص<aF%Dqqٖ{N FCQw |S 333dct}?.'):0д/U>[# s8ztj!thiDh`V'c4ɰ bf!qӽwFpyny`G`׺$v79|۟;΀ ~w:3x#ȣg#rطQ.+ 4Qyba^0B W>wM v߯EqɳWYC-w푀L㊣Չrz>vځ}Msq\osC^g5nE%%/2- (5 Mazud3x(tj,^*C,'dFoLӾ\S!_`B]g2OeB}A7: \ YzZ>l3][ f$}J%lmN>/͈M¬ͽX,3#Yi(a/ h]Rd "%!t~(mi~ Y(^'B\u "1qTd<%dqglKDeDM´aɵ"d,؊wE H*.:6ZTnک%lYX+Zem-SDV¦gV&yث(=c#D4UFӧOlHo3YBpх[&F"[NJ/3}Is+Xp1%ęխnczPNlVSVt-SLmJMY}e1ڋYB)vޫytv0/raPՓ\1ШAz=h;(.Na@Z˻Kv>ZuKmVj,`6dȒټV|.x4:1m@ ~o澤QL=QӰ(Ė0U OS I_kb6)$߿{״$;zCk? K\.-49"W6c쁫.:\ETKeD)â-qƌȰP PkX}v7ENNu7Y'߱p4@M /IpD"ԥS]=9[PO:3JWOJ=ǫCA(Ӣs?)ɆX\a(N$}{YFuoj1mzqv#ɥ[ !;"ݷǗ!wk%LvC\?L60qbRٛ;a.VvjݗO_nFo$#ԚWV|KW-{UϨ}X/tKel}O.f]* bWPWWr1Z}*L-څ6C;쾼4WҊzOr}ѩ v0J} <}nW|M >3o-фu>oB_ymVw߰+'zdtnG]h)B 0=v z TʣQiS-0У7BJ7JF@Rrou{5^_in軰MPKK^>碌7antlr3/recognizers.py}wǑϫ ,;yw+R$d!HIY*0 'f3Q꫻{f@J_bLuwuu}Wu?>?:ΔVY'eݻoe[f4݇ޝ_=2M60~9OK|ƼN_. =V2c:tUuM7uV&fS&MUlJL<)o̢(W\g)JdU̳E6K$%&-WY]s.~岸K3+y/U_[(WeЬà La&#~%5`jd (~LZZ8!sL`;J9 N!N8~^"*#؈.*2KG9Ջ}72HMy3yo`7 fU4%| WE^p]ɤ`\ (oyUqCFLNgH"fS"qL&Uų`/'gӱ_89?cɩ?wv!'3ٙ990W&@9c6 yɫ9>9Dcx/`6yp89;8ڟ"'08>>7g/􊞏a:Ϗ Vt897q؀ y ?i7'g/g.@L9˓>|H<9ONB Bм}9O}}D\?#Z1~pN؝>0aa7pL^'8yyl"A;x)gYl.J875xJzY~gYZa.Ӌd]|Lˊztʾt8~x $rrd~?8?|}1ۏGCY0Xih鬸̉C*#ͮy!ȸ$L_-},/k8)7OY ,M㸨'^$2.Alksu ͛0\+seqrX=(V"ǁ4ym<}J!LZ{@Tݨ0_L 7xBKBE@P8,t9L(R708 ׍ςDI^fP0iY81i=FI 1N&Ѧ)Ah&\]z2Y5rȕ"w_vPƒݳ &k#e33,7cV3 /Y4M DE` i5L5+{,"rqѯBp?b~h7oi:X (Kǂ .25yƑG^3@ MH)[ƚ` zOjfiU-65J+65"24OKV^`+k QV`0EkNn`=/@I%!'/Pg}%t ;Y WYǣlSvCXhh Vx(֕s&+@HQy![ #}nn*Nkk%sPt7,Uu`r 6e-SX;@zl=CwS_r|@ {<|7b0 qS0eH"g8g6?3bO\hhQ'TWQ^90Ļ(\/k'T[BPj!$A~G>*9$B$r 2̀yy9/LlV]!TH⺨2dģKBpS۶f2l1E%ec(^ 0zOpPuoP % )!vEx5fMژ7?@$ԃ9(3DiK}ZfNqbΐ,Ft},0SJ {0_jR) ~5-Ȏ:ԏ١u/ (|֊j3P -1]ǀϒrc'(/컝ܾ}7B{b,w4m}yafCй4W 'Q&0SPǟ>k̽eR GUą5N~3ƫT" ~}5?#m8< /e+=8ø<S%N9a==T+S щyQRQgㄏfSzSX-J+Hl!%RǕZ8|i! ^ 'p]a G@J)`-b@f2vpltGYR0.'G:osDM4Fo4Ԗ.RABԌmv iyE+$`U!릤f=m d 7s*‰+%a u ]d@4!RNmT*)tIֶ#.<<3a2<4LޔYZ_yxyVɍrwyxAЄ^K/a[Ҳz1.^qB?ՠ# ~a={c¦0O |d}

python-antlr3-3.5.2/doxyfile0000644000175000017500000002351112653072152014520 0ustar zigozigo# -*- mode: doxymacs -*- #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- DOXYFILE_ENCODING = UTF-8 PROJECT_NAME = "ANTLR Python API" PROJECT_NUMBER = 3.3 OUTPUT_DIRECTORY = api CREATE_SUBDIRS = NO OUTPUT_LANGUAGE = English BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the ALWAYS_DETAILED_SEC = YES INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = YES STRIP_FROM_PATH = build/doc/ STRIP_FROM_INC_PATH = SHORT_NAMES = NO JAVADOC_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO DETAILS_AT_TOP = NO INHERIT_DOCS = YES SEPARATE_MEMBER_PAGES = NO TAB_SIZE = 8 ALIASES = OPTIMIZE_OUTPUT_FOR_C = NO OPTIMIZE_OUTPUT_JAVA = YES BUILTIN_STL_SUPPORT = NO CPP_CLI_SUPPORT = NO DISTRIBUTE_GROUP_DOC = NO SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- EXTRACT_ALL = YES EXTRACT_PRIVATE = YES EXTRACT_STATIC = YES EXTRACT_LOCAL_CLASSES = YES EXTRACT_LOCAL_METHODS = NO HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO HIDE_FRIEND_COMPOUNDS = NO HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO CASE_SENSE_NAMES = NO HIDE_SCOPE_NAMES = NO SHOW_INCLUDE_FILES = YES INLINE_INFO = YES SORT_MEMBER_DOCS = YES SORT_BRIEF_DOCS = NO SORT_BY_SCOPE_NAME = NO GENERATE_TODOLIST = YES GENERATE_TESTLIST = NO GENERATE_BUGLIST = NO GENERATE_DEPRECATEDLIST= NO ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = YES SHOW_DIRECTORIES = NO FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- QUIET = NO WARNINGS = YES WARN_IF_UNDOCUMENTED = YES WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = NO WARN_FORMAT = "$file:$line: $text" WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- INPUT = build/doc INPUT_ENCODING = UTF-8 FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ *.d \ *.java \ *.ii \ *.ixx \ *.ipp \ *.i++ \ *.inl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ *.idl \ *.odl \ *.cs \ *.php \ *.php3 \ *.inc \ *.m \ *.mm \ *.dox \ *.py RECURSIVE = YES EXCLUDE =
build/doc/antlr3/__init__.py EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = EXCLUDE_SYMBOLS = dfa exceptions recognizers streams tokens constants EXAMPLE_PATH = EXAMPLE_PATTERNS = * EXAMPLE_RECURSIVE = NO IMAGE_PATH = INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- SOURCE_BROWSER = YES INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES REFERENCED_BY_RELATION = NO REFERENCES_RELATION = NO REFERENCES_LINK_SOURCE = YES USE_HTAGS = NO VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = NO COLS_IN_ALPHA_INDEX = 5 IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- GENERATE_HTML = YES HTML_OUTPUT = . HTML_FILE_EXTENSION = .html HTML_HEADER = HTML_FOOTER = HTML_STYLESHEET = HTML_ALIGN_MEMBERS = YES GENERATE_HTMLHELP = NO CHM_FILE = HHC_LOCATION = GENERATE_CHI = NO BINARY_TOC = NO TOC_EXPAND = NO DISABLE_INDEX = NO ENUM_VALUES_PER_LINE = 4 GENERATE_TREEVIEW = NO TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- GENERATE_LATEX = NO LATEX_OUTPUT = latex LATEX_CMD_NAME = latex MAKEINDEX_CMD_NAME = makeindex COMPACT_LATEX = NO PAPER_TYPE = a4wide EXTRA_PACKAGES = LATEX_HEADER = PDF_HYPERLINKS = NO USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- GENERATE_RTF = NO RTF_OUTPUT = rtf COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- GENERATE_MAN = NO MAN_OUTPUT = man MAN_EXTENSION = .3 MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- GENERATE_XML = NO XML_OUTPUT = xml XML_SCHEMA = XML_DTD = XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- GENERATE_PERLMOD = NO PERLMOD_LATEX = NO PERLMOD_PRETTY = YES PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor 
#--------------------------------------------------------------------------- ENABLE_PREPROCESSING = YES MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = NO SEARCH_INCLUDES = YES INCLUDE_PATH = INCLUDE_FILE_PATTERNS = PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- TAGFILES = GENERATE_TAGFILE = ALLEXTERNALS = NO EXTERNAL_GROUPS = YES PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- CLASS_DIAGRAMS = NO MSCGEN_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = YES CLASS_GRAPH = YES COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES UML_LOOK = NO TEMPLATE_RELATIONS = NO INCLUDE_GRAPH = YES INCLUDED_BY_GRAPH = YES CALL_GRAPH = NO CALLER_GRAPH = NO GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES DOT_IMAGE_FORMAT = png DOT_PATH = DOTFILE_DIRS = DOT_GRAPH_MAX_NODES = 50 DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- SEARCHENGINE = NO #--------------------------------------------------------------------------- # doxypy integration #--------------------------------------------------------------------------- FILTER_SOURCE_FILES = YES INPUT_FILTER = "python doxypy.py" python-antlr3-3.5.2/tests/0000755000175000017500000000000012653072152014112 5ustar zigozigopython-antlr3-3.5.2/tests/t026actions.py0000644000175000017500000000306412653072152016543 0ustar zigozigoimport antlr3 import testbase import unittest class t026actions(testbase.ANTLRTest): def parserClass(self, base): class TParser(base): def __init__(self, *args, **kwargs): base.__init__(self, *args, **kwargs) self._errors = [] self._output = "" def capture(self, t): self._output += t def emitErrorMessage(self, msg): self._errors.append(msg) return TParser def lexerClass(self, base): class TLexer(base): def __init__(self, *args, **kwargs): base.__init__(self, *args, **kwargs) self._errors = [] self._output = "" def capture(self, t): self._output += t def emitErrorMessage(self, msg): self._errors.append(msg) return TLexer def setUp(self): self.compileGrammar() def testValid1(self): cStream = antlr3.StringStream('foobar _Ab98 \n A12sdf') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.prog() self.assertEqual( parser._output, 'init;after;finally;') self.assertEqual( lexer._output, 'action;u\'foobar\' 4 1 0 -1 0 0 5;attribute;action;u\'_Ab98\' 4 1 7 -1 0 7 11;attribute;action;u\'A12sdf\' 4 2 1 -1 0 15 20;attribute;') if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t012lexerXMLLexer.g0000644000175000017500000000524312653072152017375 0ustar zigozigolexer grammar t012lexerXMLLexer; options { language = Python; } @header { from cStringIO import StringIO } @lexer::init { self.outbuf = StringIO() } @lexer::members { def output(self, line): self.outbuf.write(line.encode('utf-8') + "\n") } DOCUMENT : XMLDECL? WS? DOCTYPE? WS? ELEMENT WS? 
; fragment DOCTYPE : '' ; fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ; fragment PI : '' ; fragment XMLDECL : '' ; fragment ELEMENT : ( START_TAG (ELEMENT | t=PCDATA {self.output("PCDATA: \""+$t.text+"\"")} | t=CDATA {self.output("CDATA: \""+$t.text+"\"")} | t=COMMENT {self.output("Comment: \""+$t.text+"\"")} | pi=PI )* END_TAG | EMPTY_ELEMENT ) ; fragment START_TAG : '<' WS? name=GENERIC_ID WS? {self.output("Start Tag: "+name.text)} ( ATTRIBUTE WS? )* '>' ; fragment EMPTY_ELEMENT : '<' WS? name=GENERIC_ID WS? {self.output("Empty Element: "+name.text)} ( ATTRIBUTE WS? )* '/>' ; fragment ATTRIBUTE : name=GENERIC_ID WS? '=' WS? value=VALUE {self.output("Attr: "+name.text+"="+value.text)} ; fragment END_TAG : '' {self.output("End Tag: "+name.text)} ; fragment COMMENT : '' ; fragment CDATA : '' ; fragment PCDATA : (~'<')+ ; fragment VALUE : ( '\"' (~'\"')* '\"' | '\'' (~'\'')* '\'' ) ; fragment GENERIC_ID : ( LETTER | '_' | ':') ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )* ; fragment LETTER : 'a'..'z' | 'A'..'Z' ; fragment WS : ( ' ' | '\t' | ( '\n' | '\r\n' | '\r' ) )+ ; python-antlr3-3.5.2/tests/t009lexer.g0000644000175000017500000000011612653072152016014 0ustar zigozigolexer grammar t009lexer; options { language = Python; } DIGIT: '0' .. '9'; python-antlr3-3.5.2/tests/t042ast.g0000644000175000017500000001215112653072152015463 0ustar zigozigogrammar t042ast; options { language = Python; output = AST; } tokens { VARDEF; FLOAT; EXPR; BLOCK; VARIABLE; FIELD; CALL; INDEX; FIELDACCESS; } @init { self.flag = False } r1 : INT ('+'^ INT)* ; r2 : 'assert'^ x=expression (':'! y=expression)? ';'! ; r3 : 'if'^ expression s1=statement ('else'! s2=statement)? ; r4 : 'while'^ expression statement ; r5 : 'return'^ expression? ';'! ; r6 : (INT|ID)+ ; r7 : INT -> ; r8 : 'var' ID ':' type -> ^('var' type ID) ; r9 : type ID ';' -> ^(VARDEF type ID) ; r10 : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))} ; r11 : expression -> ^(EXPR expression) | -> EXPR ; r12 : ID (',' ID)* -> ID+ ; r13 : type ID (',' ID)* ';' -> ^(type ID+) ; r14 : expression? statement* type+ -> ^(EXPR expression? statement* type+) ; r15 : INT -> INT INT ; r16 : 'int' ID (',' ID)* -> ^('int' ID)+ ; r17 : 'for' '(' start=statement ';' expression ';' next=statement ')' statement -> ^('for' $start expression $next statement) ; r18 : t='for' -> ^(BLOCK) ; r19 : t='for' -> ^(BLOCK[$t]) ; r20 : t='for' -> ^(BLOCK[$t,"FOR"]) ; r21 : t='for' -> BLOCK ; r22 : t='for' -> BLOCK[$t] ; r23 : t='for' -> BLOCK[$t,"FOR"] ; r24 : r=statement expression -> ^($r expression) ; r25 : r+=statement (',' r+=statement)+ expression -> ^($r expression) ; r26 : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+) ; r27 : r=statement expression -> ^($r ^($r expression)) ; r28 : ('foo28a'|'foo28b') -> ; r29 : (r+=statement)* -> ^(BLOCK $r+) ; r30 : statement* -> ^(BLOCK statement?) ; r31 : modifier type ID ('=' expression)? ';' -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?) -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?) -> ^(FIELD ID modifier* type expression?) ; r32[which] : ID INT -> {which==1}? ID -> {which==2}? INT -> // yield nothing as else-clause ; r33 : modifiers! statement ; r34 : modifiers! r34a[$modifiers.tree] //| modifiers! r33b[$modifiers.tree] ; r34a[mod] : 'class' ID ('extends' sup=type)? ( 'implements' i+=type (',' i+=type)*)? '{' statement* '}' -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* ) ; r35 : '{' 'extends' (sup=type)? 
'}' -> ^('extends' $sup)? ; r36 : 'if' '(' expression ')' s1=statement ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2) | -> ^('if' ^(EXPR expression) $s1) ) ; r37 : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )* ; r38 : INT ('+'^ INT)* ; r39 : (primary->primary) // set return tree to just primary ( '(' arg=expression ')' -> ^(CALL $r39 $arg) | '[' ie=expression ']' -> ^(INDEX $r39 $ie) | '.' p=primary -> ^(FIELDACCESS $r39 $p) )* ; r40 : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';' ; r41 : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';' ; r42 : ids+=ID (','! ids+=ID)* ; r43 returns [res] : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]} ; r44 : ids+=ID^ (','! ids+=ID^)* ; r45 : primary^ ; r46 returns [res] : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]} ; r47 : ids+=primary (','! ids+=primary)* ; r48 : ids+=. (','! ids+=.)* ; r49 : .^ ID ; r50 : ID -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID) ; /** templates tested: tokenLabelPropertyRef_tree */ r51 returns [res] : ID t=ID ID { $res = $t.tree } ; /** templates tested: rulePropertyRef_tree */ r52 returns [res] @after { $res = $tree } : ID ; /** templates tested: ruleLabelPropertyRef_tree */ r53 returns [res] : t=primary { $res = $t.tree } ; /** templates tested: ruleSetPropertyRef_tree */ r54 returns [res] @after { $tree = $t.tree; } : ID t=expression ID ; /** backtracking */ r55 options { backtrack=true; k=1; } : (modifier+ INT)=> modifier+ expression | modifier+ statement ; /** templates tested: rewriteTokenRef with len(args)>0 */ r56 : t=ID* -> ID[$t,'foo'] ; /** templates tested: rewriteTokenRefRoot with len(args)>0 */ r57 : t=ID* -> ^(ID[$t,'foo']) ; /** templates tested: ??? */ r58 : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^ ; /** templates tested: rewriteTokenListLabelRefRoot */ r59 : (t+=ID)+ statement -> ^($t statement)+ ; primary : ID ; expression : r1 ; statement : 'fooze' | 'fooze2' ; modifiers : modifier+ ; modifier : 'public' | 'private' ; type : 'int' | 'bool' ; ID : 'a'..'z' + ; INT : '0'..'9' +; WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; python-antlr3-3.5.2/tests/t038lexerRuleLabel.g0000644000175000017500000000073612653072152017616 0ustar zigozigolexer grammar t038lexerRuleLabel; options { language = Python; } A: 'a'..'z' WS '0'..'9' { print $WS print $WS.type print $WS.line print $WS.pos print $WS.channel print $WS.index print $WS.text } ; fragment WS : ( ' ' | '\t' | ( '\n' | '\r\n' | '\r' ) )+ { $channel = HIDDEN } ; python-antlr3-3.5.2/tests/t020fuzzyLexer.g0000644000175000017500000000345112653072152017062 0ustar zigozigolexer grammar t020fuzzyLexer; options { language=Python; filter=true; } @header { from cStringIO import StringIO } @init { self.output = StringIO() } IMPORT : 'import' WS name=QIDStar WS? ';' ; /** Avoids having "return foo;" match as a field */ RETURN : 'return' (options {greedy=false;}:.)* ';' ; CLASS : 'class' WS name=ID WS? ('extends' WS QID WS?)? ('implements' WS QID WS? (',' WS? QID WS?)*)? '{' {self.output.write("found class "+$name.text+"\n")} ; METHOD : TYPE WS name=ID WS? '(' ( ARG WS? (',' WS? ARG WS?)* )? ')' WS? ('throws' WS QID WS? (',' WS? QID WS?)*)? '{' {self.output.write("found method "+$name.text+"\n");} ; FIELD : TYPE WS name=ID '[]'? WS? (';'|'=') {self.output.write("found var "+$name.text+"\n");} ; STAT: ('if'|'while'|'switch'|'for') WS? '(' ; CALL : name=QID WS? '(' {self.output.write("found call "+$name.text+"\n");} ; COMMENT : '/*' (options {greedy=false;} : . 
)* '*/' {self.output.write("found comment "+self.getText()+"\n");} ; SL_COMMENT : '//' (options {greedy=false;} : . )* '\n' {self.output.write("found // comment "+self.getText()+"\n");} ; STRING : '"' (options {greedy=false;}: ESC | .)* '"' ; CHAR : '\'' (options {greedy=false;}: ESC | .)* '\'' ; WS : (' '|'\t'|'\n')+ ; fragment QID : ID ('.' ID)* ; /** QID cannot see beyond end of token so using QID '.*'? somewhere won't * ever match since k=1 lookahead in the QID loop of '.' will make it loop. * I made this rule to compensate. */ fragment QIDStar : ID ('.' ID)* '.*'? ; fragment TYPE: QID '[]'? ; fragment ARG : TYPE WS ID ; fragment ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')* ; fragment ESC : '\\' ('"'|'\''|'\\') ; python-antlr3-3.5.2/tests/t047treeparser.py0000644000175000017500000001036612653072152017265 0ustar zigozigoimport unittest import textwrap import antlr3 import antlr3.tree import testbase class T(testbase.ANTLRTest): def walkerClass(self, base): class TWalker(base): def __init__(self, *args, **kwargs): base.__init__(self, *args, **kwargs) self.traces = [] def traceIn(self, ruleName, ruleIndex): self.traces.append('>'+ruleName) def traceOut(self, ruleName, ruleIndex): self.traces.append('<'+ruleName) def recover(self, input, re): # no error recovery yet, just crash! raise return TWalker def setUp(self): self.compileGrammar() self.compileGrammar('t047treeparserWalker.g', options='-trace') def testWalker(self): input = textwrap.dedent( '''\ char c; int x; void bar(int x); int foo(int y, char d) { int i; for (i=0; i<3; i=i+1) { x=3; y=5; } } ''') cStream = antlr3.StringStream(input) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) r = parser.program() self.failUnlessEqual( r.tree.toStringTree(), "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))" ) nodes = antlr3.tree.CommonTreeNodeStream(r.tree) nodes.setTokenStream(tStream) walker = self.getWalker(nodes) walker.program() # FIXME: need to crosscheck with Java target (compile walker with # -trace option), if this is the real list. 
For now I'm happy that # it does not crash ;) self.failUnlessEqual( walker.traces, [ '>program', '>declaration', '>variable', '>type', 'declarator', 'declaration', '>variable', '>type', 'declarator', 'declaration', '>functionHeader', '>type', 'formalParameter', '>type', 'declarator', 'declaration', '>functionHeader', '>type', 'formalParameter', '>type', 'declarator', 'formalParameter', '>type', 'declarator', 'block', '>variable', '>type', 'declarator', 'stat', '>forStat', '>expr', '>expr', '>atom', 'expr', '>expr', '>atom', 'expr', '>atom', 'expr', '>expr', '>expr', '>atom', 'expr', '>atom', 'block', '>stat', '>expr', '>expr', '>atom', 'stat', '>expr', '>expr', '>atom', ' WS+ NAME | ) EOF ; NAME: ('a'..'z') ('a'..'z' | '0'..'9')+; NUMBER: ('0'..'9')+; WS: ' '+; python-antlr3-3.5.2/tests/t014parser.g0000644000175000017500000000113212653072152016164 0ustar zigozigogrammar t014parser; options { language = Python; } @parser::init { self.events = [] self.reportedErrors = [] } @parser::members { def emitErrorMessage(self, msg): self.reportedErrors.append(msg) } document: ( declaration | call )* EOF ; declaration: 'var' t=IDENTIFIER ';' {self.events.append(('decl', $t.text))} ; call: t=IDENTIFIER '(' ')' ';' {self.events.append(('call', $t.text))} ; IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; WS: (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;}; python-antlr3-3.5.2/tests/t006lexer.py0000644000175000017500000000334212653072152016217 0ustar zigozigoimport antlr3 import testbase import unittest class t006lexer(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! 
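# re-raising here (instead of using ANTLR's default single-token recovery) is what lets testMalformedInput below catch the MismatchedTokenException and assert on its expecting/unexpectedType/line attributes directly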
raise re return TLexer def testValid(self): stream = antlr3.StringStream('fofaaooa') lexer = self.getLexer(stream) token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 0, token.start assert token.stop == 1, token.stop assert token.text == 'fo', token.text token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 2, token.start assert token.stop == 7, token.stop assert token.text == 'faaooa', token.text token = lexer.nextToken() assert token.type == self.lexerModule.EOF def testMalformedInput(self): stream = antlr3.StringStream('fofoaooaoa2') lexer = self.getLexer(stream) lexer.nextToken() lexer.nextToken() try: token = lexer.nextToken() raise AssertionError, token except antlr3.MismatchedTokenException, exc: assert exc.expecting == 'f', repr(exc.expecting) assert exc.unexpectedType == '2', repr(exc.unexpectedType) assert exc.charPositionInLine == 10, repr(exc.charPositionInLine) assert exc.line == 1, repr(exc.line) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t047treeparserWalker.g0000644000175000017500000000165012653072152020225 0ustar zigozigotree grammar t047treeparserWalker; options { language=Python; tokenVocab=t047treeparser; ASTLabelType=CommonTree; } program : declaration+ ; declaration : variable | ^(FUNC_DECL functionHeader) | ^(FUNC_DEF functionHeader block) ; variable returns [res] : ^(VAR_DEF type declarator) { $res = $declarator.text; } ; declarator : ID ; functionHeader : ^(FUNC_HDR type ID formalParameter+) ; formalParameter : ^(ARG_DEF type declarator) ; type : 'int' | 'char' | 'void' | ID ; block : ^(BLOCK variable* stat*) ; stat: forStat | expr | block ; forStat : ^('for' expr expr expr block) ; expr: ^(EQEQ expr expr) | ^(LT expr expr) | ^(PLUS expr expr) | ^(EQ ID expr) | atom ; atom : ID | INT ; python-antlr3-3.5.2/tests/t016actions.g0000644000175000017500000000073412653072152016341 0ustar zigozigogrammar t016actions; options { language = Python; } declaration returns [name] : functionHeader ';' {$name = $functionHeader.name} ; functionHeader returns [name] : type ID {$name = $ID.text} ; type : 'int' | 'char' | 'void' ; ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* ; WS : ( ' ' | '\t' | '\r' | '\n' )+ {$channel=HIDDEN} ; python-antlr3-3.5.2/tests/t021hoist.g0000644000175000017500000000115512653072152016021 0ustar zigozigogrammar t021hoist; options { language=Python; } /* With this true, enum is seen as a keyword. False, it's an identifier */ @parser::init { self.enableEnum = False } stat returns [enumIs] : identifier {enumIs = "ID"} | enumAsKeyword {enumIs = "keyword"} ; identifier : ID | enumAsID ; enumAsKeyword : {self.enableEnum}? 'enum' ; enumAsID : {not self.enableEnum}? 'enum' ; ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* ; INT : ('0'..'9')+ ; WS : ( ' ' | '\t' | '\r' | '\n' )+ {$channel=HIDDEN} ; python-antlr3-3.5.2/tests/t012lexerXML.input0000644000175000017500000000053412653072152017304 0ustar zigozigo ]> Text öäüß & < python-antlr3-3.5.2/tests/t011lexer.py0000644000175000017500000000447612653072152016224 0ustar zigozigoimport antlr3 import testbase import unittest class t011lexer(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! 
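# same pattern as t006lexer.py: propagating the exception lets testMalformedInput inspect the NoViableAltException's unexpectedType, charPositionInLine and line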
raise re return TLexer def testValid(self): stream = antlr3.StringStream('foobar _Ab98 \n A12sdf') lexer = self.getLexer(stream) token = lexer.nextToken() assert token.type == self.lexerModule.IDENTIFIER assert token.start == 0, token.start assert token.stop == 5, token.stop assert token.text == 'foobar', token.text token = lexer.nextToken() assert token.type == self.lexerModule.WS assert token.start == 6, token.start assert token.stop == 6, token.stop assert token.text == ' ', token.text token = lexer.nextToken() assert token.type == self.lexerModule.IDENTIFIER assert token.start == 7, token.start assert token.stop == 11, token.stop assert token.text == '_Ab98', token.text token = lexer.nextToken() assert token.type == self.lexerModule.WS assert token.start == 12, token.start assert token.stop == 14, token.stop assert token.text == ' \n ', token.text token = lexer.nextToken() assert token.type == self.lexerModule.IDENTIFIER assert token.start == 15, token.start assert token.stop == 20, token.stop assert token.text == 'A12sdf', token.text token = lexer.nextToken() assert token.type == self.lexerModule.EOF def testMalformedInput(self): stream = antlr3.StringStream('a-b') lexer = self.getLexer(stream) lexer.nextToken() try: token = lexer.nextToken() raise AssertionError, token except antlr3.NoViableAltException, exc: assert exc.unexpectedType == '-', repr(exc.unexpectedType) assert exc.charPositionInLine == 1, repr(exc.charPositionInLine) assert exc.line == 1, repr(exc.line) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t055templates.py0000644000175000017500000002731012653072152017103 0ustar zigozigoimport unittest import textwrap import antlr3 import antlr3.tree import stringtemplate3 import testbase import sys import os from StringIO import StringIO class T(testbase.ANTLRTest): def execParser(self, grammar, grammarEntry, input, group=None): lexerCls, parserCls = self.compileInlineGrammar(grammar) cStream = antlr3.StringStream(input) lexer = lexerCls(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = parserCls(tStream) if group is not None: parser.templateLib = group result = getattr(parser, grammarEntry)() if result.st is not None: return result.st.toString() return None def testInlineTemplate(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a : ID INT -> template(id={$ID.text}, int={$INT.text}) "id=<id>, int=<int>" ; ID : 'a'..'z'+; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc 34" ) self.failUnlessEqual("id=abc, int=34", found) def testExternalTemplate(self): templates = textwrap.dedent( '''\ group T; expr(args, op) ::= << [<args; separator={<op>}>] >> ''' ) group = stringtemplate3.StringTemplateGroup( file=StringIO(templates), lexer='angle-bracket' ) grammar = textwrap.dedent( r'''grammar T2; options { language=Python; output=template; } a : r+=arg OP r+=arg -> expr(op={$OP.text}, args={$r}) ; arg: ID -> template(t={$ID.text}) "<t>"; ID : 'a'..'z'+; OP: '+'; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "a + b", group ) self.failUnlessEqual("[a+b]", found) def testEmptyTemplate(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a : ID INT -> ; ID : 'a'..'z'+; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc 34" ) self.failUnless(found is None) def testList(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a: 
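/* Editor's note (added): the template rewrite below joins the collected
   r templates with a StringTemplate expression of the form
   <r; separator=",">; the "abc,def,ghi" expectation in this test relies
   on that separator. */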
(r+=b)* EOF -> template(r={$r}) "<r; separator=\",\">" ; b: ID -> template(t={$ID.text}) "<t>" ; ID : 'a'..'z'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc def ghi" ) self.failUnlessEqual("abc,def,ghi", found) def testAction(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a: ID -> { stringtemplate3.StringTemplate("hello") } ; ID : 'a'..'z'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc" ) self.failUnlessEqual("hello", found) def testTemplateExpressionInAction(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a: ID { $st = %{"hello"} } ; ID : 'a'..'z'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc" ) self.failUnlessEqual("hello", found) def testTemplateExpressionInAction2(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a: ID { res = %{"hello <foo>"} %res.foo = "world"; } -> { res } ; ID : 'a'..'z'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc" ) self.failUnlessEqual("hello world", found) def testIndirectTemplateConstructor(self): templates = textwrap.dedent( '''\ group T; expr(args, op) ::= << [<args; separator={<op>}>] >> ''' ) group = stringtemplate3.StringTemplateGroup( file=StringIO(templates), lexer='angle-bracket' ) grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=template; } a: ID { $st = %({"expr"})(args={[1, 2, 3]}, op={"+"}) } ; ID : 'a'..'z'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc", group ) self.failUnlessEqual("[1+2+3]", found) def testPredicates(self): grammar = textwrap.dedent( r'''grammar T3; options { language=Python; output=template; } a : ID INT -> {$ID.text=='a'}? template(int={$INT.text}) "A: <int>" -> {$ID.text=='b'}? 
template(int={$INT.text}) "B: <int>" -> template(int={$INT.text}) "C: <int>" ; ID : 'a'..'z'+; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "b 34" ) self.failUnlessEqual("B: 34", found) def testBacktrackingMode(self): grammar = textwrap.dedent( r'''grammar T4; options { language=Python; output=template; backtrack=true; } a : (ID INT)=> ID INT -> template(id={$ID.text}, int={$INT.text}) "id=<id>, int=<int>" ; ID : 'a'..'z'+; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''' ) found = self.execParser( grammar, 'a', "abc 34" ) self.failUnlessEqual("id=abc, int=34", found) def testRewrite(self): grammar = textwrap.dedent( r'''grammar T5; options { language=Python; output=template; rewrite=true; } prog: stat+; stat : 'if' '(' expr ')' stat | 'return' return_expr ';' | '{' stat* '}' | ID '=' expr ';' ; return_expr : expr -> template(t={$text}) <<boom(<t>)>> ; expr : ID | INT ; ID: 'a'..'z'+; INT: '0'..'9'+; WS: (' '|'\n')+ {$channel=HIDDEN;} ; COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ; ''' ) input = textwrap.dedent( '''\ if ( foo ) { b = /* bla */ 2; return 1 /* foo */; } /* gnurz */ return 12; ''' ) lexerCls, parserCls = self.compileInlineGrammar(grammar) cStream = antlr3.StringStream(input) lexer = lexerCls(cStream) tStream = antlr3.TokenRewriteStream(lexer) parser = parserCls(tStream) result = parser.prog() found = tStream.toString() expected = textwrap.dedent( '''\ if ( foo ) { b = /* bla */ 2; return boom(1) /* foo */; } /* gnurz */ return boom(12); ''' ) self.failUnlessEqual(expected, found) def testTreeRewrite(self): grammar = textwrap.dedent( r'''grammar T6; options { language=Python; output=AST; } tokens { BLOCK; ASSIGN; } prog: stat+; stat : IF '(' e=expr ')' s=stat -> ^(IF $e $s) | RETURN expr ';' -> ^(RETURN expr) | '{' stat* '}' -> ^(BLOCK stat*) | ID '=' expr ';' -> ^(ASSIGN ID expr) ; expr : ID | INT ; IF: 'if'; RETURN: 'return'; ID: 'a'..'z'+; INT: '0'..'9'+; WS: (' '|'\n')+ {$channel=HIDDEN;} ; COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ; ''' ) treeGrammar = textwrap.dedent( r'''tree grammar T6Walker; options { language=Python; tokenVocab=T6; ASTLabelType=CommonTree; output=template; rewrite=true; } prog: stat+; stat : ^(IF expr stat) | ^(RETURN return_expr) | ^(BLOCK stat*) | ^(ASSIGN ID expr) ; return_expr : expr -> template(t={$text}) <<boom(<t>)>> ; expr : ID | INT ; ''' ) input = textwrap.dedent( '''\ if ( foo ) { b = /* bla */ 2; return 1 /* foo */; } /* gnurz */ return 12; ''' ) lexerCls, parserCls = self.compileInlineGrammar(grammar) walkerCls = self.compileInlineGrammar(treeGrammar) cStream = antlr3.StringStream(input) lexer = lexerCls(cStream) tStream = antlr3.TokenRewriteStream(lexer) parser = parserCls(tStream) tree = parser.prog().tree nodes = antlr3.tree.CommonTreeNodeStream(tree) nodes.setTokenStream(tStream) walker = walkerCls(nodes) walker.prog() found = tStream.toString() expected = textwrap.dedent( '''\ if ( foo ) { b = /* bla */ 2; return boom(1) /* foo */; } /* gnurz */ return boom(12); ''' ) self.failUnlessEqual(expected, found) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t026actions.g0000644000175000017500000000132712653072152016341 0ustar zigozigogrammar t026actions; options { language = Python; } @lexer::init { self.foobar = 'attribute;' } prog @init { self.capture('init;') } @after { self.capture('after;') } : IDENTIFIER EOF ; catch [ RecognitionException, exc ] { self.capture('catch;') raise } finally { self.capture('finally;') } IDENTIFIER : 
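/* Editor's note (added): the action below shows the attributes ANTLR
   exposes inside a lexer rule -- $text, $type, $line, $pos, $index,
   $channel, $start and $stop all refer to the token being matched. */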
('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* { # a comment self.capture('action;') self.capture('\%r \%r \%r \%r \%r \%r \%r \%r;' \% ($text, $type, $line, $pos, $index, $channel, $start, $stop)) if True: self.capture(self.foobar) } ; WS: (' ' | '\n')+; python-antlr3-3.5.2/tests/t027eof.g0000644000175000017500000000012012653072152015441 0ustar zigozigolexer grammar t027eof; options { language=Python; } END: EOF; SPACE: ' '; python-antlr3-3.5.2/tests/t047treeparser.g0000644000175000017500000000322212653072152017054 0ustar zigozigogrammar t047treeparser; options { language=Python; output=AST; } tokens { VAR_DEF; ARG_DEF; FUNC_HDR; FUNC_DECL; FUNC_DEF; BLOCK; } program : declaration+ ; declaration : variable | functionHeader ';' -> ^(FUNC_DECL functionHeader) | functionHeader block -> ^(FUNC_DEF functionHeader block) ; variable : type declarator ';' -> ^(VAR_DEF type declarator) ; declarator : ID ; functionHeader : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' -> ^(FUNC_HDR type ID formalParameter+) ; formalParameter : type declarator -> ^(ARG_DEF type declarator) ; type : 'int' | 'char' | 'void' | ID ; block : lc='{' variable* stat* '}' -> ^(BLOCK[$lc,"BLOCK"] variable* stat*) ; stat: forStat | expr ';'! | block | assignStat ';'! | ';'! ; forStat : 'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block -> ^('for' $start expr $next block) ; assignStat : ID EQ expr -> ^(EQ ID expr) ; expr: condExpr ; condExpr : aexpr ( ('=='^ | '<'^) aexpr )? ; aexpr : atom ( '+'^ atom )* ; atom : ID | INT | '(' expr ')' -> expr ; FOR : 'for' ; INT_TYPE : 'int' ; CHAR: 'char'; VOID: 'void'; ID : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')* ; INT : ('0'..'9')+ ; EQ : '=' ; EQEQ : '==' ; LT : '<' ; PLUS : '+' ; WS : ( ' ' | '\t' | '\r' | '\n' )+ { $channel=HIDDEN } ; python-antlr3-3.5.2/tests/t050decorate.g0000644000175000017500000000076512653072152016471 0ustar zigozigogrammar t050decorate; options { language = Python; } @header { def logme(func): def decorated(self, *args, **kwargs): self.events.append('before') try: return func(self, *args, **kwargs) finally: self.events.append('after') return decorated } @parser::init { self.events = [] } document @decorate { @logme } : IDENTIFIER ; IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; python-antlr3-3.5.2/tests/t013parser.g0000644000175000017500000000065012653072152016167 0ustar zigozigogrammar t013parser; options { language = Python; } @parser::init { self.identifiers = [] self.reportedErrors = [] } @parser::members { def foundIdentifier(self, name): self.identifiers.append(name) def emitErrorMessage(self, msg): self.reportedErrors.append(msg) } document: t=IDENTIFIER {self.foundIdentifier($t.text)} ; IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*; python-antlr3-3.5.2/tests/t043synpred.g0000644000175000017500000000017312653072152016362 0ustar zigozigogrammar t043synpred; options { language = Python; } a: ((s+ P)=> s+ b)? 
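/* Editor's note (added): (s+ P)=> is a syntactic predicate; the optional
   subrule is entered only if spaces followed by '+' can be matched ahead.
   A minimal hedged example of the same shape would be
   r: (A B)=> A B | A C; */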
E; b: P 'foo'; s: S; S: ' '; P: '+'; E: '>'; python-antlr3-3.5.2/tests/t015calc.py0000644000175000017500000000224312653072152016001 0ustar zigozigoimport antlr3 import testbase import unittest class t015calc(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def _evaluate(self, expr, expected, errors=[]): cStream = antlr3.StringStream(expr) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) result = parser.evaluate() assert result == expected, "%r != %r" % (result, expected) assert len(parser.reportedErrors) == len(errors), parser.reportedErrors def testValid01(self): self._evaluate("1 + 2", 3) def testValid02(self): self._evaluate("1 + 2 * 3", 7) def testValid03(self): self._evaluate("10 / 2", 5) def testValid04(self): self._evaluate("6 + 2*(3+1) - 4", 10) def testMalformedInput(self): self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"]) # FIXME: most parse errors result in TypeErrors in action code, because # rules return None, which is then added/multiplied... to integers. # evaluate("6 - foo 2", 4, ["some error"]) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t013parser.py0000644000175000017500000000172712653072152016377 0ustar zigozigoimport antlr3 import testbase import unittest class t013parser(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def testValid(self): cStream = antlr3.StringStream('foobar') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.document() assert len(parser.reportedErrors) == 0, parser.reportedErrors assert parser.identifiers == ['foobar'] def testMalformedInput1(self): cStream = antlr3.StringStream('') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.document() # FIXME: currently strings with formatted errors are collected # can't check error locations yet assert len(parser.reportedErrors) == 1, parser.reportedErrors if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t019lexer.py0000644000175000017500000000077612653072152016233 0ustar zigozigoimport os import antlr3 import testbase import unittest class t019lexer(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def testValid(self): inputPath = os.path.splitext(__file__)[0] + '.input' stream = antlr3.StringStream(open(inputPath).read()) lexer = self.getLexer(stream) while True: token = lexer.nextToken() if token.type == antlr3.EOF: break if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t004lexer.py0000644000175000017500000000367712653072152016230 0ustar zigozigoimport antlr3 import testbase import unittest class t004lexer(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! 
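# --- Editor's sketch (not in the original sources): the drain-until-EOF
# --- loop from t019lexer.py above, factored into a reusable helper that
# --- works with any generated ANTLR3 Python lexer instance.
import antlr3

def all_tokens(lexer):
    """Collect tokens until EOF, mirroring the test's scan loop."""
    tokens = []
    while True:
        token = lexer.nextToken()
        if token.type == antlr3.EOF:
            return tokens
        tokens.append(token)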
raise re return TLexer def testValid(self): stream = antlr3.StringStream('ffofoofooo') lexer = self.getLexer(stream) token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 0, token.start assert token.stop == 0, token.stop assert token.text == 'f', token.text token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 1, token.start assert token.stop == 2, token.stop assert token.text == 'fo', token.text token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 3, token.start assert token.stop == 5, token.stop assert token.text == 'foo', token.text token = lexer.nextToken() assert token.type == self.lexerModule.FOO assert token.start == 6, token.start assert token.stop == 9, token.stop assert token.text == 'fooo', token.text token = lexer.nextToken() assert token.type == self.lexerModule.EOF def testMalformedInput(self): stream = antlr3.StringStream('2') lexer = self.getLexer(stream) try: token = lexer.nextToken() self.fail() except antlr3.MismatchedTokenException, exc: self.failUnlessEqual(exc.expecting, 'f') self.failUnlessEqual(exc.unexpectedType, '2') if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t006lexer.g0000644000175000017500000000012212653072152016006 0ustar zigozigolexer grammar t006lexer; options { language = Python; } FOO: 'f' ('o' | 'a')*; python-antlr3-3.5.2/tests/t048rewrite2.g0000644000175000017500000000023312653072152016443 0ustar zigozigolexer grammar t048rewrite2; options { language=Python; } ID : 'a'..'z'+; INT : '0'..'9'+; SEMI : ';'; PLUS : '+'; MUL : '*'; ASSIGN : '='; WS : ' '+; python-antlr3-3.5.2/tests/t012lexerXML.py0000644000175000017500000001160712653072152016600 0ustar zigozigoimport antlr3 import testbase import unittest import os import sys from cStringIO import StringIO import difflib import textwrap class t012lexerXML(testbase.ANTLRTest): def setUp(self): self.compileGrammar('t012lexerXMLLexer.g') def lexerClass(self, base): class TLexer(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! 
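# --- Editor's sketch (assumption-marked): the golden-file comparison
# --- used by testValid below, reduced to a standalone helper; only the
# --- .output file convention is taken from this test.
import difflib

def diff_against_golden(actual, golden_path):
    """Return a unified diff of actual text against a golden file."""
    expected = open(golden_path).read()
    return ''.join(difflib.unified_diff(
        actual.splitlines(1), expected.splitlines(1),
        'actual', golden_path))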
raise re return TLexer def testValid(self): inputPath = os.path.splitext(__file__)[0] + '.input' stream = antlr3.StringStream(unicode(open(inputPath).read(), 'utf-8')) lexer = self.getLexer(stream) while True: token = lexer.nextToken() if token.type == self.lexerModule.EOF: break output = unicode(lexer.outbuf.getvalue(), 'utf-8') outputPath = os.path.splitext(__file__)[0] + '.output' testOutput = unicode(open(outputPath).read(), 'utf-8') success = (output == testOutput) if not success: d = difflib.Differ() r = d.compare(output.splitlines(1), testOutput.splitlines(1)) self.fail( ''.join([l.encode('ascii', 'backslashreplace') for l in r]) ) def testMalformedInput1(self): input = textwrap.dedent("""\ <?xml version='1.0'?> <document d> </document> """) stream = antlr3.StringStream(input) lexer = self.getLexer(stream) try: while True: token = lexer.nextToken() if token.type == antlr3.EOF: break raise AssertionError except antlr3.NoViableAltException, exc: assert exc.unexpectedType == '>', repr(exc.unexpectedType) assert exc.charPositionInLine == 11, repr(exc.charPositionInLine) assert exc.line == 2, repr(exc.line) def testMalformedInput2(self): input = textwrap.dedent("""\ <?tml version='1.0'?> <document/> """) stream = antlr3.StringStream(input) lexer = self.getLexer(stream) try: while True: token = lexer.nextToken() if token.type == antlr3.EOF: break raise AssertionError except antlr3.MismatchedSetException, exc: assert exc.unexpectedType == 't', repr(exc.unexpectedType) assert exc.charPositionInLine == 2, repr(exc.charPositionInLine) assert exc.line == 1, repr(exc.line) def testMalformedInput3(self): input = textwrap.dedent("""\ <?xml version='1.0'?> <docu ment attr="foo"/> """) stream = antlr3.StringStream(input) lexer = self.getLexer(stream) try: while True: token = lexer.nextToken() if token.type == antlr3.EOF: break raise AssertionError except antlr3.NoViableAltException, exc: assert exc.unexpectedType == 'a', repr(exc.unexpectedType) assert exc.charPositionInLine == 11, repr(exc.charPositionInLine) assert exc.line == 2, repr(exc.line) if __name__ == '__main__': unittest.main() ## # run an infinite loop with randomly mangled input ## while True: ## print "ping" ## input = """\ ## ## ## ## ## ]> ## ## ## Text ## ## & ## < ## ## ## ## ## """ ## import random ## input = list(input) # make it mutable ## for _ in range(3): ## p1 = random.randrange(len(input)) ## p2 = random.randrange(len(input)) ## c1 = input[p1] ## input[p1] = input[p2] ## input[p2] = c1 ## input = ''.join(input) # back to string ## stream = antlr3.StringStream(input) ## lexer = Lexer(stream) ## try: ## while True: ## token = lexer.nextToken() ## if token.type == EOF: ## break ## except antlr3.RecognitionException, exc: ## print exc ## for l in input.splitlines()[0:exc.line]: ## print l ## print ' '*exc.charPositionInLine + '^' ## except BaseException, exc: ## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())]) ## print "%s at %d:%d" % (exc, stream.line, stream.charPositionInLine) ## print ## raise python-antlr3-3.5.2/tests/t009lexer.py0000644000175000017500000000362512653072152016226 0ustar zigozigoimport antlr3 import testbase import unittest class t009lexer(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! 
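# --- Editor's sketch (added for illustration): the exception attributes
# --- asserted throughout these tests, summarised in one helper.
def describe_error(exc):
    """Format an antlr3.RecognitionException location the way the
    surrounding tests check it: unexpected type, then line:column."""
    return '%r at %d:%d' % (
        exc.unexpectedType, exc.line, exc.charPositionInLine)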
raise re return TLexer def testValid(self): stream = antlr3.StringStream('085') lexer = self.getLexer(stream) token = lexer.nextToken() assert token.type == self.lexerModule.DIGIT assert token.start == 0, token.start assert token.stop == 0, token.stop assert token.text == '0', token.text token = lexer.nextToken() assert token.type == self.lexerModule.DIGIT assert token.start == 1, token.start assert token.stop == 1, token.stop assert token.text == '8', token.text token = lexer.nextToken() assert token.type == self.lexerModule.DIGIT assert token.start == 2, token.start assert token.stop == 2, token.stop assert token.text == '5', token.text token = lexer.nextToken() assert token.type == self.lexerModule.EOF def testMalformedInput(self): stream = antlr3.StringStream('2a') lexer = self.getLexer(stream) lexer.nextToken() try: token = lexer.nextToken() raise AssertionError, token except antlr3.MismatchedSetException, exc: # TODO: This should provide more useful information assert exc.expecting is None assert exc.unexpectedType == 'a', repr(exc.unexpectedType) assert exc.charPositionInLine == 1, repr(exc.charPositionInLine) assert exc.line == 1, repr(exc.line) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t022scopes.py0000644000175000017500000000751512653072152016400 0ustar zigozigoimport antlr3 import testbase import unittest import textwrap class t022scopes(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def parserClass(self, base): class TParser(base): def emitErrorMessage(self, msg): # report errors to /dev/null pass def reportError(self, re): # no error recovery yet, just crash! raise re return TParser def testa1(self): cStream = antlr3.StringStream('foobar') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.a() def testb1(self): cStream = antlr3.StringStream('foobar') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) try: parser.b(False) self.fail() except antlr3.RecognitionException: pass def testb2(self): cStream = antlr3.StringStream('foobar') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.b(True) def testc1(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { int i; int j; i = 0; } ''')) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) symbols = parser.c() self.failUnlessEqual( symbols, set(['i', 'j']) ) def testc2(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { int i; int j; i = 0; x = 4; } ''')) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) try: parser.c() self.fail() except RuntimeError, exc: self.failUnlessEqual(exc.args[0], 'x') def testd1(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { int i; int j; i = 0; { int i; int x; x = 5; } } ''')) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) symbols = parser.d() self.failUnlessEqual( symbols, set(['i', 'j']) ) def teste1(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { { { { 12 } } } } ''')) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) res = parser.e() self.failUnlessEqual(res, 12) def testf1(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { { { { 12 } } } } ''')) lexer = self.getLexer(cStream) tStream = 
antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) res = parser.f() self.failUnlessEqual(res, None) def testf2(self): cStream = antlr3.StringStream( textwrap.dedent('''\ { { 12 } } ''')) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) res = parser.f() self.failUnlessEqual(res, None) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t033backtracking.py0000644000175000017500000000130112653072152017514 0ustar zigozigoimport antlr3 import testbase import unittest class t033backtracking(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def parserClass(self, base): class TParser(base): def recover(self, input, re): # no error recovery yet, just crash! raise return TParser @testbase.broken("Some bug in the tool", SyntaxError) def testValid1(self): cStream = antlr3.StringStream('int a;') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) events = parser.translation_unit() if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t045dfabug.py0000644000175000017500000000063112653072152016331 0ustar zigozigoimport unittest import textwrap import antlr3 import testbase class T(testbase.ANTLRTest): def testbug(self): self.compileGrammar() cStream = antlr3.StringStream("public fooze") lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) parser.r() if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t045dfabug.g0000644000175000017500000000071212653072152016127 0ustar zigozigogrammar t045dfabug; options { language = Python; output = AST; } // this rule used to generate an infinite loop in DFA.predict r options { backtrack=true; } : (modifier+ INT)=> modifier+ expression | modifier+ statement ; expression : INT '+' INT ; statement : 'fooze' | 'fooze2' ; modifier : 'public' | 'private' ; ID : 'a'..'z' + ; INT : '0'..'9' +; WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;}; python-antlr3-3.5.2/tests/t020fuzzy.input0000644000175000017500000000054412653072152016773 0ustar zigozigoimport org.antlr.runtime.*; public class Main { public static void main(String[] args) throws Exception { for (int i=0; i'+ruleName) def traceOut(self, ruleName, ruleIndex): self.traces.append('<'+ruleName) def recover(self, input, re): # no error recovery yet, just crash! 
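# --- Editor's sketch (not from the archive): the traceIn/traceOut hooks
# --- above record '>rule' on entry and '<rule' on exit, so a trace list
# --- can be rendered as an indented call tree.
def format_traces(traces):
    depth, lines = 0, []
    for t in traces:
        if t.startswith('<'):
            depth -= 1
        lines.append('  ' * depth + t)
        if t.startswith('>'):
            depth += 1
    return '\n'.join(lines)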
raise return TWalker def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input): lexerCls, parserCls = self.compileInlineGrammar(grammar) walkerCls = self.compileInlineGrammar(treeGrammar) cStream = antlr3.StringStream(input) lexer = lexerCls(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = parserCls(tStream) r = getattr(parser, grammarEntry)() nodes = antlr3.tree.CommonTreeNodeStream(r.tree) nodes.setTokenStream(tStream) walker = walkerCls(nodes) getattr(walker, treeEntry)() return walker._output def testFlatList(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : ID INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ID INT {self.capture("\%s, \%s" \% ($ID, $INT))} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc 34" ) self.failUnlessEqual("abc, 34", found) def testSimpleTree(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : ID INT -> ^(ID INT); ID : 'a'..'z'+ ; INT : '0'..'9'+; WS : (' '|'\\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ^(ID INT) {self.capture(str($ID)+", "+str($INT))} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc 34" ) self.failUnlessEqual("abc, 34", found) def testFlatVsTreeDecision(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : b c ; b : ID INT -> ^(ID INT); c : ID INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; WS : (' '|'\\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : b b ; b : ID INT {self.capture(str($ID)+" "+str($INT)+'\n')} | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a 1 b 2" ) self.failUnlessEqual("^(a 1)b 2\n", found) def testFlatVsTreeDecision2(self): grammar = textwrap.dedent( r"""grammar T; options { language=Python; output=AST; } a : b c ; b : ID INT+ -> ^(ID INT+); c : ID INT+; ID : 'a'..'z'+ ; INT : '0'..'9'+; WS : (' '|'\n') {$channel=HIDDEN;} ; """) treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : b b ; b : ID INT+ {self.capture(str($ID)+" "+str($INT)+"\n")} | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a 1 2 3 b 4 5" ) self.failUnlessEqual("^(a 3)b 5\n", found) def testCyclicDFALookahead(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : ID INT+ PERIOD; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; PERIOD : '.' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ID INT+ PERIOD {self.capture("alt 1")} | ID INT+ SEMI {self.capture("alt 2")} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a 1 2 3." 
) self.failUnlessEqual("alt 1", found) ## def testTemplateOutput(self): ## String grammar = ## "grammar T;\n" + ## "options {output=AST;}\n" + ## "a : ID INT;\n" + ## "ID : 'a'..'z'+ ;\n" + ## "INT : '0'..'9'+;\n" + ## "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n"; ## String treeGrammar = ## "tree grammar TP;\n" + ## "options {output=template; ASTLabelType=CommonTree;}\n" + ## "s : a {System.out.println($a.st);};\n" + ## "a : ID INT -> {new StringTemplate($INT.text)}\n" + ## " ;\n"; ## String found = execTreeParser("T.g", grammar, "TParser", "TP.g", ## treeGrammar, "TP", "TLexer", "a", "s", "abc 34"); ## assertEquals("34\n", found); ## } def testNullableChildList(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : ID INT? -> ^(ID INT?); ID : 'a'..'z'+ ; INT : '0'..'9'+; WS : (' '|'\\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ^(ID INT?) {self.capture(str($ID))} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc" ) self.failUnlessEqual("abc", found) def testNullableChildList2(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : ID INT? SEMI -> ^(ID INT?) SEMI ; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ^(ID INT?) SEMI {self.capture(str($ID))} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc;" ) self.failUnlessEqual("abc", found) def testNullableChildList3(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; WS : (' '|'\\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a : ^(ID INT? b) SEMI {self.capture(str($ID)+", "+str($b.text))} ; b : ID? ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc def;" ) self.failUnlessEqual("abc, def", found) def testActionsAfterRoot(self): grammar = textwrap.dedent( r'''grammar T; options { language=Python; output=AST; } a : x=ID INT? SEMI -> ^($x INT?) ; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r'''tree grammar TP; options { language=Python; ASTLabelType=CommonTree; } a @init {x=0} : ^(ID {x=1} {x=2} INT?) {self.capture(str($ID)+", "+str(x))} ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "abc;" ) self.failUnless("abc, 2\n", found) def testWildcardLookahead(self): grammar = textwrap.dedent( r''' grammar T; options {language=Python; output=AST;} a : ID '+'^ INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; PERIOD : '.' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r''' tree grammar TP; options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} a : ^('+' . INT) { self.capture("alt 1") } ; ''') found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a + 2") self.assertEquals("alt 1", found) def testWildcardLookahead2(self): grammar = textwrap.dedent( r''' grammar T; options {language=Python; output=AST;} a : ID '+'^ INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; PERIOD : '.' 
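/* Editor's note (added): this parser grammar only feeds the tree grammar
   below, where '.' is a wildcard matching any single subtree; the AMBIG
   remarks in these tests refer to that wildcard overlapping the explicit
   alternatives. */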
; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r''' tree grammar TP; options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} a : ^('+' . INT) { self.capture("alt 1") } | ^('+' . .) { self.capture("alt 2") } ; ''') # AMBIG upon '+' DOWN INT UP etc.. but so what. found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a + 2") self.assertEquals("alt 1", found) def testWildcardLookahead3(self): grammar = textwrap.dedent( r''' grammar T; options {language=Python; output=AST;} a : ID '+'^ INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; PERIOD : '.' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r''' tree grammar TP; options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} a : ^('+' ID INT) { self.capture("alt 1") } | ^('+' . .) { self.capture("alt 2") } ; ''') # AMBIG upon '+' DOWN INT UP etc.. but so what. found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a + 2") self.assertEquals("alt 1", found) def testWildcardPlusLookahead(self): grammar = textwrap.dedent( r''' grammar T; options {language=Python; output=AST;} a : ID '+'^ INT; ID : 'a'..'z'+ ; INT : '0'..'9'+; SEMI : ';' ; PERIOD : '.' ; WS : (' '|'\n') {$channel=HIDDEN;} ; ''') treeGrammar = textwrap.dedent( r''' tree grammar TP; options {language=Python; tokenVocab=T; ASTLabelType=CommonTree;} a : ^('+' INT INT ) { self.capture("alt 1") } | ^('+' .+) { self.capture("alt 2") } ; ''') # AMBIG upon '+' DOWN INT UP etc.. but so what. found = self.execTreeParser( grammar, 'a', treeGrammar, 'a', "a + 2") self.assertEquals("alt 2", found) if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t043synpred.py0000644000175000017500000000154312653072152016566 0ustar zigozigoimport antlr3 import testbase import unittest class t043synpred(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def recover(self, input, re): # no error recovery yet, just crash! raise return TLexer def parserClass(self, base): class TParser(base): def recover(self, input, re): # no error recovery yet, just crash! raise return TParser def testValid1(self): cStream = antlr3.StringStream(' +foo>') lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) events = parser.a() if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t032subrulePredict.py0000644000175000017500000000177012653072152020076 0ustar zigozigoimport antlr3 import testbase import unittest class t032subrulePredict(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def parserClass(self, base): class TParser(base): def recover(self, input, re): # no error recovery yet, just crash! 
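# --- Editor's sketch (testbase.py is not reproduced in this listing, so
# --- this is a guess at the contract, not its code): a decorator like
# --- testbase.broken, used below, typically swallows an expected failure.
def broken(reason, *exceptions):
    def decorate(func):
        def wrapper(self, *args, **kwargs):
            try:
                func(self, *args, **kwargs)
            except exceptions:
                pass  # known-broken: the listed exceptions are expected
        wrapper.__name__ = func.__name__
        return wrapper
    return decorate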
raise return TParser def testValid1(self): cStream = antlr3.StringStream( 'BEGIN A END' ) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) events = parser.a() @testbase.broken("DFA tries to look beyond end of rule b", Exception) def testValid2(self): cStream = antlr3.StringStream( ' A' ) lexer = self.getLexer(cStream) tStream = antlr3.CommonTokenStream(lexer) parser = self.getParser(tStream) events = parser.b() if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t038lexerRuleLabel.py0000644000175000017500000000123012653072152020006 0ustar zigozigoimport antlr3 import testbase import unittest class t038lexerRuleLabel(testbase.ANTLRTest): def setUp(self): self.compileGrammar() def lexerClass(self, base): class TLexer(base): def recover(self, input, re): # no error recovery yet, just crash! raise return TLexer def testValid1(self): cStream = antlr3.StringStream('a 2') lexer = self.getLexer(cStream) while True: t = lexer.nextToken() if t.type == antlr3.EOF: break print t if __name__ == '__main__': unittest.main() python-antlr3-3.5.2/tests/t007lexer.g0000644000175000017500000000012712653072152016014 0ustar zigozigolexer grammar t007lexer; options { language = Python; } FOO: 'f' ('o' | 'a' 'b'+)*; python-antlr3-3.5.2/tests/t019lexer.input0000644000175000017500000000054412653072152016733 0ustar zigozigoimport org.antlr.runtime.*; public class Main { public static void main(String[] args) throws Exception { for (int i=0; i