antlr4-python3-runtime-4.9.1/MANIFEST.in

include *.txt
recursive-include test *.py *.c

antlr4-python3-runtime-4.9.1/PKG-INFO

Metadata-Version: 1.0
Name: antlr4-python3-runtime
Version: 4.9.1
Summary: ANTLR 4.9.1 runtime for Python 3.7
Home-page: http://www.antlr.org
Author: Eric Vergnaud, Terence Parr, Sam Harwell
Author-email: eric.vergnaud@wanadoo.fr
License: BSD
Description: UNKNOWN
Platform: UNKNOWN

antlr4-python3-runtime-4.9.1/README.txt

This is the Python 3 runtime for ANTLR.

Visit the ANTLR web sites for more information:

http://www.antlr.org
https://raw.githubusercontent.com/antlr/antlr4/master/doc/python-target.md

antlr4-python3-runtime-4.9.1/RELEASE-4.5.txt

What's in this release?

- fixed a bug where non-ASCII input streams would fail
- added support for the visitor pattern
- added support for wildcards in grammars

Breaking change: in version 4.4, the parser/lexer had a tokenNames member.
This has been removed in favor of the following members:

- literalNames, containing the literal text of the tokens
- symbolicNames, corresponding to the old tokenNames
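For code that formerly looked up display names via parser.tokenNames, a minimal
migration sketch follows (illustrative only; the parser instance and token type
are assumed, and generated parsers conventionally use "<INVALID>" as the
placeholder entry in literalNames for tokens with no fixed literal text):

    def token_display_name(recognizer, token_type):
        # Prefer the literal text defined in the grammar, e.g. "'+'".
        literal = recognizer.literalNames[token_type]
        if literal != "<INVALID>":
            return literal
        # Fall back to the symbolic name, e.g. "ID".
        return recognizer.symbolicNames[token_type]

    # was: name = parser.tokenNames[token.type]
    # now: name = token_display_name(parser, token.type)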
antlr4-python3-runtime-4.9.1/bin/pygrun

#!python
__author__ = 'jszheng'
import optparse
import sys
import os
import importlib
from antlr4 import *


# This is a Python version of TestRig.
def beautify_lisp_string(in_string):
    indent_size = 3
    add_indent = ' ' * indent_size
    out_string = in_string[0]  # no indent for the first '('
    indent = ''
    for i in range(1, len(in_string)):
        if in_string[i] == '(' and in_string[i + 1] != ' ':
            indent += add_indent
            out_string += "\n" + indent + '('
        elif in_string[i] == ')':
            out_string += ')'
            if len(indent) > 0:
                indent = indent.replace(add_indent, '', 1)
        else:
            out_string += in_string[i]
    return out_string


if __name__ == '__main__':

    #############################################################
    # Parse options.
    # -gui, -encoding and -ps are not supported.
    #############################################################
    usage = "Usage: %prog [options] Grammar_Name Start_Rule"
    parser = optparse.OptionParser(usage=usage)
    # parser.add_option('-t', '--tree',
    #                   dest="out_file",
    #                   default="default.out",
    #                   help='set output file name',
    #                   )
    parser.add_option('-t', '--tree',
                      default=False,
                      action='store_true',
                      help='Print the parse tree'
                      )
    parser.add_option('-k', '--tokens',
                      dest="token",
                      default=False,
                      action='store_true',
                      help='Show tokens'
                      )
    parser.add_option('-s', '--sll',
                      dest="sll",
                      default=False,
                      action='store_true',
                      help='Use SLL prediction mode'
                      )
    parser.add_option('-d', '--diagnostics',
                      dest="diagnostics",
                      default=False,
                      action='store_true',
                      help='Enable the diagnostic error listener'
                      )
    parser.add_option('-a', '--trace',
                      dest="trace",
                      default=False,
                      action='store_true',
                      help='Enable tracing'
                      )

    options, remainder = parser.parse_args()
    if len(remainder) < 2:
        print('ERROR: You have to provide at least 2 arguments!')
        parser.print_help()
        exit(1)
    else:
        grammar = remainder.pop(0)
        start_rule = remainder.pop(0)
        file_list = remainder

    #############################################################
    # Check and load the ANTLR-generated files.
    #############################################################
    # Dynamically load the generated modules and classes.
    lexerName = grammar + 'Lexer'
    parserName = grammar + 'Parser'
    # Check that the generated files exist.
    lexer_file = lexerName + '.py'
    parser_file = parserName + '.py'
    if not os.path.exists(lexer_file):
        print("[ERROR] Can't find lexer file {}!".format(lexer_file))
        print(os.path.realpath('.'))
        exit(1)
    if not os.path.exists(parser_file):
        print("[ERROR] Can't find parser file {}!".format(parser_file))
        print(os.path.realpath('.'))
        exit(1)

    # The generated files are loaded from the current directory;
    # the script itself might live somewhere else.
    sys.path.append('.')
    # print(sys.path)

    # print("Load Lexer {}".format(lexerName))
    module_lexer = __import__(lexerName, globals(), locals(), lexerName)
    class_lexer = getattr(module_lexer, lexerName)
    # print(class_lexer)

    # print("Load Parser {}".format(parserName))
    module_parser = __import__(parserName, globals(), locals(), parserName)
    class_parser = getattr(module_parser, parserName)
    # print(class_parser)

    #############################################################
    # Main processing steps.
    #############################################################
    def process(input_stream, class_lexer, class_parser):
        lexer = class_lexer(input_stream)
        token_stream = CommonTokenStream(lexer)
        token_stream.fill()
        if options.token:  # need to show tokens
            for tok in token_stream.tokens:
                print(tok)
        if start_rule == 'tokens':
            return

        parser = class_parser(token_stream)

        if options.diagnostics:
            parser.addErrorListener(DiagnosticErrorListener())
            parser._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION
        if options.tree:
            parser.buildParseTrees = True
        if options.sll:
            parser._interp.predictionMode = PredictionMode.SLL
        # parser.setTokenStream(token_stream)
        parser.setTrace(options.trace)
        if hasattr(parser, start_rule):
            func_start_rule = getattr(parser, start_rule)
            parser_ret = func_start_rule()
            if options.tree:
                lisp_tree_str = parser_ret.toStringTree(recog=parser)
                print(beautify_lisp_string(lisp_tree_str))
        else:
            print("[ERROR] Can't find start rule '{}' in parser '{}'".format(start_rule, parserName))

    #############################################################
    # Use stdin if no input file was provided.
    #############################################################
    if len(file_list) == 0:
        input_stream = InputStream(sys.stdin.read())
        process(input_stream, class_lexer, class_parser)
        exit(0)

    #############################################################
    # Iterate over all input files.
    #############################################################
    for file_name in file_list:
        if os.path.exists(file_name) and os.path.isfile(file_name):
            input_stream = FileStream(file_name)
            process(input_stream, class_lexer, class_parser)
        else:
            print("[ERROR] file {} does not exist".format(os.path.normpath(file_name)))

antlr4-python3-runtime-4.9.1/setup.cfg

[egg_info]
tag_build = 
tag_date = 0

antlr4-python3-runtime-4.9.1/setup.py

from setuptools import setup

setup(
    name='antlr4-python3-runtime',
    version='4.9.1',
    packages=['antlr4', 'antlr4.atn', 'antlr4.dfa', 'antlr4.tree', 'antlr4.error', 'antlr4.xpath'],
    package_dir={'': 'src'},
    install_requires=[
        "typing ; python_version<'3.5'",
    ],
    url='http://www.antlr.org',
    license='BSD',
    author='Eric Vergnaud, Terence Parr, Sam Harwell',
    author_email='eric.vergnaud@wanadoo.fr',
    scripts=["bin/pygrun"],
    description='ANTLR 4.9.1 runtime for Python 3.7'
)
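For reference, the pipeline that pygrun wires together can also be driven
directly from Python. A minimal sketch, assuming a grammar Expr.g4 has already
been compiled with the Python 3 target (ExprLexer.py, ExprParser.py and the
start rule prog are hypothetical names); this is roughly what
"pygrun --tree Expr prog input.txt" performs:

    from antlr4 import FileStream, CommonTokenStream
    from ExprLexer import ExprLexer
    from ExprParser import ExprParser

    input_stream = FileStream('input.txt')
    lexer = ExprLexer(input_stream)
    tokens = CommonTokenStream(lexer)
    parser = ExprParser(tokens)
    tree = parser.prog()                     # invoke the start rule
    print(tree.toStringTree(recog=parser))   # LISP-style parse tree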
antlr4-python3-runtime-4.9.1/src/antlr4/BufferedTokenStream.py

#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.

# This implementation of {@link TokenStream} loads tokens from a
# {@link TokenSource} on-demand, and places the tokens in a buffer to provide
# access to any previous token by index.
#

# This token stream ignores the value of {@link Token#getChannel}. If your
# parser requires the token stream to filter tokens to only those on a
# particular channel, such as {@link Token#DEFAULT_CHANNEL} or
# {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such as
# {@link CommonTokenStream}.
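#
# Illustrative sketch (not part of this module), assuming a generated lexer
# MyLexer that routes comments to the hidden channel:
#
#     lexer = MyLexer(InputStream("a /* note */ b"))
#     stream = BufferedTokenStream(lexer)
#     stream.fill()
#     # stream.tokens holds every fetched token, hidden-channel comments
#     # included, and LT()/consume() walk them all, whereas a
#     # CommonTokenStream over the same lexer would skip hidden tokens
#     # during lookahead.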

from io import StringIO
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException

# need a forward declaration for the Lexer type hint below
Lexer = None

# this is just to keep meaningful parameter types to Parser
class TokenStream(object):

    pass


class BufferedTokenStream(TokenStream):
    __slots__ = ('tokenSource', 'tokens', 'index', 'fetchedEOF')

    def __init__(self, tokenSource:Lexer):
        # The {@link TokenSource} from which tokens for this stream are fetched.
        self.tokenSource = tokenSource

        # A collection of all tokens fetched from the token source. The list is
        # considered a complete view of the input once {@link #fetchedEOF} is set
        # to {@code true}.
        self.tokens = []

        # The index into {@link #tokens} of the current token (the next token to
        # {@link #consume}). {@link #tokens}{@code [}{@link #index}{@code ]} should
        # be {@link #LT LT(1)}.
        #

        # This field is set to -1 when the stream is first constructed or when
        # {@link #setTokenSource} is called, indicating that the first token has
        # not yet been fetched from the token source. For additional information,
        # see the documentation of {@link IntStream} for a description of
        # Initializing Methods.

        self.index = -1

        # Indicates whether the {@link Token#EOF} token has been fetched from
        # {@link #tokenSource} and added to {@link #tokens}. This field improves
        # performance for the following cases:
        #