pax_global_header00006660000000000000000000000064147437375270014535gustar00rootroot0000000000000052 comment=2721028dfe2151bae48d03e7e89c66a804e4ed0e python-tatsu-lts-5.13.1+ds/000077500000000000000000000000001474373752700155055ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/.github/000077500000000000000000000000001474373752700170455ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/.github/FUNDING.yml000066400000000000000000000010201474373752700206530ustar00rootroot00000000000000# These are supported funding model platforms github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: # Replace with a single Ko-fi username tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry custom: # Replace with a single custom sponsorship URL python-tatsu-lts-5.13.1+ds/.github/workflows/000077500000000000000000000000001474373752700211025ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/.github/workflows/default.yml000066400000000000000000000026441474373752700232570ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: tests on: - push - pull_request jobs: build: runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: [ '3.13', '3.12', '3.11', '3.10', '3.9', '3.8', ] steps: - uses: actions/checkout@v2 - name: set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: install dependencies run: | sudo apt-get update sudo apt-get dist-upgrade -y sudo apt-get install -y pandoc sudo apt-get install -y python3-sphinx sudo apt-get install -y python3-docutils python -m pip install --upgrade pip pip install --upgrade --requirement requirements-test.txt - name: lint if: ${{ matrix.python-version == '3.12' }} run: | make lint - name: pytest run: | make tatsu_test - name: examples run: | make examples - name: documentation if: ${{ matrix.python-version == '3.12' }} run: | make documentation - name: distributions run: | make build python-tatsu-lts-5.13.1+ds/.github/workflows/release.yml000066400000000000000000000011671474373752700232520ustar00rootroot00000000000000name: release on: push: tags: - 'v5.[0-9]+.*-LTS' jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - run: python -m pip install build - run: python -m build - uses: actions/upload-artifact@v4 with: path: dist/* upload: needs: build runs-on: ubuntu-latest environment: upload permissions: id-token: write steps: - uses: actions/download-artifact@v4 with: merge-multiple: true path: dist - uses: pypa/gh-action-pypi-publish@release/v1 with: attestations: false python-tatsu-lts-5.13.1+ds/.gitignore000066400000000000000000000023531474373752700175000ustar00rootroot00000000000000# Generated files # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class *.c *.so tmp/ /parsers/ # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before 
PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .*cache/ nosetests.xml coverage.xml *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # dotenv .env # virtualenv .venv venv/ ENV/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # PyCharm /.idea/ # VSCode .vscode/* # generated files examples/g2e/*.ebnf python-tatsu-lts-5.13.1+ds/LICENSE.txt000066400000000000000000000033521474373752700173330ustar00rootroot00000000000000TATSU - A PEG/Packrat parser generator for Python Copyright (C) 2017-2023 Juancarlo Añez All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 4. Redistributions of any form whatsoever must retain the following acknowledgment: 'This product includes software developed by "Juancarlo Añez" (https://apalala.bitbucket.io).' THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
python-tatsu-lts-5.13.1+ds/MANIFEST.in000066400000000000000000000012041474373752700172400ustar00rootroot00000000000000
include *.cfg
include *.ebnf
include *.ini
include *.md
include *.rst
include *.txt
include *.toml
include *.yaml
include Makefile
include .pylintrc
include grammar/*
include docs/*
exclude docs/_build
exclude docs/_static
exclude docs/_templates
recursive-include tatsu *.ebnf
recursive-include etc *
recursive-exclude etc *.pdf
recursive-include examples/g2e *
exclude examples/g2e/antlr_parser.py
exclude examples/g2e/python.ebnf
recursive-include examples/calc *
global-exclude *.pyc
global-exclude *.pyd
global-exclude *.pyo
global-exclude *.orig
global-exclude *.c
global-exclude *.so
global-exclude *~
global-exclude __pycache__
python-tatsu-lts-5.13.1+ds/Makefile000066400000000000000000000020171474373752700171450ustar00rootroot00000000000000
test: lint tatsu_test documentation examples

tatsu_test: clean
	pytest

documentation: clean sphinx

sphinx:
	cd docs; make -s html > /dev/null

examples: clean g2e_test calc_test

g2e_test:
	cd examples/g2e; make -s clean; make -s test > /dev/null

calc_test:
	cd examples/calc; make -s clean; make -s test > /dev/null

lint: ruff mypy

ruff:
	-@ pip install -q -U ruff
	ruff check --preview tatsu test examples

mypy:
	-@ pip install -q -U mypy
	mypy --ignore-missing-imports . --exclude dist

clean:
	find . -name "__pycache__" | xargs rm -rf
	find . -name "*.pyc" | xargs rm -f
	find . -name "*.pyd" | xargs rm -f
	find . -name "*.pyo" | xargs rm -f
	find . -name "*.orig" | xargs rm -f
	rm -rf tatsu.egg-info
	rm -rf dist
	rm -rf build
	rm -rf .tox

release_check: clean documentation tox
	@echo version `python -m tatsu --version`

build: clean
	pip install -U build
	python -m build

test_upload: build
	pip install -U twine
	twine upload --repository test dist/*

upload: release_check build
	twine upload dist/*
python-tatsu-lts-5.13.1+ds/README.rst000066400000000000000000000266201474373752700172020ustar00rootroot00000000000000
.. |dragon| unicode:: 0x7ADC .. unicode dragon
.. |nbsp| unicode:: 0xA0 .. non breakable space
.. |TatSu| replace:: |dragon|\ |nbsp|\ **TatSu**
.. |TatSu-LTS| replace:: |dragon|\ |nbsp|\ **TatSu-LTS**
.. _RELEASES: https://github.com/neogeny/TatSu/releases

|license| |pyversions| |fury| |downloads| |actions| |docs|

|TatSu-LTS|
===========

|TatSu-LTS| is a friendly fork of |TatSu|_ that guarantees compatibility with
all `supported versions of Python`_. Compatibility with older Python versions
is maintained as long as that does not require additional changes. Only
patches required to support Python versions older than the ones supported by
the upstream project are applied.

|TatSu-LTS| releases use the same version number as the |TatSu| release on
which they are based and should be published shortly after the corresponding
upstream release.

Both the |TatSu| and |TatSu-LTS| distributions install a Python package and
an executable named ``tatsu``. Users can thus switch seamlessly between the
two, depending on which Python version support guarantees they prefer. For
projects that desire to support a wide range of Python versions, there is no
drawback in depending on |TatSu-LTS| other than the short delay in the
release of new versions.

.. _TatSu: https://github.com/neogeny/TatSu
.. _supported versions of Python: https://devguide.python.org/versions/#supported-versions

|TatSu|
=======

|TatSu| is a tool that takes grammars in a variation of `EBNF`_ as input, and
outputs `memoizing`_ (`Packrat`_) `PEG`_ parsers in `Python`_.

Why use a PEG_ parser? Because `regular languages`_ (those parsable with
Python's ``re`` package) *"cannot count"*. Any language with nested
structures or with balancing of demarcations requires more than regular
expressions to be parsed.
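For instance, this short grammar — a sketch for illustration, not part of the
|TatSu| distribution — parses parentheses nested to any depth, something no
single regular expression can do:

.. code-block:: python

    import tatsu

    # A minimal sketch: a recursive rule counts what ``re`` cannot.
    GRAMMAR = '''
        start = parens $ ;
        parens = '(' [ parens ] ')' ;
    '''

    parser = tatsu.compile(GRAMMAR)
    print(parser.parse('((()))'))  # parses at any nesting depth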
|TatSu| can compile a grammar stored in a string into a
``tatsu.grammars.Grammar`` object that can be used to parse any given input,
much like the `re`_ module does with regular expressions, or it can generate
a Python_ module that implements the parser.

|TatSu| supports `left-recursive`_ rules in PEG_ grammars using the
algorithm_ by *Laurent* and *Mens*. The generated AST_ has the expected left
associativity.

|TatSu| requires a maintained version of Python (>=3.12 at the moment). While
no code in |TatSu| yet depends on new language or standard library features,
the authors don't want to be constrained by Python version compatibility
considerations when developing features that will be part of future releases.

*If you need support for previous versions of Python, please consider*
`TatSu-LTS`_, *a friendly fork of* |TatSu| *aimed at compatibility with other
versions of Python still used by many projects. The developers of both
projects work together to promote compatibility with most versions of
Python.*

.. _algorithm: http://norswap.com/pubs/sle2016.pdf
.. _TatSu-LTS: https://pypi.org/project/TatSu-LTS/

Installation
------------

.. code-block:: bash

    $ pip install TatSu-LTS

Using the Tool
--------------

|TatSu| can be used as a library, much like `Python`_'s ``re``, by embedding
grammars as strings and generating grammar models instead of generating
Python_ code.

This compiles the grammar and generates an in-memory *parser* that can
subsequently be used for parsing input with:

.. code-block:: python

    parser = tatsu.compile(grammar)

Compiles the grammar and parses the given input producing an AST_ as result:

.. code-block:: python

    ast = tatsu.parse(grammar, input)

The result is equivalent to calling:

.. code-block:: python

    parser = compile(grammar)
    ast = parser.parse(input)

Compiled grammars are cached for efficiency.

This compiles the grammar to the `Python`_ sourcecode that implements the
parser:

.. code-block:: python

    parser_source = tatsu.to_python_sourcecode(grammar)

This is an example of how to use |TatSu| as a library:

.. code-block:: python

    GRAMMAR = '''
        @@grammar::CALC

        start = expression $ ;

        expression
            =
            | expression '+' term
            | expression '-' term
            | term
            ;

        term
            =
            | term '*' factor
            | term '/' factor
            | factor
            ;

        factor
            =
            | '(' expression ')'
            | number
            ;

        number = /\d+/ ;
    '''

    if __name__ == '__main__':
        import json
        from tatsu import parse
        from tatsu.util import asjson

        ast = parse(GRAMMAR, '3 + 5 * ( 10 - 20 )')
        print(json.dumps(asjson(ast), indent=2))

..

|TatSu| will use the first rule defined in the grammar as the *start* rule.

This is the output:

.. code-block:: console

    [
      "3",
      "+",
      [
        "5",
        "*",
        [
          "10",
          "-",
          "20"
        ]
      ]
    ]

Documentation
-------------

For a detailed explanation of what |TatSu| is capable of, please see the
documentation_.

.. _documentation: http://tatsu.readthedocs.io/

Questions?
----------

Please use the `[tatsu]`_ tag on `StackOverflow`_ for general Q&A, and limit
Github issues to bugs, enhancement proposals, and feature requests.

.. _[tatsu]: https://stackoverflow.com/tags/tatsu/info

Changes
-------

See the `RELEASES`_ for details.

License
-------

You may use |TatSu| under the terms of the `BSD`_-style license described in
the enclosed `LICENSE.txt`_ file. *If your project requires different
licensing* please `email`_.

.. _ANTLR: http://www.antlr.org/
..
_AST: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. _Abstract Syntax Tree: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. _Algol W: http://en.wikipedia.org/wiki/Algol_W .. _Algorithms + Data Structures = Programs: http://www.amazon.com/Algorithms-Structures-Prentice-Hall-Automatic-Computation/dp/0130224189/ .. _BSD: http://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29 .. _Basel Shishani: https://bitbucket.org/basel-shishani .. _C: http://en.wikipedia.org/wiki/C_language .. _CHANGELOG: https://github.com/neogeny/TatSu/releases .. _CSAIL at MIT: http://www.csail.mit.edu/ .. _Cyclomatic complexity: http://en.wikipedia.org/wiki/Cyclomatic_complexity .. _David Röthlisberger: https://bitbucket.org/drothlis/ .. _Dennis Ritchie: http://en.wikipedia.org/wiki/Dennis_Ritchie .. _EBNF: http://en.wikipedia.org/wiki/Ebnf .. _English: http://en.wikipedia.org/wiki/English_grammar .. _Euler: http://en.wikipedia.org/wiki/Euler_programming_language .. _Grako: https://bitbucket.org/neogeny/grako/ .. _Jack: http://en.wikipedia.org/wiki/Javacc .. _Japanese: http://en.wikipedia.org/wiki/Japanese_grammar .. _KLOC: http://en.wikipedia.org/wiki/KLOC .. _Kathryn Long: https://bitbucket.org/starkat .. _Keywords: https://en.wikipedia.org/wiki/Reserved_word .. _`left-recursive`: https://en.wikipedia.org/wiki/Left_recursion .. _LL(1): http://en.wikipedia.org/wiki/LL(1) .. _Marcus Brinkmann: http://blog.marcus-brinkmann.de/ .. _MediaWiki: http://www.mediawiki.org/wiki/MediaWiki .. _Modula-2: http://en.wikipedia.org/wiki/Modula-2 .. _Modula: http://en.wikipedia.org/wiki/Modula .. _Oberon-2: http://en.wikipedia.org/wiki/Oberon-2 .. _Oberon: http://en.wikipedia.org/wiki/Oberon_(programming_language) .. _PEG and Packrat parsing mailing list: https://lists.csail.mit.edu/mailman/listinfo/peg .. _PEG.js: http://pegjs.majda.cz/ .. _PEG: http://en.wikipedia.org/wiki/Parsing_expression_grammar .. _PL/0: http://en.wikipedia.org/wiki/PL/0 .. _Packrat: http://bford.info/packrat/ .. _Pascal: http://en.wikipedia.org/wiki/Pascal_programming_language .. _Paul Sargent: https://bitbucket.org/PaulS/ .. _Perl: http://www.perl.org/ .. _PyPy team: http://pypy.org/people.html .. _PyPy: http://pypy.org/ .. _Python Weekly: http://www.pythonweekly.com/ .. _Python: http://python.org .. _Reserved Words: https://en.wikipedia.org/wiki/Reserved_word .. _Robert Speer: https://bitbucket.org/r_speer .. _Ruby: http://www.ruby-lang.org/ .. _Semantic Graph: http://en.wikipedia.org/wiki/Abstract_semantic_graph .. _StackOverflow: http://stackoverflow.com/tags/tatsu/info .. _Sublime Text: https://www.sublimetext.com .. _TatSu Forum: https://groups.google.com/forum/?fromgroups#!forum/tatsu .. _UCAB: http://www.ucab.edu.ve/ .. _USB: http://www.usb.ve/ .. _Unix: http://en.wikipedia.org/wiki/Unix .. _VIM: http://www.vim.org/ .. _WTK: http://en.wikipedia.org/wiki/Well-known_text .. _Warth et al: http://www.vpri.org/pdf/tr2007002_packrat.pdf .. _Well-known text: http://en.wikipedia.org/wiki/Well-known_text .. _Wirth: http://en.wikipedia.org/wiki/Niklaus_Wirth .. _`LICENSE.txt`: LICENSE.txt .. _basel-shishani: https://bitbucket.org/basel-shishani .. _blog post: http://dietbuddha.blogspot.com/2012/12/52python-encapsulating-exceptions-with.html .. _colorama: https://pypi.python.org/pypi/colorama/ .. _context managers: http://docs.python.org/2/library/contextlib.html .. _declensions: http://en.wikipedia.org/wiki/Declension .. _drothlis: https://bitbucket.org/drothlis .. 
_email: mailto:apalala@gmail.com .. _exceptions: http://www.jeffknupp.com/blog/2013/02/06/write-cleaner-python-use-exceptions/ .. _franz\_g: https://bitbucket.org/franz_g .. _gapag: https://bitbucket.org/gapag .. _gegenschall: https://bitbucket.org/gegenschall .. _gkimbar: https://bitbucket.org/gkimbar .. _introduced: http://dl.acm.org/citation.cfm?id=964001.964011 .. _jimon: https://bitbucket.org/jimon .. _keyword: https://en.wikipedia.org/wiki/Reserved_word .. _keywords: https://en.wikipedia.org/wiki/Reserved_word .. _lambdafu: http://blog.marcus-brinkmann.de/ .. _leewz: https://bitbucket.org/leewz .. _linkdd: https://bitbucket.org/linkdd .. _make a donation: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=P9PV7ZACB669J .. _memoizing: http://en.wikipedia.org/wiki/Memoization .. _nehz: https://bitbucket.org/nehz .. _neumond: https://bitbucket.org/neumond .. _parsewkt: https://github.com/cleder/parsewkt .. _pauls: https://bitbucket.org/pauls .. _pgebhard: https://bitbucket.org/pgebhard .. _pygraphviz: https://pypi.python.org/pypi/pygraphviz .. _r\_speer: https://bitbucket.org/r_speer .. _raw string literal: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals .. _re: https://docs.python.org/3.7/library/re.html .. _regular languages: https://en.wikipedia.org/wiki/Regular_language .. _regex: https://pypi.python.org/pypi/regex .. _siemer: https://bitbucket.org/siemer .. _sjbrownBitbucket: https://bitbucket.org/sjbrownBitbucket .. _smc.mw: https://github.com/lambdafu/smc.mw .. _starkat: https://bitbucket.org/starkat .. _tonico\_strasser: https://bitbucket.org/tonico_strasser .. _vinay.sajip: https://bitbucket.org/vinay.sajip .. _vmuriart: https://bitbucket.org/vmuriart .. |fury| image:: https://badge.fury.io/py/TatSu-LTS.svg :target: https://badge.fury.io/py/TatSu-LTS .. |license| image:: https://img.shields.io/badge/license-BSD-blue.svg :target: https://raw.githubusercontent.com/dnicolodi/TatSu-LTS/master/LICENSE.txt .. |pyversions| image:: https://img.shields.io/pypi/pyversions/TatSu-LTS.svg :target: https://pypi.python.org/pypi/TatSu-LTS .. |actions| image:: https://github.com/dnicolodi/TatSu-LTS/actions/workflows/default.yml/badge.svg :target: https://github.com/dnicolodi/TatSu-LTS/actions/workflows/default.yml .. |docs| image:: https://readthedocs.org/projects/tatsu/badge/?version=stable :target: http://tatsu.readthedocs.io/en/stable/ .. |downloads| image:: https://img.shields.io/pypi/dm/TatSu-LTS.svg :target: https://pypistats.org/packages/tatsu-lts python-tatsu-lts-5.13.1+ds/docs/000077500000000000000000000000001474373752700164355ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/docs/LICENSE.txt000066400000000000000000000025041474373752700202610ustar00rootroot00000000000000TATSU - A PEG/Packrat parser generator for Python Copyright (C) 2017-2023 Juancarlo Añez All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
python-tatsu-lts-5.13.1+ds/docs/Makefile000066400000000000000000000011321474373752700200720ustar00rootroot00000000000000
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = tatsu
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
python-tatsu-lts-5.13.1+ds/docs/_static/000077500000000000000000000000001474373752700200635ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/docs/_static/css/000077500000000000000000000000001474373752700206535ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/docs/_static/css/custom.css000066400000000000000000000003251474373752700226770ustar00rootroot00000000000000
.succeed {
    color: green;
    font-family: monospace;
}
.fail {
    color: red;
    font-family: monospace;
}
.try {
    color: orange;
    font-family: monospace;
}
.console {
    font-family: monospace;
}
python-tatsu-lts-5.13.1+ds/docs/_static/images/000077500000000000000000000000001474373752700213305ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/docs/_static/images/btn_donate_SM.gif000066400000000000000000000027231474373752700245370ustar00rootroot00000000000000
python-tatsu-lts-5.13.1+ds/docs/antlr.rst000066400000000000000000000017061474373752700203130ustar00rootroot00000000000000
.. include:: links.rst

ANTLR Grammars
--------------

.. _grammars: https://github.com/antlr/grammars-v4

ANTLR_ is one of the best known parser generators, and it has an important
collection of grammars_. The ``tatsu.g2e`` module can translate an ANTLR_
grammar to the syntax used by |TatSu|.

The resulting grammar won't be immediately usable. It will have to be edited
to make it abide by PEG_ semantics, and in general be adapted to the way
things are done with |TatSu|.

To use ``g2e`` as a module, invoke one of its translation functions.

.. code:: python

    def translate(text=None, filename=None, name=None, encoding='utf-8', trace=False):

..

For example:
.. code:: python

    from tatsu import g2e

    tatsu_grammar = g2e.translate(filename='mygrammar.g', name='My')
    with open('my.ebnf', 'w') as f:
        f.write(tatsu_grammar)

..

``g2e`` can also be used from the command line:

.. code:: bash

    $ python -m tatsu.g2e mygrammar.g > my.ebnf

..
python-tatsu-lts-5.13.1+ds/docs/ast.rst000066400000000000000000000036501474373752700177620ustar00rootroot00000000000000
.. include:: links.rst

Abstract Syntax Trees (ASTs)
----------------------------

By default, an `AST`_ is either:

* a *value*, for simple elements such as *token*, *pattern*, or *constant*
* a ``tuple``, for *closures*, *gatherings*, and the right-hand-side of rules
  with more than one element but without named elements
* a ``dict``-derived object (``AST``) that contains one item for every named
  element in the grammar rule, whose items can be accessed through the
  standard ``dict`` syntax (``ast['key']``), or as attributes (``ast.key``).

`AST`_ entries are single values if only one item was associated with a name,
or ``tuple`` if more than one item was matched. There's a provision in the
grammar syntax (the ``+:`` operator) to force an `AST`_ entry to be a
``tuple`` even if only one element was matched. The value for named elements
that were not found during the parse (perhaps because they are optional) is
``None``.

When the ``parseinfo=True`` keyword argument has been passed to the
``Parser`` constructor or enabled with the ``@@parseinfo`` directive, a
``parseinfo`` item is added to `AST`_ nodes that are *dict*-like. The item
contains a ``collections.namedtuple`` with the parse information for the
node:

.. code:: python

    ParseInfo = namedtuple(
        'ParseInfo',
        [
            'tokenizer',
            'rule',
            'pos',
            'endpos',
            'line',
            'endline',
        ]
    )

With the help of the ``Tokenizer.line_info()`` method, it is possible to
recover the line, column, and original text parsed for the node. Note that
when ``ParseInfo`` is generated, the ``Tokenizer`` used during parsing is
kept in memory for the lifetime of the `AST`_.

Generation of ``parseinfo`` can also be controlled using the
``@@parseinfo :: True`` grammar directive.

.. _Abstract Syntax Tree: http://en.wikipedia.org/wiki/Abstract_syntax_tree
.. _AST: http://en.wikipedia.org/wiki/Abstract_syntax_tree
python-tatsu-lts-5.13.1+ds/docs/conf.py000066400000000000000000000130131474373752700177320ustar00rootroot00000000000000
#!/usr/bin/env python3
#
# \u7ADC TatSu documentation build configuration file, created by
# sphinx-quickstart on Mon May 1 18:01:31 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
del sys
del os

import tatsu

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = '\u7ADC TatSu' copyright = '2017-2023 Juancarlo Añez' author = 'Juancarlo Añez' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = tatsu.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # Add path to the RTD explicitly to robustify builds (otherwise might # fail in a clean Debian build env) html_theme_path = [] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static', '_static/css'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'tatsudoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'tatsu.tex', '\u7ADC TatSu Documentation', 'Juancarlo Añez', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, '\u7ADC TatSu', '\u7ADC TatSu Documentation', [author], 1), ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, '\u7ADC TatSu', '\u7ADC TatSu Documentation', author, '\u7ADC TatSu', 'One line description of project.', 'Miscellaneous'), ] html_sidebars = { '**': [ 'globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html', ], } def setup(app): app.add_css_file('css/custom.css') # may also be an URL python-tatsu-lts-5.13.1+ds/docs/contributing.rst000066400000000000000000000013111474373752700216720ustar00rootroot00000000000000.. include:: links.rst Contributing ------------ |TatSu| development is done on Github_. Bug reports, patches, suggestions, and improvements are welcome. .. _Github : https://github.com/neogeny/TatSu Donations ~~~~~~~~~ |donate| If you'd like to contribute to the future development of |TatSu|, please `make a donation`_ to the project. Some of the planned new features are: grammar expressions for left and right associativity, new algorithms for left-recursion, a unified intermediate model for parsing and translating programming languages, and more... .. |donate| image:: _static/images/btn_donate_SM.gif :target: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=2TW56SV6WNJV6 python-tatsu-lts-5.13.1+ds/docs/contributors.rst000066400000000000000000000024221474373752700217240ustar00rootroot00000000000000.. include:: links.rst Contributors ------------ The following, among others, have contributed to |TatSu| with features, bug reports, bug fixes, or suggestions: `Alberto Berti`_, `Andy Wright`_, `Basel Shishani`_, `Daniel Martin`_, `Daniele Nicolodi`_, `David Chen`_, `David Delassus`_, `David Röthlisberger`_, `David Sanders`_, `Dmytro Ivanov`_, `Felipe`_, `Franck Pommereau`_, `Franklin Lee`_, `Gabriele Paganelli`_, `Guido van Rossum`_, `Jack Taylor`_, `Kathryn Long`_, `Karthikeyan Singaravelan`_, `Manuel Jacob`_, `Marcus Brinkmann`_, `Mark Jason Dominus`_, `Max Liebkies`_, `Michael Noronha`_, `Nicholas Bishop`_, `Nicolas Laurent`_, `Nils-Hero Lindemann`_, `Oleg Komarov`_, `Paul Houle`_, `Paul Sargent`_, `Robert Speer`_, `Ryan`_, `Ryan Gonzales`_, `Ruth-Polymnia`_, `S Brown`_, `Tonico Strasser`_, `Vic Nightfall`_, `Victor Uriarte`_, `Vinay Sajip`_, `franz\_g`_, `gkimbar`_, `nehz`_ , `neumond`_, `pdw-mb`_, `pgebhard`_, `siemer`_, `by-Exist`_ `commonism`_ `Vincent Fazio`_ .. _commits: https://bitbucket.org/neogeny/grako/commits/all .. _issues: https://bitbucket.org/neogeny/grako/issues python-tatsu-lts-5.13.1+ds/docs/credits.rst000066400000000000000000000112101474373752700206170ustar00rootroot00000000000000.. include:: links.rst Credits ------- - |TatSu| is the successor of Grako_, which was built by **Juancarlo Añez** and funded by **Thomas Bragg** to do analysis and translation of programs written in legacy programming languages. - **Niklaus Wirth** was the chief designer of the programming languages `Euler`_, `Algol W`_, `Pascal`_, `Modula`_, `Modula-2`_, `Oberon`_, and `Oberon-2`_. 
  In the last chapter of his 1976 book
  `Algorithms + Data Structures = Programs`_, `Wirth`_ creates a top-down,
  descent parser with recovery for the `Pascal`_-like, `LL(1)`_ programming
  language `PL/0`_. The structure of the program is that of a `PEG`_ parser,
  though the concept of `PEG`_ wasn't formalized until 2004.

- **Bryan Ford** `introduced`_ `PEG`_ (parsing expression grammars) in 2004.

- Other parser generators like `PEG.js`_ by **David Majda** inspired the work
  in |TatSu|.

- **William Thompson** inspired the use of context managers with his
  `blog post`_ that I knew about through the invaluable `Python Weekly`_
  newsletter, curated by **Rahul Chaudhary**.

- **Jeff Knupp** explains why |TatSu|'s use of `exceptions`_ is sound, so I
  don't have to.

- **Terence Parr** created `ANTLR`_, probably the most solid and professional
  parser generator out there. *Ter*, *ANTLR*, and the folks on the *ANTLR*
  forums helped me shape my ideas about |TatSu|.

- **JavaCC** (originally `Jack`_) looks like an abandoned project. It was the
  first parser generator I used while teaching.

- |TatSu| is very fast. But dealing with millions of lines of legacy source
  code in a matter of minutes would be impossible without `PyPy`_, the work
  of **Armin Rigo** and the `PyPy team`_.

- **Guido van Rossum** created and has led the development of the `Python`_
  programming environment for over a decade. A tool like |TatSu|, at under
  10K lines of code, would not have been possible without `Python`_.

- **Kota Mizushima** welcomed me to the `CSAIL at MIT`_
  `PEG and Packrat parsing mailing list`_, and immediately offered ideas and
  pointed me to documentation about the implementation of *cut* in modern
  parsers. The optimization of memoization information in |TatSu| is thanks
  to one of his papers.

- **My students** at `UCAB`_ inspired me to think about how grammar-based
  parser generation could be made more approachable.

- **Gustavo Lau** was my professor of *Language Theory* at `USB`_, and he was
  kind enough to be my tutor in a thesis project on programming languages
  that was more than I could chew. My peers, and then teaching advisers
  **Alberto Torres**, and **Enzo Chiariotti** formed a team with **Gustavo**
  to challenge us with programming languages like *LATORTA* and term exams
  that went well into the eight hours. And, of course, there was also the
  *pirate patch* that should be worn on the left or right eye depending on
  the *LL* or *LR* challenge.

- **Manuel Rey** led me through another, unfinished, thesis project that
  taught me about what languages (spoken languages in general, and
  programming languages in particular) are about. I learned why languages use
  `declensions`_, and why, although the underlying words are in `English`_,
  the structure of the programs we write is more like `Japanese`_.

- `Marcus Brinkmann`_ has kindly submitted patches that have resolved obscure
  bugs in |TatSu|'s implementation, and that have made the tool more
  user-friendly, especially for newcomers to parsing and translation.

- `Robert Speer`_ cleaned up the nonsense in trying to have Unicode handling
  be compatible with 2.7.x and 3.x, and figured out the canonical way of
  honoring escape sequences in grammar tokens without throwing off the
  encoding.

- `Basel Shishani`_ has been an incredibly thorough peer-reviewer of |TatSu|.

- `Paul Sargent`_ implemented `Warth et al`_'s algorithm for supporting
  direct and indirect left recursion in `PEG`_ parsers.
- `Kathryn Long`_ proposed better support for UNICODE in the treatment of
  whitespace and regular expressions (patterns) in general. Her other
  contributions have made |TatSu| more congruent, and more user-friendly.

- `David Röthlisberger`_ provided the definitive patch that allows the use of
  `Python`_ keywords as rule names.

- `Nicolas Laurent`_ researched, designed, implemented, and published the
  left recursion algorithm used in |TatSu|.

- `Vic Nightfall`_ designed and coded an implementation of left recursion
  that handles all the use cases of interest (see the `Left Recursion`_
  topic for details). He has kindly managed the |TatSu| project since 2019.

.. _Left Recursion: left_recursion.html
python-tatsu-lts-5.13.1+ds/docs/directives.rst000066400000000000000000000100551474373752700213310ustar00rootroot00000000000000
.. include:: links.rst

Grammar Directives
------------------

|TatSu| allows *directives* in the grammar that control the behavior of the
generated parsers.

All directives are of the form ``@@name :: <value>``. For example:

.. code::

    @@ignorecase :: True

The *directives* supported by |TatSu| are described below.

``@@grammar :: <name>``
~~~~~~~~~~~~~~~~~~~~~~~

Specifies the name of the grammar, and provides the base name for the classes
in parser source-code generation.

``@@comments :: <regexp>``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Specifies a regular expression to identify and exclude inline (bracketed)
comments before the text is scanned by the parser. For ``(* ... *)``
comments:

.. code::

    @@comments :: /\(\*((?:.|\n)*?)\*\)/

.. note::

    In previous versions of |TatSu|, the
    `re.MULTILINE <https://docs.python.org/3/library/re.html#re.MULTILINE>`_
    option was enabled by default. This is no longer the case. Use ``(?m)``
    at the start of your regular expressions to make them multi-line.

``@@eol_comments :: <regexp>``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Specifies a regular expression to identify and exclude end-of-line comments
before the text is scanned by the parser. For ``# ...`` comments:

.. code::

    @@eol_comments :: /#([^\n]*?)$/

.. note::

    In previous versions of |TatSu|, the
    `re.MULTILINE <https://docs.python.org/3/library/re.html#re.MULTILINE>`_
    option was enabled by default. This is no longer the case. Use ``(?m)``
    at the start of your regular expressions to make them multi-line.

``@@ignorecase :: <bool>``
~~~~~~~~~~~~~~~~~~~~~~~~~~

If set to ``True`` makes |TatSu| not consider case when parsing tokens.
Defaults to ``False``:

.. code::

    @@ignorecase :: True

``@@keyword :: {<string>|<word>}+``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Specifies the list of strings or words that the grammar should consider as
*"keywords"*. May appear more than once.

See the `Reserved Words and Keywords`_ section for an explanation.

.. _`Reserved Words and Keywords`: syntax.html#reserved-words-and-keywords

``@@left_recursion :: <bool>``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Enables left-recursive rules in the grammar. See the `Left Recursion`_
sections for an explanation.

.. _`Left Recursion`: left_recursion.html

``@@namechars :: <string>``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

A list of (non-alphanumeric) characters that should be considered part of
names when using the `@@nameguard`_ feature:

.. code::

    @@namechars :: '-_$'

.. _`@@nameguard`: #nameguard-bool

``@@nameguard :: <bool>``
~~~~~~~~~~~~~~~~~~~~~~~~~

When set to ``True``, avoids matching tokens when the next character in the
input sequence is alphanumeric or a ``@@namechar``. Defaults to ``True``.
See the `'text' expression`_ for an explanation.

.. code::

    @@nameguard :: False

.. _`'text' expression`: syntax.html?highlight=nameguard#text-or-text
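For example, with the default ``@@nameguard :: True``, the token ``'in'``
will not match the first two letters of ``inside``, because the character
that follows is alphanumeric. A minimal sketch (the grammar is invented for
illustration):

.. code:: python

    import tatsu

    grammar = '''
        @@nameguard :: True
        start = 'in' 'side' $ ;
    '''

    tatsu.parse(grammar, 'in side')    # succeeds
    # tatsu.parse(grammar, 'inside')   # fails: 'in' is guarded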
``@@parseinfo :: <bool>``
~~~~~~~~~~~~~~~~~~~~~~~~~

When ``True``, the parser will add parse information to every ``AST`` and
``Node`` generated by the parse under a ``parseinfo`` field. The information
will include:

* ``rule`` the rule name that parsed the node
* ``pos`` the initial position for the node in the input
* ``endpos`` the final position for the node in the input
* ``line`` the initial input line number for the element
* ``endline`` the final line number for the element

Enabling ``@@parseinfo`` will allow precise reporting over the input
source-code while performing semantic actions.
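A short sketch of how the information can be retrieved (the grammar is
invented for illustration):

.. code:: python

    import tatsu

    grammar = r'''
        @@parseinfo :: True
        start = name:/\w+/ $ ;
    '''

    ast = tatsu.parse(grammar, 'hello')
    info = ast.parseinfo  # the ParseInfo namedtuple for the node
    print(info.rule, info.pos, info.endpos)  # e.g.: start 0 5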
``@@whitespace :: <regexp>``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Provides a regular expression for the whitespace to be ignored by the parser.
If no definition is provided, then ``r'(?m)\s+'`` will be used as default:

.. code::

    @@whitespace :: /[\t ]+/

To disable any parsing of whitespace, use ``None`` for the definition:

.. code::

    @@whitespace :: None

.. note::

    In previous versions of |TatSu|, the
    `re.MULTILINE <https://docs.python.org/3/library/re.html#re.MULTILINE>`_
    option was enabled by default. This is no longer the case. Use ``(?m)``
    at the start of your regular expressions to make them multi-line.
python-tatsu-lts-5.13.1+ds/docs/examples.rst000066400000000000000000000021231474373752700210030ustar00rootroot00000000000000
.. include:: links.rst

Examples
--------

Tatsu
~~~~~

The file ``grammar/tatsu.ebnf`` contains a grammar for the |TatSu| grammar
language written in its own grammar language. It is used in the *bootstrap*
test suite to prove that |TatSu| can generate a parser to parse its own
language, and the resulting parser is made the bootstrap parser every time
|TatSu| is stable (see ``tatsu/bootstrap.py`` for the generated parser).

|TatSu| uses |TatSu| to translate grammars into parsers, so it is a good
example of end-to-end translation.

Calc
~~~~

The project ``examples/calc`` implements a calculator for simple expressions,
and is written as a tutorial over most of the features provided by |TatSu|.

g2e
~~~

The project ``examples/g2e`` contains an example `ANTLR`_ to |TatSu| grammar
translation. The project is a good example of the use of ``g2e``. It
generates the |TatSu| grammar on standard output, but because the model used
is |TatSu|'s own, the same code can be used to directly generate a parser
from any `ANTLR`_ grammar. Please take a look at the examples *README* to
know about limitations.
python-tatsu-lts-5.13.1+ds/docs/index.rst000066400000000000000000000041011474373752700202720ustar00rootroot00000000000000
.. tatsu documentation master file, created by
   sphinx-quickstart on Mon May 1 18:01:31 2017.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

.. include:: links.rst

|TatSu|
=======

*At least for the people who send me mail about a new language that they're
designing, the general advice is: do it to learn about how to write a
compiler. Don't have any expectations that anyone will use it, unless you
hook up with some sort of organization in a position to push it hard. It's a
lottery, and some can buy a lot of the tickets. There are plenty of beautiful
languages (more beautiful than C) that didn't catch on. But someone does win
the lottery, and doing a language at least teaches you something.*

`Dennis Ritchie`_ (1941-2011)

Creator of the C_ programming language and of Unix_

|TatSu| is a tool that takes grammars in a variation of `EBNF`_ as input,
and outputs `memoizing`_ (`Packrat`_) `PEG`_ parsers in `Python`_.

Why use a PEG_ parser? Because `regular languages`_ (those parsable with
Python's ``re`` package) *"cannot count"*. Any language with nested
structures or with balancing of demarcations requires more than regular
expressions to be parsed.

|TatSu| can compile a grammar stored in a string into a
``tatsu.grammars.Grammar`` object that can be used to parse any given input,
much like the `re`_ module does with regular expressions, or it can generate
a Python_ module that implements the parser.

|TatSu| supports `left-recursive`_ rules in PEG_ grammars, and it honors
*left-associativity* in the resulting parse trees.

.. toctree::
    :maxdepth: 2

    intro
    rationale
    install
    use
    syntax
    directives
    ast
    semantics
    models
    translation
    left_recursion
    mini-tutorial
    traces
    antlr
    examples
    support
    credits
    contributors
    contributing
    license

.. toctree::
    :hidden:

.. comment out

    Indices and tables
    ==================

    * :ref:`genindex`
    * :ref:`modindex`
    * :ref:`search`
python-tatsu-lts-5.13.1+ds/docs/install.rst000066400000000000000000000004041474373752700206330ustar00rootroot00000000000000
.. include:: links.rst

Installation
------------

.. code:: bash

    $ pip install tatsu

.. warning::

    Modern versions of |TatSu| require active versions of Python (if the
    Python version is more than one and a half years old, things may not
    work).
python-tatsu-lts-5.13.1+ds/docs/intro.rst000066400000000000000000000034611474373752700203260ustar00rootroot00000000000000
.. include:: links.rst

Introduction
------------

|TatSu| is *different* from other `PEG`_ parser generators:

- Generated parsers use `Python`_'s very efficient exception-handling system
  to backtrack. |TatSu| generated parsers simply assert what must be parsed.
  There are no complicated *if-then-else* sequences for decision making or
  backtracking. Memoization allows going over the same input sequence several
  times in linear time.
- *Positive and negative lookaheads*, and the *cut* element (with its
  cleaning of the memoization cache) allow for additional, hand-crafted
  optimizations at the grammar level.
- Delegation to `Python`_'s `re`_ module for *lexemes* allows for
  (`Perl`_-like) powerful and efficient lexical analysis.
- The use of `Python`_'s `context managers`_ considerably reduces the size of
  the generated parsers for code clarity, and enhanced CPU-cache hits.
- Include files, rule inheritance, and rule inclusion give |TatSu| grammars
  considerable expressive power.
- Automatic generation of Abstract Syntax Trees\_ and Object Models, along
  with *Model Walkers* and *Code Generators*, makes analysis and translation
  approachable.

The parser generator, the run-time support, and the generated parsers have
measurably low `Cyclomatic complexity`_. At around 5 `KLOC`_ of `Python`_, it
is possible to study all its source code in a single session.

The only dependencies are on the `Python`_ standard library, yet the `regex`_
library will be used if installed, and `colorama`_ will be used on trace
output if available. `pygraphviz`_ is required for generating diagrams.

|TatSu| is feature-complete and currently being used with complex grammars to
parse, analyze, and translate hundreds of thousands of lines of input text,
including source code in several programming languages.
python-tatsu-lts-5.13.1+ds/docs/left_recursion.rst000066400000000000000000000024061474373752700222140ustar00rootroot00000000000000..
include:: links.rst Left Recursion -------------- |TatSu| supports direct and indirect left recursion in grammar rules using the the algorithm described by *Nicolas Laurent* and *Kim Mens* in their 2015 paper_ *Parsing Expression Grammars Made Practical*. The design and implementation of left recursion was done by `Vic Nightfall`_ with research and help by `Nicolas Laurent`_ on Autumn_, and research by `Philippe Sigaud`_ on PEGGED_. .. _Autumn: https://github.com/norswap/autumn .. _PEGGED: https://github.com/PhilippeSigaud/Pegged/wiki/Left-Recursion Left recursive rules produce left-associative parse trees (AST_), as most users would expect, *except if some of the rules involved recurse on the right (a pending topic)*. .. _paper: http://norswap.com/pubs/sle2015.pdf Left recursion support is enabled by default in |TatSu|. To disable it for a particular grammar, use the ``@@left_recursion`` directive: .. code:: ocaml @@left_recursion :: False .. warning:: Not all left-recursive grammars that use the |TatSu| syntax are PEG_ (the same happens with right-recursive grammars). **The order of rules matters in PEG**. For right-recursive grammars the choices that parse the most input must come first. The same is true for left-recursive grammars. python-tatsu-lts-5.13.1+ds/docs/license.rst000066400000000000000000000000521474373752700206060ustar00rootroot00000000000000License ------- .. include:: LICENSE.txt python-tatsu-lts-5.13.1+ds/docs/links.rst000066400000000000000000000104571474373752700203160ustar00rootroot00000000000000.. include:: links_contributors.rst .. |dragon| unicode:: 0x7ADC .. unicode dragon .. |TatSu| replace:: |dragon| **TatSu** .. _ANTLR: http://www.antlr.org/ .. _AST: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. _Abstract Syntax Tree: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. _Algol W: http://en.wikipedia.org/wiki/Algol_W .. _Algorithms + Data Structures = Programs: http://www.amazon.com/Algorithms-Structures-Prentice-Hall-Automatic-Computation/dp/0130224189/ .. _BSD: http://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29 .. _C: http://en.wikipedia.org/wiki/C_language .. _COBOL: https://en.wikipedia.org/wiki/COBOL .. _CSAIL at MIT: http://www.csail.mit.edu/ .. _CST: https://en.wikipedia.org/wiki/Parse_tree .. _Cyclomatic complexity: http://en.wikipedia.org/wiki/Cyclomatic_complexity .. _Dennis Ritchie: http://en.wikipedia.org/wiki/Dennis_Ritchie .. _EBNF: http://en.wikipedia.org/wiki/Ebnf .. _English: http://en.wikipedia.org/wiki/English_grammar .. _Euler: http://en.wikipedia.org/wiki/Euler_programming_language .. _Grako: https://pypi.python.org/pypi/grako/ .. _Jack: http://en.wikipedia.org/wiki/Javacc .. _Japanese: http://en.wikipedia.org/wiki/Japanese_grammar .. _Java: https://en.wikipedia.org/wiki/Java .. _KLOC: http://en.wikipedia.org/wiki/KLOC .. _Keywords: https://en.wikipedia.org/wiki/Reserved_word .. _`left-recursive`: https://en.wikipedia.org/wiki/Left_recursion .. _LICENSE.txt: LICENSE.txt .. _LL(1): http://en.wikipedia.org/wiki/LL(1) .. _MediaWiki: http://www.mediawiki.org/wiki/MediaWiki .. _Modula-2: http://en.wikipedia.org/wiki/Modula-2 .. _Modula: http://en.wikipedia.org/wiki/Modula .. _Oberon-2: http://en.wikipedia.org/wiki/Oberon-2 .. _Oberon: http://en.wikipedia.org/wiki/Oberon_(programming_language) .. _PEG and Packrat parsing mailing list: https://lists.csail.mit.edu/mailman/listinfo/peg .. _PEG.js: http://pegjs.majda.cz/ .. 
_PEG: http://en.wikipedia.org/wiki/Parsing_expression_grammar .. _PL/0: http://en.wikipedia.org/wiki/PL/0 .. _PLY: http://www.dabeaz.com/ply/ply.html#ply_nn22 .. _Packrat: http://bford.info/packrat/ .. _Pascal: http://en.wikipedia.org/wiki/Pascal_programming_language .. _Perl: http://www.perl.org/ .. _PyPy team: http://pypy.org/people.html .. _PyPy: http://pypy.org/ .. _Python Weekly: http://www.pythonweekly.com/ .. _Python: http://python.org .. _Reserved Words: https://en.wikipedia.org/wiki/Reserved_word .. _Ruby: http://www.ruby-lang.org/ .. _Semantic Graph: http://en.wikipedia.org/wiki/Abstract_semantic_graph .. _SQL: https://en.wikipedia.org/wiki/SQL .. _StackOverflow: http://stackoverflow.com/tags/tatsu/info .. _Sublime Text: https://www.sublimetext.com .. _TatSu Forum: https://groups.google.com/forum/?fromgroups#!forum/tatsu .. _UCAB: http://www.ucab.edu.ve/ .. _USB: http://www.usb.ve/ .. _Unix: http://en.wikipedia.org/wiki/Unix .. _VIM: http://www.vim.org/ .. _WTK: http://en.wikipedia.org/wiki/Well-known_text .. _Warth et al: http://www.vpri.org/pdf/tr2007002_packrat.pdf .. _Well-known text: http://en.wikipedia.org/wiki/Well-known_text .. _Wirth: http://en.wikipedia.org/wiki/Niklaus_Wirth .. _blog post: http://dietbuddha.blogspot.com/2012/12/52python-encapsulating-exceptions-with.html .. _colorama: https://pypi.python.org/pypi/colorama/ .. _context managers: http://docs.python.org/2/library/contextlib.html .. _declensions: http://en.wikipedia.org/wiki/Declension .. _email: mailto:apalala@gmail.com .. _exceptions: http://www.jeffknupp.com/blog/2013/02/06/write-cleaner-python-use-exceptions/ .. _introduced: http://dl.acm.org/citation.cfm?id=964001.964011 .. _keyword: https://en.wikipedia.org/wiki/Reserved_word .. _keywords: https://en.wikipedia.org/wiki/Reserved_word .. _lambdafu: http://blog.marcus-brinkmann.de/ .. _`make a donation`: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=2TW56SV6WNJV6 .. _memoizing: http://en.wikipedia.org/wiki/Memoization .. _parsewkt: https://github.com/cleder/parsewkt .. _pygraphviz: https://pypi.python.org/pypi/pygraphviz .. _raw string literal: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals .. _re: https://docs.python.org/3.4/library/re.html .. _regex: https://pypi.python.org/pypi/regex .. _smc.mw: https://github.com/lambdafu/smc.mw .. _regular languages: https://en.wikipedia.org/wiki/Regular_language python-tatsu-lts-5.13.1+ds/docs/links_contributors.rst000066400000000000000000000066211474373752700231310ustar00rootroot00000000000000 .. _Alberto Berti: https://github.com/azazel75 .. _Andy Wright: https://github.com/acw1251 .. _Basel Shishani: https://bitbucket.org/basel-shishani .. _Basel Shishani: https://bitbucket.org/basel-shishani .. _Daniel Martin: https://github.com/fizbin .. _Daniele Nicolodi: https://github.com/dnicolodi .. _David Chen: https://github.com/davidchen .. _David Delassus: https://bitbucket.org/linkdd .. _David Röthlisberger: https://bitbucket.org/drothlis/ .. _David Röthlisberger: https://bitbucket.org/drothlis/ .. _David Sanders: https://github.com/davesque .. _Dmytro Ivanov: https://bitbucket.org/jimon .. _Felipe: https://github.com/fcoelho .. _Franck Pommereau: https://github.com/fpom .. _Franklin Lee: https://bitbucket.org/leewz .. _Gabriele Paganelli: https://bitbucket.org/gapag .. _Guido van Rossum: https://github.com/gvanrossum .. _Jack Taylor: https://github.com/rayjolt .. _Kathryn Long: https://bitbucket.org/starkat .. 
_Karthikeyan Singaravelan: https://github.com/tirkarthi .. _Manuel Jacob: https://github.com/manueljacob .. _Marcus Brinkmann: https://bitbucket.org/lambdafu/ .. _Mark Jason Dominus: https://github.com/mjdominus .. _Max Liebkies: https://bitbucket.org/gegenschall .. _Michael Noronha: https://github.com/mtn .. _Nicholas Bishop: https://github.com/nicholasbishop .. _Nicolas Laurent: https://github.com/norswap .. _Nils-Hero Lindemann: https://github.com/heronils .. _Oleg Komarov: https://github.com/okomarov .. _Paul Houle: https://github.com/paulhoule .. _Paul Sargent: https://bitbucket.org/pauls .. _Philippe Sigaud: https://github.com/PhilippeSigaud .. _Robert Speer: https://bitbucket.org/r_speer .. _Ruth-Polymnia: https://github.com/Ruth-Polymnia .. _Ryan Gonzales: https://github.com/kirbyfan64 .. _Ryan: https://github.com/r-chaves .. _S Brown: https://bitbucket.org/sjbrownBitbucket .. _Tonico Strasser: https://bitbucket.org/tonico_strasser .. _Vic Nightfall: https://github.com/Victorious3 .. _Victor Uriarte: https://bitbucket.org/vmuriart .. _Vinay Sajip: https://bitbucket.org/vinay.sajip .. _basel-shishani: https://bitbucket.org/basel-shishani .. _basel-shishani: https://bitbucket.org/basel-shishani .. _drothlis: https://bitbucket.org/drothlis .. _drothlis: https://bitbucket.org/drothlis .. _franz\_g: https://bitbucket.org/franz_g .. _franz_g: https://bitbucket.org/franz_g .. _gapag: https://bitbucket.org/gapag .. _gegenschall: https://bitbucket.org/gegenschall .. _gkimbar: https://bitbucket.org/gkimbar .. _gkimbar: https://bitbucket.org/gkimbar .. _jimon: https://bitbucket.org/jimon .. _leewz: https://bitbucket.org/leewz .. _linkdd: https://bitbucket.org/linkdd .. _nehz: https://bitbucket.org/nehz .. _nehz: https://bitbucket.org/nehz .. _neumond: https://bitbucket.org/neumond .. _neumond: https://bitbucket.org/neumond .. _pauls: https://bitbucket.org/pauls .. _pdw-mb: https://bitbucket.org/pdw-mb .. _pgebhard: https://bitbucket.org/pgebhard .. _pgebhard: https://bitbucket.org/pgebhard .. _r_speer: https://bitbucket.org/r_speer .. _siemer: https://bitbucket.org/siemer .. _siemer: https://bitbucket.org/siemer .. _sjbrownBitbucket: https://bitbucket.org/sjbrownBitbucket .. _starkat: https://bitbucket.org/starkat .. _tonico_strasser: https://bitbucket.org/tonico_strasser .. _vinay.sajip: https://bitbucket.org/vinay.sajip .. _vmuriart: https://bitbucket.org/vmuriart .. _by-Exist: https://github.com/by-Exist .. _commonism: https://github.com/commonism .. _Vincent Fazio: https://github.com/vfazio python-tatsu-lts-5.13.1+ds/docs/make.bat000066400000000000000000000014511474373752700200430ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build set SPHINXPROJ=tatsu if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd python-tatsu-lts-5.13.1+ds/docs/mini-tutorial.rst000066400000000000000000000351121474373752700217660ustar00rootroot00000000000000.. 
include:: links.rst .. _pegen: https://github.com/we-like-parsers/pegen .. _PEG parser: https://peps.python.org/pep-0617/ `Calc` Mini Tutorial -------------------- |TatSu| users have suggested that a simple calculator, like the one in the documentation for `PLY`_ would be useful. Here it is. The initial grammar ~~~~~~~~~~~~~~~~~~~ This is the original `PLY`_ grammar for arithmetic expressions: .. code:: expression : expression + term | expression - term | term term : term * factor | term / factor | factor factor : NUMBER | ( expression ) And this is the input expression for testing: .. code:: python 3 + 5 * ( 10 - 20 ) The Tatsu grammar ~~~~~~~~~~~~~~~~~ The first step is to convert the grammar to |TatSu| syntax and style, add rules for lexical elements (``number`` in this case), add a ``start`` rule that checks for end of input, and a directive to name the generated classes: .. code:: @@grammar::CALC start = expression $ ; expression = | expression '+' term | expression '-' term | term ; term = | term '*' factor | term '/' factor | factor ; factor = | '(' expression ')' | number ; number = /\d+/ ; Add *cut* expressions ~~~~~~~~~~~~~~~~~~~~~ *Cut* expressions make a parser commit to a particular option after certain tokens have been seen. They make parsing more efficient, because other options are not tried. They also make error messages more precise, because errors will be reported closest to the point of failure in the input. .. code:: @@grammar::CALC start = expression $ ; expression = | expression '+' ~ term | expression '-' ~ term | term ; term = | term '*' ~ factor | term '/' ~ factor | factor ; factor = | '(' ~ expression ')' | number ; number = /\d+/ ; Let's save the above grammar in a file called ``calc_cut.ebnf``. We can now compile the grammar, and test the parser: .. code:: python import json from pprint import pprint import tatsu def simple_parse(): with open('calc_cut.ebnf') as f: grammar = f.read() parser = tatsu.compile(grammar) ast = parser.parse('3 + 5 * ( 10 - 20 )') print('# SIMPLE PARSE') print('# AST') pprint(ast, width=20, indent=4) print() print('# JSON') print(json.dumps(ast, indent=4)) if __name__ == '__main__': simple_parse() .. Save the above in ``calc.py``. This is the output: .. code:: bash $ python calc.py .. code:: python # SIMPLE PARSE # AST [ '3', '+', [ '5', '*', [ '(', [ '10', '-', '20'], ')']]] # JSON [ "3", "+", [ "5", "*", [ "(", [ "10", "-", "20" ], ")" ] ] ] Annotating the grammar ~~~~~~~~~~~~~~~~~~~~~~ Dealing with `AST`_\ s that are lists of lists leads to code that is difficult to read, and error-prone. |TatSu| allows naming the elements in a rule to produce more humanly-readable `AST`_\ s and to allow for clearer semantics code. This is an annotated version of the grammar: .. code:: @@grammar::CALC start = expression $ ; expression = | left:expression op:'+' ~ right:term | left:expression op:'-' ~ right:term | term ; term = | left:term op:'*' ~ right:factor | left:term '/' ~ right:factor | factor ; factor = | '(' ~ @:expression ')' | number ; number = /\d+/ ; Save the annotated grammar in ``calc_annotated.ebnf``, change the grammar filename in ``calc.py`` and re-execute it to get the resulting AST: .. code:: python # ANNOTATED AST { 'left': '3', 'op': '+', 'right': { 'left': '5', 'op': '*', 'right': { 'left': '10', 'op': '-', 'right': '20'}}} Semantics ~~~~~~~~~~ Semantic actions for |TatSu| parsers are not specified in the grammar, but in a separate *semantics* class. .. 
code:: python from pprint import pprint import tatsu from tatsu.ast import AST class CalcBasicSemantics: def number(self, ast): return int(ast) def term(self, ast): if not isinstance(ast, AST): return ast elif ast.op == '*': return ast.left * ast.right elif ast.op == '/': return ast.left / ast.right else: raise Exception('Unknown operator', ast.op) def expression(self, ast): if not isinstance(ast, AST): return ast elif ast.op == '+': return ast.left + ast.right elif ast.op == '-': return ast.left - ast.right else: raise Exception('Unknown operator', ast.op) def parse_with_basic_semantics(): with open('calc_annotated.ebnf') as f: grammar = f.read() parser = tatsu.compile(grammar) ast = parser.parse( '3 + 5 * ( 10 - 20 )', semantics=CalcBasicSemantics() ) print('# BASIC SEMANTICS RESULT') pprint(ast, width=20, indent=4) if __name__ == '__main__': parse_with_basic_semantics() Save the above in ``calc_semantics.py`` and execute it with ``python calc_semantics.py``. The result is: .. code:: python # BASIC SEMANTICS RESULT -47 One rule per expression type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Having semantic actions determine what was parsed with ``isinstance()`` or querying the AST_ for operators is not very pythonic, nor object oriented, and it leads to code that's more difficult to maintain. It's preferable to have one rule per *expression kind*, something that will be necessary if we want to build object models to use *walkers* and *code generation*. .. code:: @@grammar::CALC start = expression $ ; expression = | addition | subtraction | term ; addition = left:expression op:'+' ~ right:term ; subtraction = left:expression op:'-' ~ right:term ; term = | multiplication | division | factor ; multiplication = left:term op:'*' ~ right:factor ; division = left:term op:'/' ~ right:factor ; factor = | '(' ~ @:expression ')' | number ; number = /\d+/ ; Save the above in ``calc_refactored.ebnf``. .. code:: python from pprint import pprint import tatsu class CalcSemantics: def number(self, ast): return int(ast) def addition(self, ast): return ast.left + ast.right def subtraction(self, ast): return ast.left - ast.right def multiplication(self, ast): return ast.left * ast.right def division(self, ast): return ast.left / ast.right def parse_refactored(): with open('calc_refactored.ebnf') as f: grammar = f.read() parser = tatsu.compile(grammar) ast = parser.parse( '3 + 5 * ( 10 - 20 )', semantics=CalcSemantics() ) print('# REFACTORED SEMANTICS RESULT') pprint(ast, width=20, indent=4) print() if __name__ == '__main__': parse_refactored() The semantics implementation is simpler, and the results are the same: .. code:: python # REFACTORED SEMANTICS RESULT -47 Object models ~~~~~~~~~~~~~ Binding semantics to grammar rules is powerful and versatile, but this approach risks tying the semantics to the *parsing process*, rather than to the parsed *objects*. That is not a problem for simple languages, like the arithmetic expression language in this tutorial. But as the complexity of the parsed language increases, the number of grammar rules quickly becomes larger than the types of objects parsed. |TatSu| can create typed object models directly from the parsing process which can be navigated (*walked*) and transformed (with *code generation*) in later passes. The first step to create an object model is to annotate the rule names with the desired class names: .. 
code::

    @@grammar::Calc


    start
        =
        expression $
        ;


    expression
        =
        | addition
        | subtraction
        | term
        ;


    addition::Add
        =
        left:term op:'+' ~ right:expression
        ;


    subtraction::Subtract
        =
        left:term op:'-' ~ right:expression
        ;


    term
        =
        | multiplication
        | division
        | factor
        ;


    multiplication::Multiply
        =
        left:factor op:'*' ~ right:term
        ;


    division::Divide
        =
        left:factor op:'/' ~ right:term
        ;


    factor
        =
        | subexpression
        | number
        ;


    subexpression
        =
        '(' ~ @:expression ')'
        ;


    number::int
        =
        /\d+/
        ;

Save the grammar in a file named ``calc_model.ebnf``.

The ``tatsu.objectmodel.Node`` descendants are synthesized at runtime using
``tatsu.semantics.ModelBuilderSemantics``. This is what the model looks like
when generated with the ``tatsu.to_python_model()`` function, or from the
command line with ``tatsu --object-model calc_model.ebnf -G calc_semantics_model.py``:

.. code:: python

    from tatsu.objectmodel import Node
    from tatsu.semantics import ModelBuilderSemantics


    class ModelBase(Node):
        pass


    class CalcModelBuilderSemantics(ModelBuilderSemantics):
        def __init__(self, context=None, types=None):
            types = [
                t for t in globals().values()
                if type(t) is type and issubclass(t, ModelBase)
            ] + (types or [])
            super(CalcModelBuilderSemantics, self).__init__(context=context, types=types)


    class Add(ModelBase):
        left = None
        op = None
        right = None


    class Subtract(ModelBase):
        left = None
        op = None
        right = None


    class Multiply(ModelBase):
        left = None
        op = None
        right = None


    class Divide(ModelBase):
        left = None
        right = None

The model that results from a parse can be printed, and walked:

.. code:: python

    import tatsu
    from tatsu.walkers import NodeWalker


    class CalcWalker(NodeWalker):
        def walk_object(self, node):
            return node

        def walk__add(self, node):
            return self.walk(node.left) + self.walk(node.right)

        def walk__subtract(self, node):
            return self.walk(node.left) - self.walk(node.right)

        def walk__multiply(self, node):
            return self.walk(node.left) * self.walk(node.right)

        def walk__divide(self, node):
            return self.walk(node.left) / self.walk(node.right)


    def parse_and_walk_model():
        with open('calc_model.ebnf') as f:
            grammar = f.read()

        parser = tatsu.compile(grammar, asmodel=True)
        model = parser.parse('3 + 5 * ( 10 - 20 )')

        print('# WALKER RESULT IS:')
        print(CalcWalker().walk(model))
        print()


    if __name__ == '__main__':
        parse_and_walk_model()

Save the above program in ``calc_model.py`` and execute it to get this result:

.. code:: python

    # WALKER RESULT IS:
    -47

Code Generation
~~~~~~~~~~~~~~~

Translation is one of the most common tasks in language processing.
Analysis often summarizes the parsed input, and *walkers* are good for that.

In translation, the output can often be as verbose as the input, so a
systematic approach that avoids bookkeeping as much as possible is
convenient.

|TatSu| provides support for template-based code generation (translation) in
the ``tatsu.codegen`` module.

Code generation works by defining a translation class for each class in the
model specified by the grammar.

Nowadays the preferred code generation strategy is to walk down the AST_ and
`print()` the desired output, with the help of the ``NodeWalker`` class, and
the ``IndentPrintMixin`` mixin. That's the strategy used by pegen_, the
precursor to the new `PEG parser`_ in Python_.

The following code generator translates input expressions to the postfix
instructions of a stack-based processor:

..
code:: python

    import sys

    from tatsu.model import Node
    from tatsu.walkers import NodeWalker
    from tatsu.mixins.indent import IndentPrintMixin
    from tatsu.codegen import ModelRenderer

    THIS_MODULE = sys.modules[__name__]


    class PostfixCodeGenerator(NodeWalker, IndentPrintMixin):

        def walk_Add(self, node: Node, *args, **kwargs):
            with self.indent():
                self.walk(node.left)  # type: ignore
                self.walk(node.right)  # type: ignore
                self.print('ADD')

        def walk_Subtract(self, node: Node, *args, **kwargs):
            with self.indent():
                self.walk(node.left)  # type: ignore
                self.walk(node.right)  # type: ignore
                self.print('SUB')

        def walk_Multiply(self, node: Node, *args, **kwargs):
            with self.indent():
                self.walk(node.left)  # type: ignore
                self.walk(node.right)  # type: ignore
                self.print('MUL')

        def walk_Divide(self, node: Node, *args, **kwargs):
            with self.indent():
                self.walk(node.left)  # type: ignore
                self.walk(node.right)  # type: ignore
                self.print('DIV')

        def walk_int(self, node: Node, *args, **kwargs):
            self.print('PUSH', node)

Save the above program in ``calc_translate.py`` and execute it to get this result:

.. code:: python

    # TRANSLATED TO POSTFIX
    PUSH 3
    PUSH 5
    PUSH 10
    PUSH 20
    SUB
    MUL
    ADD

python-tatsu-lts-5.13.1+ds/docs/models.rst000066400000000000000000000116411474373752700204550ustar00rootroot00000000000000.. include:: links.rst

Models
------

Building Models
~~~~~~~~~~~~~~~

Naming elements in grammar rules makes the parser discard uninteresting
parts of the input, like punctuation, to produce an *Abstract Syntax Tree*
(`AST`_) that reflects the semantic structure of what was parsed.

But an `AST`_ doesn't carry information about the rule that generated it, so
navigating the trees may be difficult.

|TatSu| defines the ``tatsu.model.ModelBuilderSemantics`` semantics class
which helps construct object models from abstract syntax trees:

.. code:: python

    from tatsu.model import ModelBuilderSemantics

    parser = MyParser(semantics=ModelBuilderSemantics())

Then you add the desired node type as the first parameter to each grammar
rule:

.. code:: ocaml

    addition::AddOperator = left:mulexpre '+' right:addition ;

``ModelBuilderSemantics`` will synthesize a ``class AddOperator(Node):``
class and use it to construct the node. The synthesized class will have one
attribute for each named element in the rule.

You can also use `Python`_'s built-in types as node types, and
``ModelBuilderSemantics`` will do the right thing:

.. code:: ocaml

    integer::int = /[0-9]+/ ;

``ModelBuilderSemantics`` acts as any other semantics class, so its default
behavior can be overridden by defining a method to handle the result of any
particular grammar rule.

Viewing Models as JSON
~~~~~~~~~~~~~~~~~~~~~~

Models generated by |TatSu| can be viewed by converting them to a
JSON-compatible structure with the help of ``tatsu.util.asjson()``. The
protocol tries to provide the best representation for common types, and can
handle any type using ``repr()``. There are provisions for structures with
back-references, so there's no infinite recursion.

.. code:: python

    import json
    print(json.dumps(asjson(model), indent=2))

The ``model``, with richer semantics, remains unaltered.

Conversion to a JSON-compatible structure relies on the protocol defined by
``tatsu.util.AsJSONMixin``. The mixin defines a ``__json__(seen=None)``
method that allows classes to define their best translation. You can use
``AsJSONMixin`` as a base class in your own models to take advantage of
``asjson()``, and you can specialize the conversion by overriding
``AsJSONMixin.__json__()``.
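As an illustration, here is a minimal sketch of a class that customizes its
JSON view this way (the ``Point`` class and its fields are hypothetical, not
part of |TatSu|; the import location of ``AsJSONMixin`` is assumed to match
``asjson()``'s):

.. code:: python

    from tatsu.util import asjson, AsJSONMixin

    class Point(AsJSONMixin):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __json__(self, seen=None):
            # render as a compact [x, y] pair instead of the default view
            return [self.x, self.y]

    print(asjson(Point(1, 2)))  # [1, 2]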
You can also write your own version of ``asjson()`` to handle special cases
that are recurrent in your context.

Walking Models
~~~~~~~~~~~~~~

The class ``tatsu.model.NodeWalker`` allows for the easy traversal (*walk*)
of a model constructed with a ``ModelBuilderSemantics`` instance:

.. code:: python

    from tatsu.model import NodeWalker

    class MyNodeWalker(NodeWalker):

        def walk_AddOperator(self, node):
            left = self.walk(node.left)
            right = self.walk(node.right)

            print('ADDED', left, right)

    model = MyParser(semantics=ModelBuilderSemantics()).parse(input)

    walker = MyNodeWalker()
    walker.walk(model)

When a method with a name like ``walk_AddOperator()`` is defined, it will be
called when a node of that type is *walked*. The *pythonic* version of the
class name may also be used for the *walk* method: ``walk__add_operator()``
(note the double underscore).

If a *walk* method for a node class is not found, then a method for the
class's bases is searched, so it is possible to write *catch-all* methods
such as:

.. code:: python

    def walk_Node(self, node):
        print('Reached Node', node)

    def walk_str(self, s):
        return s

    def walk_object(self, o):
        raise Exception(f'Unexpected type {type(o).__name__} walked')

Which nodes get *walked* is up to the ``NodeWalker`` implementation. Some
strategies for walking *all* or *most* nodes are implemented as classes in
``tatsu.walkers``, such as ``PreOrderWalker`` and ``DepthFirstWalker``.

Sometimes nodes must be walked more than once for the purpose at hand, and
it's up to the walker how and when to do that. Take a look at
``tatsu.ngcodegen.PythonCodeGenerator`` for the walker that generates a
parser in Python from the model of a parsed grammar.

Model Class Hierarchies
~~~~~~~~~~~~~~~~~~~~~~~

It is possible to specify a base class for generated model nodes:

.. code:: ocaml

    additive
        =
        | addition
        | substraction
        ;

    addition::AddOperator::Operator
        =
        left:mulexpre op:'+' right:additive
        ;

    substraction::SubstractOperator::Operator
        =
        left:mulexpre op:'-' right:additive
        ;

|TatSu| will generate the base class if it's not already known.

Base classes can be used as the target class in *walkers*, and in *code
generators*:

.. code:: python

    class MyNodeWalker(NodeWalker):
        def walk_Operator(self, node):
            left = self.walk(node.left)
            right = self.walk(node.right)
            op = self.walk(node.op)

            print(type(node).__name__, op, left, right)


    class Operator(ModelRenderer):
        template = '{left} {op} {right}'

python-tatsu-lts-5.13.1+ds/docs/rationale.rst000066400000000000000000000043451474373752700211530ustar00rootroot00000000000000.. include:: links.rst

Rationale
---------

|TatSu| was created to address some recurring problems encountered over
decades of working with parser generation tools:

-  Some programming languages allow the use of *keywords* as identifiers, or
   have different meanings for symbols depending on context (`Ruby`_). A
   parser needs control of lexical analysis to be able to handle those
   languages.

-  LL and LR grammars become contaminated with myriads of lookahead
   statements to deal with ambiguous constructs in the source language.
   `PEG`_ parsers address ambiguity from the onset.

-  Separating the grammar from the code that implements the semantics, and
   using a variation of a well-known grammar syntax (`EBNF`_) allows for
   full declarative power in language descriptions. General-purpose
   programming languages are not up to the task.

-  Semantic actions *do not* belong in a grammar.
They create yet another programming language to deal with when doing parsing and translation: the source language, the grammar language, the semantics language, the generated parser's language, and the translation's target language. Most grammar parsers do not check the syntax of embedded semantic actions, so errors get reported at awkward moments, and against the generated code, not against the grammar. - Preprocessing (like dealing with includes, fixed column formats, or structure-through-indentation) belongs in well-designed program code; not in the grammar. - It is easy to recruit help with knowledge about a mainstream programming language like `Python`_, but help is hard to find for working with complex grammar-description languages. |TatSu| grammars are in the spirit of a *Translators and Interpreters 101* course (if something is hard to explain to a college student, it's probably too complicated, or not well understood). - Generated parsers should be easy to read and debug by humans. Looking at the generated source code is sometimes the only way to find problems in a grammar, the semantic actions, or in the parser generator itself. It's inconvenient to trust generated code that one cannot understand. - `Python`_ is a great language for working with language parsing and translation. python-tatsu-lts-5.13.1+ds/docs/roles.rst000066400000000000000000000001351474373752700203120ustar00rootroot00000000000000 .. role:: console .. role:: blackboard .. role:: fail .. role:: succeed .. role:: try python-tatsu-lts-5.13.1+ds/docs/semantics.rst000066400000000000000000000043731474373752700211640ustar00rootroot00000000000000.. include:: links.rst Semantic Actions ---------------- There are no constructs for semantic actions in |TatSu| grammars. This is on purpose, because semantic actions obscure the declarative nature of grammars and provide for poor modularization from the parser-execution perspective. Semantic actions are defined in a class, and applied by passing an object of the class to the ``parse()`` method of the parser as the ``semantics=`` parameter. |TatSu| will invoke the method that matches the name of the grammar rule every time the rule parses. The argument to the method will be the `AST`_ constructed from the right-hand-side of the rule: .. code:: python class MySemantics: def some_rule_name(self, ast): return ''.join(ast) def _default(self, ast): pass If there's no method matching the rule's name, |TatSu| will try to invoke a ``_default()`` method if it's defined: .. code:: python def _default(self, ast): ... Nothing will happen if neither the per-rule method nor ``_default()`` are defined. The per-rule methods in classes implementing the semantics provide enough opportunity to do rule post-processing operations, like verifications (for inadequate use of keywords as identifiers), or `AST`_ transformation: .. code:: python class MyLanguageSemantics: def identifier(self, ast): if my_lange_module.is_keyword(ast): raise FailedSemantics('"%s" is a keyword' % str(ast)) return ast For finer-grained control it is enough to declare more rules, as the impact on the parsing times will be minimal. If preprocessing is required at some point, it is enough to place invocations of empty rules where appropriate: .. code:: python myrule = first_part preproc {second_part} ; preproc = () ; The abstract parser will honor as a semantic action a method declared as: .. code:: python def preproc(self, ast): ... .. _Abstract Syntax Tree: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. 
_AST: http://en.wikipedia.org/wiki/Abstract_syntax_tree .. _EBNF: http://en.wikipedia.org/wiki/Ebnf .. _PEG: http://en.wikipedia.org/wiki/Parsing_expression_grammar .. _Python: http://python.org .. _keywords: https://en.wikipedia.org/wiki/Reserved_word python-tatsu-lts-5.13.1+ds/docs/support.rst000066400000000000000000000001561474373752700207050ustar00rootroot00000000000000.. include:: links.rst Support ------- For general Q&A, please use the ``[tatsu]`` tag on `StackOverflow`_. python-tatsu-lts-5.13.1+ds/docs/syntax.rst000066400000000000000000000452331474373752700205240ustar00rootroot00000000000000.. include:: links.rst .. highlight:: none Grammar Syntax -------------- |TatSu| uses a variant of the standard `EBNF`_ syntax. Syntax definitions for `VIM`_ and for `Sublime Text`_ can be found under the ``etc/vim`` and ``etc/sublime`` directories in the source code distribution. Rules ~~~~~ A grammar consists of a sequence of one or more rules of the form:: name = ; If a *name* collides with a `Python`_ keyword, an underscore (``_``) will be appended to it on the generated parser. Rule names that start with an uppercase character:: FRAGMENT = /[a-z]+/ ; *do not* advance over whitespace before beginning to parse. This feature becomes handy when defining complex lexical elements, as it allows breaking them into several rules. The parser returns an `AST`_ value for each rule depending on what was parsed: - A single value - A list of `AST`_ - A dict-like object for rules with named elements - An object, when ModelBuilderSemantics is used - None See the `Abstract Syntax Trees`_ and `Building Models`_ sections for more details. .. _Abstract Syntax Trees: ast.html .. _Building Models: models.html Expressions ~~~~~~~~~~~ The expressions, in reverse order of operator precedence, can be: ``# comment`` ^^^^^^^^^^^^^ `Python`_-style comments are allowed. ``e1 | e2`` ^^^^^^^^^^^ Choice. Match either ``e1`` or ``e2``. A `|` may be used before the first option if desired:: choices = | e1 | e2 | e3 ; ``e1 e2`` ^^^^^^^^^ Sequence. Match ``e1`` and then match ``e2``. ``( e )`` ^^^^^^^^^ Grouping. Match ``e``. For example: ``('a' | 'b')``. ``[ e ]`` ^^^^^^^^^ Optionally match ``e``. ``{ e }`` or ``{ e }*`` ^^^^^^^^^^^^^^^^^^^^^^^ Closure. Match ``e`` zero or more times. The `AST`_ returned for a closure is always a ``list``. ``{ e }+`` ^^^^^^^^^^ Positive closure. Match ``e`` one or more times. The `AST`_ is always a ``list``. ``{}`` ^^^^^^ Empty closure. Match nothing and produce an empty ``list`` as `AST`_. ``~`` ^^^^^ The *cut* expression. Commit to the current active option and prevent other options from being considered even if what follows fails to parse. In this example, other options won't be considered if a parenthesis is parsed:: atom = | '(' ~ @:expre ')' | int | bool ; There are also options in optional expressions, because ``[foo]`` is equivalent to ``(foo|())``. There are options also in closures, because of a similar equivalency, so the following rule will fail if ``expression`` is not parsed after an ``=`` is parsed, while the version without the ``~`` would succeed over a partial parse of the ``name '=' expression`` ahead in the input:: parameters = ','.{name '=' ~ expression} ; ``s%{ e }+`` ^^^^^^^^^^^^ Positive join. Inspired by `Python`_'s ``str.join()``, it parses the same as this expression:: e {s ~ e} yet the result is a single list of the form: .. code:: python [e, s, e, s, e, ...] 
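For instance, this small illustrative rule (not taken from any grammar
above) joins digits with commas::

    list = ','%{ /\d+/ }+ ;

and would parse the input ``1, 2, 3`` into:

.. code:: python

    ['1', ',', '2', ',', '3']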
Use grouping if `s` is more complex than a *token* or a *pattern*::

    (s t)%{ e }+

``s%{ e }`` or ``s%{ e }*``
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Join. Parses the list of ``s``-separated expressions, or the empty closure.

It is equivalent to::

    s%{e}+|{}

``op<{ e }+``
^^^^^^^^^^^^^

Left join. Like the *join expression*, but the result is a left-associative
tree built with ``tuple()``, in which the first element is the separator
(``op``), and the other two elements are the operands.

The expression::

    '+'<{/\d+/}+

Will parse this input::

    1 + 2 + 3 + 4

To this tree:

.. code:: python

    (
        '+',
        (
            '+',
            (
                '+',
                '1',
                '2'
            ),
            '3'
        ),
        '4'
    )

``op>{ e }+``
^^^^^^^^^^^^^

Right join. Like the *join expression*, but the result is a
right-associative tree built with ``tuple()``, in which the first element is
the separator (``op``), and the other two elements are the operands.

The expression::

    '+'>{/\d+/}+

Will parse this input::

    1 + 2 + 3 + 4

To this tree:

.. code:: python

    (
        '+',
        '1',
        (
            '+',
            '2',
            (
                '+',
                '3',
                '4'
            )
        )
    )

``s.{ e }+``
^^^^^^^^^^^^

Positive *gather*. Like *positive join*, but the separator is not included
in the resulting `AST`_.

``s.{ e }`` or ``s.{ e }*``
^^^^^^^^^^^^^^^^^^^^^^^^^^^

*Gather*. Like the *join*, but the separator is not included in the
resulting `AST`_.

It is equivalent to::

    s.{e}+|{}

``&e``
^^^^^^

Positive lookahead. Succeed if ``e`` can be parsed, but do not consume any
input.

``!e``
^^^^^^

Negative lookahead. Fail if ``e`` can be parsed, and do not consume any
input.

``'text'`` or ``"text"``
^^^^^^^^^^^^^^^^^^^^^^^^

Match the token *text* within the quotation marks.

Note that if *text* is alphanumeric, then |TatSu| will check that the
character following the token is not alphanumeric. This is done to prevent
tokens like *IN* matching when the text ahead is *INITIALIZE*. This feature
can be turned off by passing ``nameguard=False`` to the ``Parser`` or the
``Buffer``, or by using a pattern expression (see below) instead of a token
expression. Alternatively, the ``@@nameguard`` or ``@@namechars`` directives
may be specified in the grammar::

    @@nameguard :: False

or to specify additional characters that should also be considered part of
names::

    @@namechars :: '$-.'

``r'text'`` or ``r"text"``
^^^^^^^^^^^^^^^^^^^^^^^^^^

Match the token *text* within the quotation marks, interpreting *text* like
`Python`_'s `raw string literal`_\ s.

``?"regexp"`` or ``?'regexp'`` or ``/regexp/``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The *pattern* expression. Match the `Python`_ regular expression ``regexp``
at the current text position. Unlike other expressions, this one does not
advance over whitespace or comments. For that, place the ``regexp`` as the
only term in its own rule.

The *regex* is interpreted as a Python_ `raw string literal`_ and passed to
the Python_ re_ module using ``match()`` at the current position in the
text. The returned AST_ has the semantics of ``re.findall(pattern, text)[0]``
(a `tuple` if there is more than one group), so use ``(?:)`` for groups that
should not be in the resulting AST_.

Consecutive *patterns* are concatenated to form a single one.

``/./``
^^^^^^^

The *any* expression, matches the next position in the input. It works
exactly like the ``?'.'`` pattern, but is implemented at the lexical level,
without regular expressions.

``->e``
^^^^^^^

The "*skip to*" expression; useful for writing *recovery* rules.

The parser will advance over input, one character at a time, until ``e``
matches. Whitespace and comments will be skipped at each step.
Advancing over input is done efficiently, with no regular expressions
involved.

The expression is equivalent to::

    { !e /./ } e

A common form of the expression is ``->&e``, which is equivalent to::

    { !e /./ } &e

This is an example of the use of the "*skip to*" expression for recovery::

    statement
        =
        | if_statement
        # ...
        ;

    if_statement
        =
        | 'if' condition 'then' statement ['else' statement]
        | 'if' statement_recovery
        ;

    statement_recovery = ->&statement ;

```constant```
^^^^^^^^^^^^^^

Match nothing, but behave as if ``constant`` had been parsed.

Constants can be used to inject elements into the concrete and abstract
syntax trees, perhaps avoiding having to write a semantic action. For
example::

    boolean_option = name ['=' (boolean|`true`) ] ;

If the text evaluates to a Python literal (with ``ast.literal_eval()``),
that will be the returned value. Otherwise, string interpolation in the
style of ``str.format()`` over the names in the current `AST`_ is applied
for *constant* elements. Occurrences of the ``{`` character must be escaped
to ``\{`` if they are not intended for interpolation.

A *constant* expression that has type ``str`` is evaluated using::

    eval(f'{"f" + repr(text)}', {}, ast)

`````constant`````
^^^^^^^^^^^^^^^^^^

A multiline version of ```constant```.

^ ```constant``` and ^ `````constant`````
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An alert. There will be no token returned by the parser, but an alert will
be registered in the parse context and added to the current node's
``parseinfo``. The ``^`` character may appear more than once to indicate the
alert level::

    assignment = identifier '=' (
        | value
        | ->&';' ^^^`could not parse value in assignment to {identifier}`
        )
        ;

``rulename``
^^^^^^^^^^^^

Invoke the rule named ``rulename``. To help with lexical aspects of
grammars, rules with names that begin with an uppercase letter will not
advance the input over whitespace or comments.

``>rulename``
^^^^^^^^^^^^^

The include operator. Include the *right hand side* of rule ``rulename`` at
this point.

The following set of declarations::

    includable = exp1 ;

    expanded = exp0 >includable exp2 ;

Has the same effect as defining *expanded* as::

    expanded = exp0 exp1 exp2 ;

Note that the included rule must be defined before the rule that includes
it.

``()``
^^^^^^

The empty expression. Succeed without advancing over input. Its value is
``None``.

``!()``
^^^^^^^

The *fail* expression. This is actually ``!`` applied to ``()``, which
always fails.

``name:e``
^^^^^^^^^^

Add the result of ``e`` to the `AST`_ using ``name`` as key. If ``name``
collides with any attribute or method of ``dict``, or is a `Python`_
keyword, an underscore (``_``) will be appended to the name.

When there are no named items in a rule, the `AST`_ consists of the elements
parsed by the rule, either a single item or a ``list``. This default
behavior makes it easier to write simple rules::

    number = /[0-9]+/ ;

Without having to write::

    number = number:/[0-9]+/ ;

When a rule has named elements, the unnamed ones are excluded from the
`AST`_ (they are ignored).

``name+:e``
^^^^^^^^^^^

Add the result of ``e`` to the `AST`_ using ``name`` as key. Force the entry
to be a ``list`` even if only one element is added. Collisions with ``dict``
attributes or `Python`_ keywords are resolved by appending an underscore to
``name``.

``@:e``
^^^^^^^

The override operator. Make the `AST`_ for the complete rule be the `AST`_
for ``e``.
The override operator is useful to recover only part of the right hand side
of a rule without the need to name it, or add a semantic action.

This is a typical use of the override operator::

    subexp = '(' @:expre ')' ;

The `AST`_ returned for the ``subexp`` rule will be the `AST`_ recovered
from invoking ``expre``.

``@+:e``
^^^^^^^^

Like ``@:e``, but make the `AST`_ always be a ``list``.

This operator is convenient in cases such as::

    arglist = '(' @+:arg {',' @+:arg}* ')' ;

In which the delimiting tokens are of no interest.

``$``
^^^^^

The *end of text* symbol. Verify that the end of the input text has been
reached.

..

Deprecated Expressions
~~~~~~~~~~~~~~~~~~~~~~

The following expressions are still recognized in grammars, but they are
considered deprecated, and will be removed in a future version of |TatSu|.

``?/regexp/?``
^^^^^^^^^^^^^^

Another form of the pattern expression that can be used when there are
slashes (``/``) in the pattern. Use the ``?"regexp"`` or ``?'regexp'`` forms
instead.

``+?"regexp"`` or ``+?'regexp'`` or ``+/regexp/``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Concatenate the given pattern with the preceding one.

``(* comment *)``
^^^^^^^^^^^^^^^^^

Comments may appear anywhere in the text. Use the `Python`_-style comments
instead.

Rules with Arguments
~~~~~~~~~~~~~~~~~~~~

|TatSu| allows rules to specify `Python`_-style arguments::

    addition(Add, op='+')
        =
        addend '+' addend
        ;

The argument values are fixed at grammar-compilation time.

An alternative syntax is available if no *keyword parameters* are required::

    addition::Add, '+'
        =
        addend '+' addend
        ;

Semantic methods must be ready to receive any arguments declared in the
corresponding rule:

.. code:: python

    def addition(self, ast, name, op=None):
        ...

When working with rule arguments, it is good to define a ``_default()``
method that is ready to take any combination of standard and keyword
arguments:

.. code:: python

    def _default(self, ast, *args, **kwargs):
        ...

Based Rules
~~~~~~~~~~~

Rules may extend previously defined rules using the ``<`` operator. The
*base rule* must be defined previously in the grammar.

The following set of declarations::

    base::Param = exp1 ;

    extended < base = exp2 ;

Has the same effect as defining *extended* as::

    extended::Param = exp1 exp2 ;

Parameters from the *base rule* are copied to the new rule if the new rule
doesn't define its own. Repeated inheritance should be possible, but it
*hasn't been tested*.

Memoization
~~~~~~~~~~~

|TatSu| is a packrat parser. The result of parsing a rule at a given
position in the input is cached, so the next time the parser visits the same
input position with the same rule the same result is returned and the input
advanced, without repeating the parsing.

Memoization allows for grammars that are clearer and easier to write because
there's no fear that repeating subexpressions will impact performance.

There are rules that should not be memoized. For example, rules that may
succeed or not depending on the associated semantic action should not be
memoized if success depends on more than just the input.

The ``@nomemo`` decorator turns off memoization for a particular rule::

    @nomemo
    INDENT = () ;

    @nomemo
    DEDENT = () ;

Rule Overrides
~~~~~~~~~~~~~~

A grammar rule may be redefined by using the ``@override`` decorator::

    start = ab $;

    ab = 'xyz' ;

    @override
    ab = @:'a' {@:'b'} ;

When combined with the ``#include`` directive, rule overrides can be used to
create a modified grammar without altering the original.
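A minimal sketch of that pattern (the file name and the ``identifier`` rule
here are illustrative, not taken from an actual grammar)::

    #include :: "base.ebnf"

    @override
    identifier = /[a-z_]+/ ;

Here ``base.ebnf`` would define ``identifier`` along with the rest of the
grammar, and the including grammar replaces just that one rule while leaving
the original file untouched.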
Grammar Name
~~~~~~~~~~~~

The prefix to be used in classes generated by |TatSu| can be passed to the
command-line tool using the ``-m`` option:

.. code:: bash

    $ tatsu -m MyLanguage mygrammar.ebnf

will generate:

.. code:: python

    class MyLanguageParser(Parser):
        ...

The name can also be specified within the grammar using the ``@@grammar``
directive::

    @@grammar :: MyLanguage

Whitespace
~~~~~~~~~~

By default, |TatSu| generated parsers skip the usual whitespace characters
with the regular expression ``r'\s+'`` using the ``re.UNICODE`` flag (or
with the ``Pattern_White_Space`` property if the `regex`_ module is
available), but you can change that behavior by passing a ``whitespace``
parameter to your parser.

For example, the following will skip over *tab* (``\t``) and *space*
characters, but not so with other typical whitespace characters such as
*newline* (``\n``):

.. code:: python

    parser = MyParser(text, whitespace='\t ')

The character string is converted into a regular expression character set
before starting to parse.

You can also provide a regular expression directly instead of a string. The
following is equivalent to the above example:

.. code:: python

    parser = MyParser(text, whitespace=re.compile(r'[\t ]+'))

Note that the regular expression must be pre-compiled to let |TatSu|
distinguish it from a plain string.

If you do not define any whitespace characters, then you will have to handle
whitespace in your grammar rules (as it's often done in `PEG`_ parsers):

.. code:: python

    parser = MyParser(text, whitespace='')

Whitespace may also be specified within the grammar using the
``@@whitespace`` directive, although any of the above methods will overwrite
the setting in the grammar::

    @@whitespace :: /[\t ]+/

If no ``whitespace`` or ``@@whitespace`` is specified, |TatSu| will use
``r'(?m)\s+'`` as a default. Use ``None`` to have *no whitespace
definition*.

.. code:: python

    parser = MyParser(text, whitespace=None)

or:

.. code::

    @@whitespace :: None

Case Sensitivity
~~~~~~~~~~~~~~~~

If the source language is case insensitive, it can be specified in the
parser by using the ``ignorecase`` parameter:

.. code:: python

    parser = MyParser(text, ignorecase=True)

You may also specify case insensitivity within the grammar using the
``@@ignorecase`` directive::

    @@ignorecase :: True

The change will affect token matching, but not pattern matching. Use `(?i)`
in patterns that should ignore case.

Comments
~~~~~~~~

Parsers will skip over comments specified as a regular expression using the
``comments`` parameter:

.. code:: python

    parser = MyParser(text, comments="\(\*.*?\*\)")

For more complex comment handling, you can override the
``Buffer.eat_comments()`` method.

For flexibility, it is possible to specify a pattern for end-of-line
comments separately:

.. code:: python

    parser = MyParser(
        text,
        comments="\(\*.*?\*\)",
        eol_comments="#.*?$"
    )

Both patterns may also be specified within a grammar using the
``@@comments`` and ``@@eol_comments`` directives::

    @@comments :: /\(\*.*?\*\)/
    @@eol_comments :: /#.*?$/

Reserved Words and Keywords
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some languages must prevent the use of certain tokens as valid identifiers
because the tokens are used to mark particular constructs in the language.
Those reserved tokens are known as `Reserved Words`_ or `Keywords`_.

|TatSu| provides support for preventing the use of `keywords`_ as
identifiers through the ``@@keyword`` directive, and the ``@name``
decorator.
A grammar may specify reserved tokens by providing a list of them in one or
more ``@@keyword`` directives::

    @@keyword :: if endif
    @@keyword :: else elseif

The ``@name`` decorator checks that the result of a grammar rule does not
match a token defined as a `keyword`_::

    @name
    identifier = /(?!\d)\w+/ ;

There are situations in which a token is reserved only in a very specific
context. In those cases, a negative lookahead will prevent the use of the
token::

    statements = {!'END' statement}+ ;

Include Directive
~~~~~~~~~~~~~~~~~

|TatSu| grammars support file inclusion through the include directive::

    #include :: "filename"

The resolution of the *filename* is relative to the directory/folder of the
source. Absolute paths and ``../`` navigations are honored.

The functionality required for implementing includes is available to all
|TatSu|-generated parsers through the ``Buffer`` class; see the
``EBNFBuffer`` class in the ``tatsu.parser`` module for an example.

Left Recursion
~~~~~~~~~~~~~~

|TatSu| supports left recursion in `PEG`_ grammars. The algorithm used is
`Warth et al`_'s.

Sometimes, while debugging a grammar, it is useful to turn left-recursion
support on or off:

.. code:: python

    parser = MyParser(
        text,
        left_recursion=True,
    )

Left recursion can also be turned off from within the grammar using the
``@@left_recursion`` directive::

    @@left_recursion :: False

python-tatsu-lts-5.13.1+ds/docs/traces.rst000066400000000000000000000247521474373752700204600ustar00rootroot00000000000000.. include:: links.rst
.. include:: roles.rst

Traces
------

|TatSu| compiling and parsing actions have a ``trace=`` argument
(``--trace`` on the command line). When used with the ``colorize=`` option
(``--color`` on the command line), it produces a trace like the following,
in which colors mean :try:`try`, :succeed:`succeed`, and :fail:`fail`.
| :try:`↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙expression↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :fail:`⟲ expression↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙expression↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :fail:`⟲ expression↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :fail:`⟲ term↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :fail:`⟲ term↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙factor↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :fail:`≢'(' ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :try:`↙number↙factor↙term↙expression↙start ~1:1` | :console:`\3 + 5 * ( 10 - 20 )` | :succeed:`≡'3' /\d+/ ~1:2` | :console:`\ + 5 * ( 10 - 20 )` | :succeed:`≡number↙factor↙term↙expression↙start ~1:2` | :console:`\ + 5 * ( 10 - 20 )` | :succeed:`≡factor↙term↙expression↙start ~1:2` | :console:`\ + 5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :succeed:`≡term↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :fail:`≢'*' ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :succeed:`≡term↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :fail:`≢'/' ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :try:`↙factor↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :fail:`≢'(' ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :try:`↙number↙factor↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :fail:`≢'' /\d+/ ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :fail:`≢factor↙term↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :succeed:`≡term↙expression↙start ~1:2` | :console:`\ + 5 * ( 10 - 20 )` | :try:`↙expression↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :succeed:`≡expression↙expression↙start ~1:3` | :console:`\+ 5 * ( 10 - 20 )` | :succeed:`≡'+' ~1:4` | :console:`\ 5 * ( 10 - 20 )` | :try:`↙term↙expression↙start ~1:4` | :console:`\ 5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :fail:`⟲ term↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :fail:`⟲ term↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :try:`↙factor↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :fail:`≢'(' ~1:5` | :console:`\5 * ( 10 - 20 )` | :try:`↙number↙factor↙term↙expression↙start ~1:5` | :console:`\5 * ( 10 - 20 )` | :succeed:`≡'5' /\d+/ ~1:6` | :console:`\ * ( 10 - 20 )` | :succeed:`≡number↙factor↙term↙expression↙start ~1:6` | :console:`\ * ( 10 - 20 )` | :succeed:`≡factor↙term↙expression↙start ~1:6` | :console:`\ * ( 10 - 20 )` | :try:`↙term↙term↙expression↙start ~1:7` | :console:`\* ( 10 - 20 )` | :succeed:`≡term↙term↙expression↙start ~1:7` | :console:`\* ( 10 - 20 )` | :succeed:`≡'*' ~1:8` | :console:`\ ( 10 - 20 )` | :try:`↙factor↙term↙expression↙start ~1:8` | :console:`\ ( 10 - 20 )` | :succeed:`≡'(' ~1:10` | :console:`\ 10 - 20 )` | :try:`↙expression↙factor↙term↙expression↙start ~1:10` | :console:`\ 10 - 20 )` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:11` | 
:console:`\10 - 20 )` | :fail:`⟲ expression↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :fail:`⟲ expression↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :try:`↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :fail:`⟲ term↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :fail:`⟲ term↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :try:`↙factor↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :fail:`≢'(' ~1:11` | :console:`\10 - 20 )` | :try:`↙number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:11` | :console:`\10 - 20 )` | :succeed:`≡'10' /\d+/ ~1:13` | :console:`\ - 20 )` | :succeed:`≡number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:13` | :console:`\ - 20 )` | :succeed:`≡factor↙term↙expression↙factor↙term↙expression↙start ~1:13` | :console:`\ - 20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡term↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :fail:`≢'*' ~1:14` | :console:`\- 20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡term↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :fail:`≢'/' ~1:14` | :console:`\- 20 )` | :try:`↙factor↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :fail:`≢'(' ~1:14` | :console:`\- 20 )` | :try:`↙number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :fail:`≢'' /\d+/ ~1:14` | :console:`\- 20 )` | :fail:`≢factor↙term↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡term↙expression↙factor↙term↙expression↙start ~1:13` | :console:`\ - 20 )` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡expression↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :fail:`≢'+' ~1:14` | :console:`\- 20 )` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡expression↙expression↙factor↙term↙expression↙start ~1:14` | :console:`\- 20 )` | :succeed:`≡'-' ~1:15` | :console:`\ 20 )` | :try:`↙term↙expression↙factor↙term↙expression↙start ~1:15` | :console:`\ 20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :fail:`⟲ term↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :fail:`⟲ term↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :try:`↙factor↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :fail:`≢'(' ~1:16` | :console:`\20 )` | :try:`↙number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:16` | :console:`\20 )` | :succeed:`≡'20' /\d+/ ~1:18` | :console:`\ )` | :succeed:`≡number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:18` | :console:`\ )` | :succeed:`≡factor↙term↙expression↙factor↙term↙expression↙start ~1:18` | :console:`\ )` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | 
:succeed:`≡term↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'*' ~1:19` | :console:`\)` | :try:`↙term↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :succeed:`≡term↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'/' ~1:19` | :console:`\)` | :try:`↙factor↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'(' ~1:19` | :console:`\)` | :try:`↙number↙factor↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'' /\d+/ ~1:19` | :console:`\)` | :fail:`≢factor↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :succeed:`≡term↙expression↙factor↙term↙expression↙start ~1:18` | :console:`\ )` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :succeed:`≡expression↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'+' ~1:19` | :console:`\)` | :try:`↙expression↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :succeed:`≡expression↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢'-' ~1:19` | :console:`\)` | :try:`↙term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :fail:`≢term↙expression↙factor↙term↙expression↙start ~1:19` | :console:`\)` | :succeed:`≡expression↙factor↙term↙expression↙start ~1:18` | :console:`\ )` | :succeed:`≡')'` | :succeed:`≡factor↙term↙expression↙start` | :try:`↙term↙term↙expression↙start` | :succeed:`≡term↙term↙expression↙start` | :fail:`≢'*'` | :try:`↙term↙term↙expression↙start` | :succeed:`≡term↙term↙expression↙start` | :fail:`≢'/'` | :try:`↙factor↙term↙expression↙start` | :fail:`≢'('` | :try:`↙number↙factor↙term↙expression↙start` | :fail:`≢'' /\d+/` | :fail:`≢factor↙term↙expression↙start` | :succeed:`≡term↙expression↙start` | :try:`↙expression↙expression↙start` | :succeed:`≡expression↙expression↙start` | :fail:`≢'+'` | :try:`↙expression↙expression↙start` | :succeed:`≡expression↙expression↙start` | :fail:`≢'-'` | :try:`↙term↙expression↙start` | :fail:`≢term↙expression↙start` | :succeed:`≡expression↙start` | :succeed:`≡start` ---- .. python-tatsu-lts-5.13.1+ds/docs/translation.rst000066400000000000000000000124531474373752700215320ustar00rootroot00000000000000.. include:: links.rst .. _mini-tutorial: mini-tutorial.rst .. _pegen: https://github.com/we-like-parsers/pegen .. _PEG parser: https://peps.python.org/pep-0617/ Translation ----------- Translation is one of the most common tasks in language processing. Analysis often sumarizes the parsed input, and *walkers* are good for that. |TatSu| doesn't impose a way to create translators, but it exposes the facilities it uses to generate the `Python`_ source code for parsers. Print Translation ~~~~~~~~~~~~~~~~~ Translation in |TatSu| is based on subclasses of ``NodeWalker``. Print-based translation relies on classes that inherit from ``IndentPrintMixin``, a strategy copied from the new PEG_ parser in Python_ (see `PEP 617`_). ``IndentPrintMixin`` provides an ``indent()`` method, which is a context manager, and should be used thus: .. code:: python class MyTranslationWalker(NodeWalker, IndentPrintMixin): def walk_SomeNodeType(self, node: NodeType): self.print('some preamble') with self.indent(): # continue walking the tree self.print('something else') The ``self.print()`` method takes note of the current level of indentation, so output will be indented by the `indent` passed to the ``IndentPrintMixin`` constructor, or to the ``indent(amount: int)`` method. 
The mixin keeps a stack of the indent amounts so it can go back to where it
was after each ``with indent(amount=n):`` statement:

.. code:: python

    def walk_SomeNodeType(self, node: NodeType):
        with self.indent(amount=2):
            self.print(node.exp)

The printed code can be retrieved using the ``printed_text()`` method, but
other possibilities are available by assigning a stream-like object to
``self.output_stream`` in the ``__init__()`` method.

A good example of how to do code generation with a ``NodeWalker`` and
``IndentPrintMixin`` is |TatSu|'s own code generator, which can be found in
``tatsu/ngcodegen/python.py``, or the model generation found in
``tatsu/ngcodegen/objectmodel.py``.

.. _PEP 617: https://peps.python.org/pep-0617/

Declarative Translation (deprecated)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

|TatSu| provides support for template-based code generation ("translation",
see below) in the ``tatsu.codegen`` module.

Code generation works by defining a translation class for each class in the
model specified by the grammar.

Nowadays the preferred code generation strategy is to walk down the AST_ and
`print()` the desired output, with the help of the ``NodeWalker`` class, and
the ``IndentPrintMixin`` mixin. That's the strategy used by pegen_, the
precursor to the new `PEG parser`_ in Python_. Please take a look at the
`mini-tutorial`_ for an example.

Basically, the code generation strategy changed from declarative with
library support, to procedural, breadth or depth first, using only standard
Python_. The procedural code must know the AST_ structure to navigate it,
although other strategies are available with ``PreOrderWalker``,
``DepthFirstWalker``, and ``ContextWalker``.

|TatSu| doesn't impose a way to create translators with it, but it exposes
the facilities it uses to generate the `Python`_ source code for parsers.

Translation in |TatSu| was *template-based*, but instead of defining or
using a complex templating engine (yet another language), it relies on the
simple but powerful ``string.Formatter`` of the `Python`_ standard library.
The templates are simple strings that, in |TatSu|'s style, are inlined with
the code.

To generate a parser, |TatSu| constructs an object model of the parsed
grammar. A ``tatsu.codegen.CodeGenerator`` instance matches model objects to
classes that descend from ``tatsu.codegen.ModelRenderer`` and implement the
translation and rendering using string templates. Templates are left-trimmed
on whitespace, like `Python`_ *doc-comments* are. This is an example taken
from |TatSu|'s source code:

.. code:: python

    class Lookahead(ModelRenderer):
        template = '''\
            with self._if():
            {exp:1::}\
            '''

Every *attribute* of the object that doesn't start with an underscore
(``_``) may be used as a template field, and fields can be added or modified
by overriding the ``render_fields(fields)`` method. Fields themselves are
*lazily rendered* before being expanded by the template, so a field may be
an instance of a ``ModelRenderer`` descendant.

The ``rendering`` module defines a ``Formatter`` enhanced to support the
rendering of items in an *iterable* one by one. The syntax to achieve that
is:

.. code:: python

    '''
    {fieldname:ind:sep:fmt}
    '''

All of ``ind``, ``sep``, and ``fmt`` are optional, but the three *colons*
are not. A field specified that way will be rendered using:

.. code:: python

    indent(sep.join(fmt % render(v) for v in value), ind)

The extended format can also be used with non-iterables, in which case the
rendering will be:

..
code:: python indent(fmt % render(value), ind) The default multiplier for ``ind`` is ``4``, but that can be overridden using ``n*m`` (for example ``3*1``) in the format. **note** Using a newline character (``\n``) as separator will interfere with left trimming and indentation of templates. To use a newline as separator, specify it as ``\\n``, and the renderer will understand the intention. python-tatsu-lts-5.13.1+ds/docs/use.rst000066400000000000000000000203441474373752700177660ustar00rootroot00000000000000.. include:: links.rst Using the Tool -------------- As a Library ~~~~~~~~~~~~ |TatSu| can be used as a library, much like `Python`_'s ``re``, by embedding grammars as strings and generating grammar models instead of generating Python_ code. - ``tatsu.compile(grammar, name=None, **kwargs)`` Compiles the grammar and generates a *model* that can subsequently be used for parsing input with. - ``tatsu.parse(grammar, input, start=None, **kwargs)`` Compiles the grammar and parses the given input producing an AST_ as result. The result is equivalent to calling:: model = compile(grammar) ast = model.parse(input) Compiled grammars are cached for efficiency. - ``tatsu.to_python_sourcecode(grammar, name=None, filename=None, **kwargs)`` Compiles the grammar to the `Python`_ sourcecode that implements the parser. - ``to_python_model(grammar, name=None, filename=None, **kwargs)`` Compiles the grammar and generates the `Python`_ sourcecode that implements the object model defined by rule annotations. This is an example of how to use **Tatsu** as a library: .. code:: python GRAMMAR = ''' @@grammar::Calc start = expression $ ; expression = | term '+' ~ expression | term '-' ~ expression | term ; term = | factor '*' ~ term | factor '/' ~ term | factor ; factor = | '(' ~ @:expression ')' | number ; number = /\d+/ ; ''' def main(): import pprint import json from tatsu import parse from tatsu.util import asjson ast = parse(GRAMMAR, '3 + 5 * ( 10 - 20 )') print('PPRINT') pprint.pprint(ast, indent=2, width=20) print() print('JSON') print(json.dumps(asjson(ast), indent=2)) print() if __name__ == '__main__': main() And this is the output: .. code:: bash PPRINT [ '3', '+', [ '5', '*', [ '10', '-', '20']]] JSON [ "3", "+", [ "5", "*", [ "10", "-", "20" ] ] ] Compiling grammars to Python ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Tatsu** can be run from the command line: .. code:: bash $ python -m tatsu Or: .. code:: bash $ scripts/tatsu Or just: .. code:: bash $ tatsu if **Tatsu** was installed using *easy\_install* or *pip*. The *-h* and *--help* parameters provide full usage information: .. code:: bash $ python -m tatsu -h usage: tatsu [--generate-parser | --draw | --object-model | --pretty] [--color] [--trace] [--no-left-recursion] [--name NAME] [--no-nameguard] [--outfile FILE] [--object-model-outfile FILE] [--whitespace CHARACTERS] [--help] [--version] GRAMMAR TatSu takes a grammar in a variation of EBNF as input, and outputs a memoizing PEG/Packrat parser in Python. 
The *-h* and *--help* parameters provide full usage information:

.. code:: bash

    $ python -m tatsu -h
    usage: tatsu [--generate-parser | --draw | --object-model | --pretty]
                 [--color] [--trace] [--no-left-recursion] [--name NAME]
                 [--no-nameguard] [--outfile FILE] [--object-model-outfile FILE]
                 [--whitespace CHARACTERS] [--help] [--version]
                 GRAMMAR

    TatSu takes a grammar in a variation of EBNF as input, and outputs a
    memoizing PEG/Packrat parser in Python.

    positional arguments:
      GRAMMAR               the filename of the Tatsu grammar to parse

    optional arguments:
      --generate-parser     generate parser code from the grammar (default)
      --draw, -d            generate a diagram of the grammar (requires --outfile)
      --object-model, -g    generate object model from the class names given
                            as rule arguments
      --pretty, -p          generate a prettified version of the input grammar

    parse-time options:
      --color, -c           use color in traces (requires the colorama library)
      --trace, -t           produce verbose parsing output

    generation options:
      --no-left-recursion, -l
                            turns left-recursion support off
      --name NAME, -m NAME  Name for the grammar (defaults to GRAMMAR base name)
      --no-nameguard, -n    allow tokens that are prefixes of others
      --outfile FILE, --output FILE, -o FILE
                            output file (default is stdout)
      --object-model-outfile FILE, -G FILE
                            generate object model and save to FILE
      --whitespace CHARACTERS, -w CHARACTERS
                            characters to skip during parsing (use "" to disable)

    common options:
      --help, -h            show this help message and exit
      --version, -v         provide version information and exit
    $

The Generated Parsers
~~~~~~~~~~~~~~~~~~~~~

A **Tatsu** generated parser consists of the following classes:

- A ``MyLanguageBuffer`` class derived from ``tatsu.buffering.Buffer`` that
  handles the grammar definitions for *whitespace*, *comments*, and *case
  significance*.

- A ``MyLanguageParser`` class derived from ``tatsu.parsing.Parser`` which
  uses a ``MyLanguageBuffer`` for traversing input text, and implements the
  parser using one method for each grammar rule:

  .. code:: python

      def _somerulename_(self):
          ...

- A ``MyLanguageSemantics`` class with one semantic method per grammar
  rule. Each method receives as its single parameter the
  `Abstract Syntax Tree`_ (`AST`_) built from the rule invocation:

  .. code:: python

      def somerulename(self, ast):
          return ast

- An ``if __name__ == '__main__':`` definition, so the generated parser can
  be executed as a `Python`_ script.

The methods in the delegate class return the same `AST`_ received as
parameter, but custom semantic classes can override the methods to have them
return anything (for example, a `Semantic Graph`_). The semantics class can
be used as a template for the final semantics implementation, which can omit
methods for the rules that do not need semantic treatment.

If present, a ``_default()`` method will be called in the semantics class
when no method matched the rule name:

.. code:: python

    def _default(self, ast):
        ...
        return ast

If present, a ``_postproc()`` method will be called in the semantics class
after each rule (including the semantics) is processed. This method will
receive the current parsing context as parameter:

.. code:: python

    def _postproc(self, context, ast):
        ...

Using the Generated Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~

To use the generated parser, just subclass the base or the abstract parser,
create an instance of it, and invoke its ``parse()`` method passing the text
to parse and the starting rule's name as parameter:

.. code:: python

    import json

    from tatsu.util import asjson

    from myparser import MyParser

    parser = MyParser()
    ast = parser.parse('text to parse', start='start')
    print(ast)
    print(json.dumps(asjson(ast), indent=2))

The generated parsers' constructors accept named arguments to specify
whitespace characters, the regular expression for comments, case
sensitivity, verbosity, and more (see below).

To add semantic actions, just pass a semantic delegate to the parse method:

.. code:: python

    model = parser.parse(text, start='start', semantics=MySemantics())
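For example, a minimal semantics delegate for the calculator grammar shown
earlier could convert numeric tokens as they are parsed (a sketch; the
method name must match the ``number`` rule in that grammar, and everything
else about the class is an assumption for the example):

.. code:: python

    class MySemantics:
        def number(self, ast):
            # the AST for the ``number`` rule is the matched token,
            # so turn it into an actual integer
            return int(ast)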
If special lexical treatment is required (as in *80 column* languages), then
a descendant of ``tatsu.tokenizing.Tokenizer`` can be passed instead of the
text:

.. code:: python

    class MySpecialTokenizer(Tokenizer):
        ...

    tokenizer = MySpecialTokenizer(text)
    model = parser.parse(tokenizer, start='start', semantics=MySemantics())

The generated parser's module can also be invoked as a script:

.. code:: bash

    $ python myparser.py inputfile startrule

As a script, the generated parser's module accepts several options:

.. code:: bash

    $ python myparser.py -h
    usage: myparser.py [-h] [-c] [-l] [-n] [-t] [-w WHITESPACE]
                       FILE [STARTRULE]

    Simple parser for DBD.

    positional arguments:
      FILE                  the input file to parse
      STARTRULE             the start rule for parsing

    optional arguments:
      -h, --help            show this help message and exit
      -c, --color           use color in traces (requires the colorama library)
      -l, --list            list all rules and exit
      -n, --no-nameguard    disable the 'nameguard' feature
      -t, --trace           output trace information
      -w WHITESPACE, --whitespace WHITESPACE
                            whitespace specification

python-tatsu-lts-5.13.1+ds/etc/000077500000000000000000000000001474373752700162605ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/etc/sublime/000077500000000000000000000000001474373752700177205ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/etc/sublime/tatsu.YAML-tmLanguage000066400000000000000000000024461474373752700236340ustar00rootroot00000000000000
# [PackageDev] target_format: plist, ext: tmLanguage
name: tatsu
scopeName: source.tatsu
fileTypes: [tatsu]
uuid: D9954069-097B-4361-818D-EF4986D442E7

patterns:
- comment: directive
  name: keyword.other
  match: '@@.*::'
- comment: decorator
  name: keyword.other
  match: '^@.*'
- comment: include directive
  name: keyword.other
  match: '#include\s*::'
- name: comment.line.number-sign.tatsu
  match: '#.*$'
- name: comment.block.tatsu
  begin: ^\(\*
  end: \*\)
- name: meta.tatsu.syntax-rule
  begin: ^\s*(\b(?:\w|\s|[\-_0-9])+\b)\s*=
  beginCaptures:
    '1': {name: entity.name.function}
  end: ;
  patterns:
  - comment: comment blocks
    name: comment.block.tatsu
    begin: ^\(\*
    end: \*\)
  - comment: special characters
    name: support.type
    match: '[~+*%\.]'
  - comment: double-quote-string
    name: string.quoted.double
    begin: '"'
    end: '"'
  - comment: single-quote-string
    name: string.quoted.single
    begin: ''''
    end: ''''
  - comment: regex
    name: string.regexp
    begin: '/'
    end: '/'
  - comment: special regex
    name: string.regexp
    begin: '\?'
    end: '\?'
  - comment: special regex
    name: support.constant
    begin: '`'
    end: '`'
  - comment: non-terminal
    name: variable.other.tatsu.non-terminal
    match: \w(?:\w|[\-_0-9])*\w
python-tatsu-lts-5.13.1+ds/etc/sublime/tatsu.tmLanguage000066400000000000000000000066771474373752700231040ustar00rootroot00000000000000 fileTypes tatsu name tatsu patterns comment directive match @@.*:: name keyword.other comment decorator match ^@.* name keyword.other comment include directive match #include\s*:: name keyword.other match #.*$ name comment.line.number-sign.tatsu begin ^\(\* end \*\) name comment.block.tatsu begin ^\s*(\b(?:\w|\s|[\-_0-9])+\b)\s*= beginCaptures 1 name entity.name.function end ; name meta.tatsu.syntax-rule patterns begin ^\(\* comment comment blocks end \*\) name comment.block.tatsu comment special characters match [~+*%\.] name support.type begin " comment double-quote-string end " name string.quoted.double begin ' comment single-quote-string end ' name string.quoted.single begin / comment regex end / name string.regexp begin \?
comment special regex end \? name string.regexp begin ` comment special regex end ` name support.constant comment non-terminal match \w(?:\w|[\-_0-9])*\w name variable.other.tatsu.non-terminal scopeName source.tatsu uuid D9954069-097B-4361-818D-EF4986D442E7 python-tatsu-lts-5.13.1+ds/etc/vim/000077500000000000000000000000001474373752700170535ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/etc/vim/ftdetect/000077500000000000000000000000001474373752700206555ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/etc/vim/ftdetect/tatsu.vim000066400000000000000000000001411474373752700225260ustar00rootroot00000000000000au BufNewFile,BufRead *.tatsu set filetype=tatsu au BufNewFile,BufRead *.ebnf set filetype=tatsu python-tatsu-lts-5.13.1+ds/etc/vim/syntax/000077500000000000000000000000001474373752700204015ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/etc/vim/syntax/tatsu.vim000066400000000000000000000053261474373752700222640ustar00rootroot00000000000000" Vim syntax file " Language: EBNF/Tatsu " Maintainer: Apalala " With thanks to Michael Brailsford for the BNF syntax file. " Quit when a syntax file was already loaded if version < 600 syntax clear elseif exists("b:current_syntax") finish endif syn match ebnfInclude /#[ \t\n]*[A-Za-z0-9_-]\+/ skipwhite skipempty nextgroup=ebnfParamStart syn match ebnfRuleInclude />[ \t\n]*[A-Za-z0-9_-]\+/ skipwhite skipempty syn match ebnfMetaIdentifier /[A-Za-z0-9_-]\+/ skipwhite skipempty nextgroup=ebnfSeparator syn match ebnfName /@:\|@+:\|@\|[A-Za-z0-9_-]\+:\|[A-Za-z0-9_-]\++:/ contained skipwhite skipempty syn match ebnfDecorator /@@\?[A-Za-z0-9_-]\+/ skipwhite skipempty syn match ebnfInherit /<[ \t\n]*[A-Za-z0-9_-]\+/ skipwhite skipempty nextgroup=ebnfParamStart,ebnfSeparator syn match ebnfParamsStart "::" nextgroup=ebnfParams skipwhite skipempty syn match ebnfParams /.*[^=]/ contained skipwhite skipempty nextgroup=ebnfSeparator syn region ebnfParams start=/(/ end=')' skipwhite skipempty nextgroup=ebnfSeparator syn match ebnfSeparator /[=]/ contained nextgroup=ebnfProduction skipwhite skipempty syn region ebnfProduction start=/\zs[^;]/ end=/[;]/me=e-1 contained contains=ebnfSpecial,ebnfDelimiter,ebnfTerminal,ebnfConstant,ebnfSpecialSequence,ebnfPattern,ebnfComment,ebnfName,ebnfRuleInclude nextgroup=ebnfEndProduction skipwhite skipempty syn match ebnfDelimiter #[\-\*+]\|>>\|[&~,(|)\]}\[{!]\|\(\*)\)\|\((\*\)\|\(:)\)\|\((:\)# contained syn match ebnfSpecial /[~+*%\.]/ contained syn region ebnfPattern matchgroup=Delimiter start=/\// end=/\// contained syn region ebnfSpecialSequence matchgroup=Delimiter start=/?\// end=/\/?/ contained syn match ebnfEndProduction /[;]/ contained syn region ebnfTerminal matchgroup=delimiter start=/"/ end=/"/ contained syn region ebnfTerminal matchgroup=delimiter start=/'/ end=/'/ contained syn region ebnfConstant matchgroup=delimiter start=/`/ end=/`/ contained syn region ebnfComment start="#" end="$" contains=ebnfTodo syn region ebnfComment start="(\*" end="\*)" contains=ebnfTodo syn keyword ebnfTodo FIXME NOTE NOTES TODO XXX contained syn region ebnfClosure start="'.{" end="}" contains=ebnfTodo hi link ebnfComment Comment hi link ebnfMetaIdentifier Identifier hi link ebnfSeparator ebnfDelimiter hi link ebnfEndProduction ebnfDelimiter hi link ebnfDelimiter Delimiter hi link ebnfDelimiter Delimiter hi link ebnfSpecial Type hi link ebnfSpecialSequence Statement hi link ebnfPattern Statement hi link ebnfTerminal String hi link ebnfName Keyword hi link ebnfRuleInclude Include hi link ebnfDecorator Include hi link 
ebnfConstant ebnfDecorator hi link ebnfInherit Include hi link ebnfParamsStart ebnfParams hi link ebnfParams Type hi link ebnfClosure Type hi link ebnfTodo Todo python-tatsu-lts-5.13.1+ds/grammar/000077500000000000000000000000001474373752700171335ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/grammar/tatsu.ebnf000066400000000000000000000127471474373752700211420ustar00rootroot00000000000000@@grammar :: TatSu @@whitespace :: /(?m)\s+/ @@comments :: ?"(?sm)[(][*](?:.|\n)*?[*][)]" @@eol_comments :: ?"(?m)#[^\n]*$" @@parseinfo :: True @@left_recursion :: False start = grammar ; grammar::Grammar = title:`TATSU` { directives+:directive | keywords+:keyword } rules+:rule { rules+:rule | keywords+:keyword } $ ; directive = '@@' !'keyword' ~ ( name:('comments' | 'eol_comments') ~ ~ '::' ~ value:regex | name:('whitespace') ~ ~ '::' ~ value:(regex|string|'None'|'False'|`None`) | name:('nameguard' | 'ignorecase' | 'left_recursion' | 'parseinfo') ~ ('::' ~ value:boolean|value:`True`) | name:('grammar') ~ '::' ~ value:word | name:('namechars') ~ '::' ~ value:string ) ~ ; keywords = { keywords }+ ; keyword = '@@keyword' ~ '::' ~ {@+:(word|string) !(':'|'=')} ; paramdef = (* no keyword params if ':: *) '::' ~ params:params | '(' ~ ( kwparams:kwparams | params:params ',' ~ kwparams:kwparams | params:params ) ')' ; rule::Rule = decorators:{decorator} name:name ~ [>paramdef] ['<' ~ base:known_name] '=' ~ exp:expre ';' ~ ; decorator = '@' !'@' ~ @:('override'|'name'|'nomemo') ; params = @+:first_param {',' @+:literal !'=' ~ } ; first_param = | path | literal ; kwparams = ','.{pair}+ ; pair = @+:word '=' ~ @+:literal ; expre = choice | sequence ; choice::Choice = ['|' ~] @+:option {'|' ~ @+:option}+ ; option::Option = @:sequence ; sequence::Sequence = sequence:{element}+ ; element = rule_include | named | override | term ; rule_include::RuleInclude = '>' ~ @:known_name ; named = named_list | named_single ; named_list::NamedList = name:name '+:' ~ exp:term ; named_single::Named = name:name ':' ~ exp:term ; override = override_list | override_single | override_single_deprecated ; override_list::OverrideList = '@+:' ~ @:term ; override_single::Override = '@:' ~ @:term ; override_single_deprecated::Override = '@' ~ @:term ; term = | void | gather | join | left_join | right_join | group | empty_closure | positive_closure | closure | optional | special | skip_to | lookahead | negative_lookahead | atom ; group::Group = '(' ~ exp:expre ')' ~ ; gather = &(separator '.{') ~ ( | positive_gather | normal_gather ) ; positive_gather::PositiveGather = sep:separator '.{' exp:expre '}' ('+'|'-') ~ ; normal_gather::Gather = sep:separator '.{' ~ exp:expre '}' ['*' ~] ~ ; join = &(separator '%{') ~ ( | positive_join | normal_join ) ; positive_join::PositiveJoin = sep:separator '%{' exp:expre '}' ('+'|'-') ~ ; normal_join::Join = sep:separator '%{' ~ exp:expre '}' ['*' ~] ~ ; left_join::LeftJoin = sep:separator '<{' ~ exp:expre '}' ('+'|'-') ~ ; right_join::RightJoin = sep:separator '>{' ~ exp:expre '}' ('+'|'-') ~ ; separator = group | token | constant | any | pattern ; positive_closure::PositiveClosure = '{' @:expre '}' ('-' | '+') ~ ; closure::Closure = '{' @:expre '}' ['*'] ~ ; empty_closure::EmptyClosure = '{' @:() '}' ; optional::Optional = '[' ~ @:expre ']' ~ ; special::Special = '?(' ~ @:/.*?(?!\)\?)/ ')?' ~ ; lookahead::Lookahead = '&' ~ @:term ; negative_lookahead::NegativeLookahead = '!' 
~ @:term ; skip_to::SkipTo = '->' ~ @:term ; atom = cut | cut_deprecated | token | alert | constant | call | pattern | eof ; call::RuleRef = word ; void::Void = '()' ~ ; cut::Cut = '~' ~ ; cut_deprecated::Cut = '>>' ~ ; known_name = name ~ ; name = word ; constant::Constant = &"`" ( | /(?ms)```((?:.|\n)*?)```/ | "`" @:literal "`" | /`(.*?)`/ ) ; alert::Alert = level:/\^+/ message:constant ; token::Token = string | raw_string ; literal = string | raw_string | boolean | word | hex | float | int | null ; string = STRING ; raw_string = /r/ @:STRING ; STRING = | @:/"((?:[^"\n]|\\"|\\\\)*?)"/ ~ | @:/'((?:[^'\n]|\\'|\\\\)*?)'/ ~ ; hex = /0[xX](?:\d|[a-fA-F])+/ ; float = /[-+]?(?:\d+\.\d*|\d*\.\d+)(?:[Ee][-+]?\d+)?/ ; int = /[-+]?\d+/ ; path = /(?!\d)\w+/ + /(?:::(?!\d)\w+)+/ ; word = /(?!\d)\w+/ ; any::Any = '/./' ; pattern::Pattern = regexes ; regexes = '+'.{regex}+ ; regex = | '/' ~ @:?"(?:[^/\\]|\\/|\\.)*" '/' ~ | '?/' ~ @:?"(?:.|\n)*?(?=/\?)" ?"/\?+" ~ | '?' @:STRING ; boolean = 'True' | 'False' ; null = 'None' ; eof::EOF = '$' ~ ; python-tatsu-lts-5.13.1+ds/media/000077500000000000000000000000001474373752700165645ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/media/japanese_tatsu.png000066400000000000000000000440421474373752700223040ustar00rootroot00000000000000PNG  IHDR^t iCCPICC ProfileHWXS[R -)7AtAtH*vtQl*kdQQ,,?QYY 6Tޤ}|s399w@ٖ-F0PnzGF(˻[ukI8 pElkrrڡhv~B. &Ú"61Q SlaJN$h+W@Igs!~|'o1SbicXT~|Qn{Y-Y5 ᠦ $9úUeH0&AJxj_s|/]+|ap~!@gz˱=[(h8??8FS9Qh +Y(/cZJg )ٸ6ɂw]Gʣ~ bb΂CF<`4oO ]DŽn]Hȭf?0g0 ȳK1GmpS= g3ƽ`nP=Coq= ,,R~1~W#.|hamYք&vkڱS< O0Z[&lzys%oN\!?-= wc3Xs@˶7 鞍0.5Zitl#N>5l䈅2d;1(@~Z@s=pb@" +!`>XA)X 6m`'!pԃ&p\W@' þ/ x!!4h! b#.'⏄"QH"!DG"zdF~EN"gKHrA'C:Q Ach=֡g+M}aSf`XbBl!VaX-X76}ĉ8gְ7X U6 [x>%:+!@H#&'wGxG$D33.yUfb8D"HV$RM'ΐHȊd}=9Dekga7\5 {*) ST)fJ %RK9Oy@yh8UXqŋ=jTK/u:UL]MOmޥh4-O[M=}P+(+q)+)]Szl=7nqƽihּI寕N^6m=U{ǻ/t=TR'Jgv!]=@\ݭtz, z|g`j0Y-V栁A`AaaaÇF#TF-Fak(l6i3yojfoܴYYYsyy EvNK2ݲjdŷn50u`BTkoMMMˉ&6񫭣m^vjvS^[so8948d57iǤ;t0-_NNη]]"]V\t%.rmrv/wkL'MM;Ѓۣۓ˳Uey[xgx~c#9woWQa@Z@M`c BPHк)SLi Dl yj* m Cæm{n."FEE685rjԧQvQڢѳDYs?)Pd ZsrtZvm QC:<洋?{ < >̎}lsW}VP<|g^|K,^{!0ea"E-\\$sEE._ڸLwe?TST,,} +Z-\j[ZVyg~kXK\+X{k׺ nPdM6]*Ts3exs- [y[>+t*VV~mkGNݝ;?ﺳ;pw]ie➂=Om}J}/]UZ\]}@F\pC~jkwf.=k򯷎m9r%uHܺĆSN47MM4N9M9ș3C͹gj.܍֩C_p\wۙ.]:yr+u'~wDSGU ]N_v 7o\~V;ýnW _òG:*aNݧzzG?}Ds߲eU?Ǵ?^(Sϊ/}0aUo;mPУwߗ|PcۧOφg&}KאFGFrB(z?Dxv${I',I ,b (;0 ߒw cC.T{Y,*>/{!ٻ4|!. {  :me pHYs%%IR$iTXtXML:com.adobe.xmp 2017-02-14T09:41:41 Flying Meat Acorn 5.6.1 144 5 144 606 626 B8IDATxU}/qhoԀaMq埽 1 z/LGZm8ܹfJi*Fd& ƸLnaٙ }y> #w|}>G8p_ @@FA@׀nJ  Pݔ @@ @\)  @ @@ tS @l @p $@ @5MI=@j5`{ k@7%A@׀nJ  Pݔ @@ @\)  @ @@ tS @l @p $@ @5MI=@j5`{@R#C{&Ub O}=F@?}#;{n7VY5_%*8\#@@ᡡ<s=Mѹ{txT:άFG?z=[Zk>2P8T;C?}⇻{䑇{nA;>{!Mѳ ) 7w~_}r·vܿԮzۢYŏkDq6K$L?C7IU;J2  x\{'$恃w$W粯95`j! 
سW}l&jl܊UY!8P5f>3~/f)_{}&G>J& pmQ`^soppaT« +pyJ#@`Bh{CM p(B~2ʾ~;R@ `;a_/;[ʟ  @4Dfz/WܼknHKNQ@55GsB[}8D@O&#!_{D5{88N/T?<`ѫa$C@|Iً."x`6B]J\Ys"x𖍻r l[ka^4nnj6Y>l:O$'xpՖގd- CGqI'8c[տ# =Ehnl-@9UOwbcC(\@Nj& `_R8ˑv4L H]`kzոJR/(='\nwbp1F!6sǯzNz %Nz h4EI@[۬EX$S@싪RJ_ɯ' xV@ h+݋\(H@i tBa w #!,0ky ríw =MBO'P.ՐMXʱ)tGsqw;N}G?Ke[-n˗zK{'(Bgɇ1i B BPy`Jqh|Mn\w̟s>\l%* msn^B'mܜ^[i3_<Ѓ}mO&$@`"㣣###,'-|N,0k[10gWǼzӚm3;f6G8p`bds,8KM[K-쨻:G֡gN@nji3 dN  me @@^ @@CpCo P P[6+ 7;4T@7M zN  me @@^ @@CpCo P P[6+ 7;4T@7M zN  me @@^ @@CpCo P P[6+ 7;4T@7M zN  me @@^ @@CpCo P P[6+ 7;4T@7M zN  me @@^ @@CpCo P PLxO5Y,3UX'_>rW屜2W79awqoapS [gS^(Cv ?Irpޔ\YSq ]rs~u8noʮ,)tލTvdccΥp@ ^ޡ y m>Ğ4;?_tމ9 [KlG<^y˷F#-薛oRsu,>s/GoP<^ρm{l_kybsU\uȉ"Boc)t|%/?9'DRK8&YbSB |`X'$~98~ʭ0)8.퉏>}M;7qTW/ 76cS;}&V}=woTX-KΙ"$ hR%F<^WmtFZPU.3sSշs0;$V!C7X$jp"*̈}wXv=wo>T5]?O׭ : oZs)|d-P)8fXjSK\ڶ&Wݗsp=QAS՛f#P7yIqN]%zͭ;J\_=s}d$TNYzv=n1sϒsfeH8R7%)ݻ|&V"6:1Nc%z;K\Sh?yѢ9ΟSCj:Rg=OV}eO$"0g˪`_^|(յΟ{Hi UlJ%E{k}7XO-G'ۺr x =x].vKnZNseo7BPo3دm[ǯ_I.F'ݾ2x F*~`}{ѶIWrO.yՆ#0oeϟ<l 4%D<^{HY5n,G;^~"ϱvIXjO76=֭ S˳Zz98ZS fyX?ٹΟC썌5<^}=>VrhX]t`)'y| ]@S NmznxQOw"_ @zЛzӍXyǺ*MWp-)Mze߷BZu~oۥ]"\N`r8?'Y#hcx }vUnk}x`_u-_낝?.9s)pRQ\ذi _Ϛ$)pā[Ȯ>fگU7xeۃ׭m˖/ov#lֻlm[_?# {t__jvY @/ ܶ'##'+  @@q3ؑ bk7HV@': @@8ɶN N{j'@dpS8, S  @ YlN) vHV@': @@8ɶN N{j'@dpS8, S  @ YlN) vHV@': @@8ɶN N{j'@dpS8, S  @ YlN) vHV@': @@8ɶN N{j'@dpS8, S  @ YlN) vHV@': @@8ɶN N{j'@d O$+pHP`xԢ8p@3֢BS @@53f͙=Zn: @L=Ѳ @@U1 @p @ V\O  * [s=(@@h ЪnU @\! @@U1 @p @ V\O  * [s=(@@h ЪnU @\! @@U1 @p @ V\O  * [s=(@@h ЪnU @\! @@U1 @p @ V\O  * [s=(@@h ЪnU @\! @@U1 @p @ V\O  * [s=(@@h ЪnU @\! @@U1 @ C{XE @?}Ζ)OPH9񳟼nptSgwd@ q=f*IcFNj!@ƴB  @  @@cpcZm I@GZ @11P$ #uC- ܘV[(DhnL-" HP 4F@7Jpn# j %@H8R7Bi @@$j!@ƴB  @  @@cpcZm I@GZ @11P$ #uC- ܘV[(DhnL-" HP 4F@7Jpn# j %@H8R7Bi @@$j!@ƴB  @  @@cpcZm I@GZ @11P$L._Wnm#}gsbaU?SەSS qv±v(E Ͷ lLڮpD?p(tbmhb7e礷#RX%5 @@v8Z S  @ ;]K-R)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pv- HA@%5 @@v8Z S  @ ;]K-R)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pv- HA@%5 @@v8Z S  @ ;]K-R)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pb-}E+hz;'N/ZI3՚]׊@@Rvs]w $3a=sF_7?LA yym3~w8/򇿸3e:ԓO%饕 @@B8f)J @ !PJ|zi% NYJ%@|p>HH@', @@>8^Z $$ jR  @ O/ 5K ΧVB TG@K+!@pBR*# 饕 @@B8f)J @ !PJ|zi% NYJ%@|p>HH@', @@>8^Z $$ jR  @ O/ 5K ΧVB TG@K+!@pBR*# 饕 @@B8f)J @ !PJ糔fd5_;^Xnʫ1cƾoYS-#jٮkE@7u3w^GB0ik< +Ω)"ퟂfgA'~1:XM.wf9H'=*)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pv- HA@%5 @@v8Z S  @ ;]K-R)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pv- HA@%5 @@v8Z S  @ ;]K-R)tI ήD)FN@gR "@p ]R#d' k @@ 8.ٵԂ @ BH ZjA NKj$@pv- HA@%5 @@v8Z S  @ ;]K-R)tI ήG`4N)*IPIi-,[W"T0EOM/s7.zH@;n%_HW;<0fvNVLHgEVK PRsRzp MQ/  @@@)J"@p=B( 6EI οVHp(  @ % @@8[!$_@c+$@8`SD {l P@l @  MQ/  @@@)J"@p=B( 6EI οVHp(  @ % @@8[!$_@c+$@8`SD {lHH`dhHB~< x>}굛3zǑb XO @@?Q']7 7' @@Ï_ t+*, D#@}ʂ_ w' @@{{g#_~8OsAOϳ  @`zC?|xFypb'@"okXGuV&2s @ mw!XmGa#i7: L S,'&]~4%YO#=XoAc|=Hc:b [wj/7`\{@ 츳 O|c-- v @@[螯7kiݤ 6}3tu@pWJ^Q,&!Њc߾˹[p95*`'JRNF}Go; \ PmG,0oj;G )HZ`G%p̮nnk{ =Oݦ  q=Gغz&~Ѭ]ǿq֋ \!)0puon*0}߀W!@I본5`G)mXM(W5t>儺n+tzRطzUk{.cu}峭nq+#pwP_W/u?Do[[[\O~癑@6nqvS8(0s+bPSOpMLF۞ 68֓ƕs?.HϴtvW'wtV7fZݿݛF2QwN0<+O tu1MCO?\ W> pK\.&В/{[Z #w/WubWw`U XF!0c5>6g7l7Q`Ϧ^⽿w^M+ٿY\ShʏHSsn~4z:3eMs}rkx䕻" M߽ڐŞqܜ!r8zAwc_:8rmx8Ǭs颹*f4qi'ͪqFGy7tͫ|˃us엿 MXc3G?ֳx>9wEO>o,w X&O>r.q}+>8AgWoϻ̝YQ"{gֶ̘ёz߇G?|?m`0׹rs߫G8pFa#~ La-& ,}gnjFJf΀]MB @ҳ=ԓOՊ-B% vmF %3렚Ni5QNY ,>HoY. *0$p(Av3 |Z{ 8~^? 
)p),s|c K`~j˷w $0gN 4 R"K58f/|T+w~G3N> wb 4Vz />l|/ &0?Nt L`bT(%sndpO۟+S/(Cs YcX?~OP~T 0珮\Z4 @9+gkv)JA7CW*nf ,Ł>Ž/]~+A.^ǿϴGۥސT(p_aIXMܒ\q͕Wy t_y°{Z]P?pRC펲 }Ջe=|E"?LCqK4A9\^Tf!0/|K{lf/_XXzgN ~ k@ZᳱlE ^gs8p@i)ݲ] LB 0@-+ɺ_U$'u*;܄=i:}/EIۤˮcSy $P>rdp=0yZ޹iπj3 /L拴<E \v;QKONl- g^>s\L˾|'͎ 'ȷ 0t_u}5lJ ,WRNh)QC^~JNjT-vpϟn\j]Gz[8^ތq:{§?)xyF>Kn=k?'Wn]_sz$bx &A`xז~#4C 5t_?܅)w Jx H.!C`Ih*^|K۩&ᒭּ\MnnWovtݭ0_4wV='pX{v~u?u>,?O )ٽ{8{,_.%F~ϝ_Yn(Nu}VO;gۏ;;Nc< oHE`xݶyӝt>JXggWς'xo> sf5+tW5@&C{|=l۶Ygսmo}o{15Mo|YM8]R\D iёᡡ}O>d'6ӟ=46.4", "wheel"] build-backend = "setuptools.build_meta" [project] name = "TatSu-LTS" dynamic = ["version"] authors = [ {name = "Juancarlo Añez", email = "apalala@gmail.com"}, ] maintainers = [ {name = "Daniele Nicolodi", email = "daniele@grinta.net"}, ] description = "TatSu takes a grammar in a variation of EBNF as input, and outputs a memoizing PEG/Packrat parser in Python." readme = "README.rst" requires-python = ">=3.8" keywords = [] license = {file = "LICENSE.TXT"} classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Environment :: Console", "Operating System :: OS Independent", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Software Development :: Code Generators", "Topic :: Software Development :: Compilers", "Topic :: Software Development :: Interpreters", "Topic :: Text Processing :: General", ] dependencies = [] [project.optional-dependencies] colorization = ["colorama"] parproc = ["rich"] [project.scripts] tatsu = "tatsu:main" g2e = "tatsu.g2e:main" [project.urls] Homepage = "https://github.com/dnicolodi/TatSu-LTS" Repository = "https://github.com/dnicolodi/TatSu-LTS" Documentation = "https://tatsu.readthedocs.io/en/stable/" Questions = "https://stackoverflow.com/questions/tagged/tatsu" [tool.setuptools] include-package-data = false [tool.setuptools.dynamic] version = {attr = "tatsu._version.__version__"} [tool.setuptools.packages.find] include = ["tatsu*"] [tool.blue] target-version = ["py310"] line-length=79 skip-magic-trailing-comma=true python-tatsu-lts-5.13.1+ds/readthedocs.yaml000066400000000000000000000006051474373752700206570ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 build: os: "ubuntu-22.04" tools: python: "3.12" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py # Optionally build your docs in additional formats such as PDF formats: - pdf python-tatsu-lts-5.13.1+ds/requirements-dev.txt000066400000000000000000000001301474373752700215370ustar00rootroot00000000000000-r requirements-test.txt git-pylint-commit-hook ipython pip psutil twine auto-changelog python-tatsu-lts-5.13.1+ds/requirements-test.txt000066400000000000000000000001711474373752700217450ustar00rootroot00000000000000astroid docutils mypy pandoc pytest pytest-flake8 pytest-mypy rich rst2html5 ruff setuptools sphinx sphinx-rtd-theme tox python-tatsu-lts-5.13.1+ds/ruff.toml000066400000000000000000000035071474373752700173510ustar00rootroot00000000000000line-length = 88 lint.select = [ "F", "E", "W", "UP", "I", "YTT", "S", "B", "COM", "C4", "SIM", "PTH", "TRY", "FLY", 
"PERF", "FURB", "RUF", "PL", "PGH", "RET", # "ERA", # commented out code ] lint.ignore = [ "B019", # memory leaks possible with functools.cache # "C408", # unnecessary-collection-call "E501", # line-too-long "E741", # ambiguous-variable-name "E402", # module-import-not-at-top-of-file "FURB189", # subclassing str "S101", # use of assert "PERF203", # try-except within loop "PLC0415", # import-outside-top-level "PLC1802", # len-test "PLR6301", # no-self-use "PLR0904", # too-many-public-methods "PLR0913", # too-many-arguments "PLR0915", # too-many-statements "PLR0917", # too many possitional arguments "PLR2004", # magic-value-comparison "PLW1514", # unspecified-encoding # "PLW0603", # global-statement # "PLW2901", # redefined-loop-name "PLW3201", # bad-dunder-method-name # "PTH123", # builtin-open "RET505", # superfluous-else-return "RUF022", # __all__ sorting "RUF027", # possible f-string "RUF039", # unraw-re-pattern "RUF052", # used-dummy-variable "S102", # exec-builtin # "S105", # hardcoded-password-string "S301", # suspicious-pickle-usage # "S311", # suspicious-non-cryptographic-random-usage "S403", # insecure modules "SIM108", # use ternary instead of if=else "SIM115", # open-file-with-context-handler "SIM114", # if-with-same-arms "TRY003", # raise-vanilla-args "TRY300", # try-consider-else "UP031", # use of % formatting ] exclude = [] target-version = "py38" [lint.per-file-ignores] [lint.mccabe] # Unlike Flake8, default to a complexity level of 10. max-complexity = 10 [lint.pydocstyle] convention = "numpy" python-tatsu-lts-5.13.1+ds/tatsu/000077500000000000000000000000001474373752700166455ustar00rootroot00000000000000python-tatsu-lts-5.13.1+ds/tatsu/__init__.py000066400000000000000000000006121474373752700207550ustar00rootroot00000000000000from __future__ import annotations from ._config import __toolname__, __version__ from .tool import ( # pylint: disable=W0622 compile, main, parse, to_python_model, to_python_sourcecode, ) assert __version__ assert __toolname__ assert bool(compile) assert bool(parse) assert bool(to_python_sourcecode) assert bool(to_python_model) if __name__ == '__main__': main() python-tatsu-lts-5.13.1+ds/tatsu/__main__.py000066400000000000000000000001361474373752700207370ustar00rootroot00000000000000from __future__ import annotations import tatsu if __name__ == '__main__': tatsu.main() python-tatsu-lts-5.13.1+ds/tatsu/_config.py000066400000000000000000000001101474373752700206130ustar00rootroot00000000000000from ._version import __version__ # noqa: F401 __toolname__ = 'TatSu' python-tatsu-lts-5.13.1+ds/tatsu/_version.py000066400000000000000000000000271474373752700210420ustar00rootroot00000000000000__version__ = '5.13.1' python-tatsu-lts-5.13.1+ds/tatsu/ast.py000066400000000000000000000065121474373752700200120ustar00rootroot00000000000000from __future__ import annotations import copy import operator from functools import reduce from .util import asjson, is_list class AST(dict): _frozen = False def __init__(self, *args, **kwargs): super().__init__() self.update(*args, **kwargs) self._frozen = True @property def frozen(self): return self._frozen @property def parseinfo(self): try: return super().__getitem__('parseinfo') except KeyError: pass def set_parseinfo(self, value): super().__setitem__('parseinfo', value) def copy(self): return copy.copy(self) def asjson(self): return asjson(self) def _set(self, key, value, force_list=False): key = self._safekey(key) previous = self.get(key) if previous is None and force_list: value = [value] elif previous is None: pass elif 
is_list(previous): value = [*previous, value] else: value = [previous, value] super().__setitem__(key, value) def _setlist(self, key, value): return self._set(key, value, force_list=True) def __copy__(self): return AST(self) def __getitem__(self, key): if key in self: return super().__getitem__(key) key = self._safekey(key) if key in self: return super().__getitem__(key) return None def __setitem__(self, key, value): self._set(key, value) def __delitem__(self, key): key = self._safekey(key) super().__delitem__(key) def __setattr__(self, name, value): if self._frozen and name not in vars(self): raise AttributeError( f'{type(self).__name__} attributes are fixed. ' f' Cannot set attribute "{name}".', ) super().__setattr__(name, value) def __getattr__(self, name): key = self._safekey(name) if key in self: return self[key] elif name in self: return self[name] try: return super().__getattribute__(name) except AttributeError: return None def __hasattribute__(self, name): try: super().__getattribute__(name) except (TypeError, AttributeError): return False else: return True def __reduce__(self): return (AST, (list(self.items()),)) def _safekey(self, key): while self.__hasattribute__(key): key += '_' return key def _define(self, keys, list_keys=None): for key in (self._safekey(k) for k in keys): if key not in self: super().__setitem__(key, None) for key in (self._safekey(k) for k in list_keys or []): if key not in self: super().__setitem__(key, []) def __json__(self, seen=None): return {name: asjson(value, seen=seen) for name, value in self.items()} def __repr__(self): return repr(self.asjson()) def __str__(self): return str(self.asjson()) def __hash__(self): # NOTE: objects are actually mutable during creation return reduce( operator.xor, (hash((name, id(value))) for name, value in self.items()), 0, ) python-tatsu-lts-5.13.1+ds/tatsu/bootstrap.py000066400000000000000000001036301474373752700212370ustar00rootroot00000000000000#!/usr/bin/env python3 # WARNING: CAVEAT UTILITOR # # This file was automatically generated by TatSu. # # https://pypi.python.org/pypi/tatsu/ # # Any changes you make to it will be overwritten the next time # the file is generated. 
# ruff: noqa: C405, COM812, I001, F401, PLR1702, PLC2801, SIM117 from __future__ import annotations import sys from pathlib import Path from tatsu.buffering import Buffer from tatsu.parsing import Parser from tatsu.parsing import tatsumasu from tatsu.parsing import leftrec, nomemo, isname from tatsu.infos import ParserConfig from tatsu.util import re, generic_main KEYWORDS: set[str] = set() class EBNFBootstrapBuffer(Buffer): def __init__(self, text, /, config: ParserConfig | None = None, **settings): config = ParserConfig.new( config, owner=self, whitespace='(?m)\\s+', nameguard=None, ignorecase=False, namechars='', parseinfo=True, comments='(?sm)[(][*](?:.|\\n)*?[*][)]', eol_comments='(?sm)*#[^\\n]*$', keywords=KEYWORDS, start='start', ) config = config.replace(**settings) super().__init__(text, config=config) class EBNFBootstrapParser(Parser): def __init__(self, /, config: ParserConfig | None = None, **settings): config = ParserConfig.new( config, owner=self, whitespace='\\s+', nameguard=None, ignorecase=False, namechars='', parseinfo=True, comments='(?sm)[(][*](?:.|\\n)*?[*][)]', eol_comments='(?m)#[^\\n]*$', keywords=KEYWORDS, start='start', ) config = config.replace(**settings) super().__init__(config=config) @tatsumasu() def _start_(self): self._grammar_() @tatsumasu('Grammar') def _grammar_(self): self._constant('TATSU') self.name_last_node('title') def block0(): with self._choice(): with self._option(): self._directive_() self.add_last_node_to_name('directives') with self._option(): self._keyword_() self.add_last_node_to_name('keywords') self._error( 'expecting one of: ' ' ' ) self._closure(block0) self._rule_() self.add_last_node_to_name('rules') def block1(): with self._choice(): with self._option(): self._rule_() self.add_last_node_to_name('rules') with self._option(): self._keyword_() self.add_last_node_to_name('keywords') self._error( 'expecting one of: ' ' ' ) self._closure(block1) self._check_eof() self._define( ['title'], ['directives', 'keywords', 'rules'], ) @tatsumasu() def _directive_(self): self._token('@@') with self._ifnot(): self._token('keyword') self._cut() with self._group(): with self._choice(): with self._option(): with self._group(): with self._choice(): with self._option(): self._token('comments') with self._option(): self._token('eol_comments') self._error( 'expecting one of: ' "'comments' 'eol_comments'" ) self.name_last_node('name') self._cut() self._cut() self._token('::') self._cut() self._regex_() self.name_last_node('value') self._define(['name', 'value'], []) with self._option(): with self._group(): self._token('whitespace') self.name_last_node('name') self._cut() self._cut() self._token('::') self._cut() with self._group(): with self._choice(): with self._option(): self._regex_() with self._option(): self._string_() with self._option(): self._token('None') with self._option(): self._token('False') with self._option(): self._constant('None') self._error( 'expecting one of: ' "'False' 'None' " ) self.name_last_node('value') self._define(['name', 'value'], []) with self._option(): with self._group(): with self._choice(): with self._option(): self._token('nameguard') with self._option(): self._token('ignorecase') with self._option(): self._token('left_recursion') with self._option(): self._token('parseinfo') self._error( 'expecting one of: ' "'ignorecase' 'left_recursion'" "'nameguard' 'parseinfo'" ) self.name_last_node('name') self._cut() with self._group(): with self._choice(): with self._option(): self._token('::') self._cut() self._boolean_() 
self.name_last_node('value') self._define(['value'], []) with self._option(): self._constant(True) self.name_last_node('value') self._error( 'expecting one of: ' "'::'" ) self._define(['name', 'value'], []) with self._option(): with self._group(): self._token('grammar') self.name_last_node('name') self._cut() self._token('::') self._cut() self._word_() self.name_last_node('value') self._define(['name', 'value'], []) with self._option(): with self._group(): self._token('namechars') self.name_last_node('name') self._cut() self._token('::') self._cut() self._string_() self.name_last_node('value') self._define(['name', 'value'], []) self._error( 'expecting one of: ' "'comments' 'eol_comments' 'grammar'" "'ignorecase' 'left_recursion'" "'namechars' 'nameguard' 'parseinfo'" "'whitespace'" ) self._cut() self._define(['name', 'value'], []) @tatsumasu() def _keywords_(self): def block0(): self._keywords_() self._positive_closure(block0) @tatsumasu() def _keyword_(self): self._token('@@keyword') self._cut() self._token('::') self._cut() def block0(): with self._group(): with self._choice(): with self._option(): self._word_() with self._option(): self._string_() self._error( 'expecting one of: ' ' ' ) self.add_last_node_to_name('@') with self._ifnot(): with self._group(): with self._choice(): with self._option(): self._token(':') with self._option(): self._token('=') self._error( 'expecting one of: ' "':' '='" ) self._closure(block0) @tatsumasu() def _paramdef_(self): with self._choice(): with self._option(): self._token('::') self._cut() self._params_() self.name_last_node('params') self._define(['params'], []) with self._option(): self._token('(') self._cut() with self._group(): with self._choice(): with self._option(): self._kwparams_() self.name_last_node('kwparams') with self._option(): self._params_() self.name_last_node('params') self._token(',') self._cut() self._kwparams_() self.name_last_node('kwparams') self._define(['kwparams', 'params'], []) with self._option(): self._params_() self.name_last_node('params') self._error( 'expecting one of: ' ' ' ) self._token(')') self._define(['kwparams', 'params'], []) self._error( 'expecting one of: ' "'(' '::'" ) @tatsumasu('Rule') def _rule_(self): def block0(): self._decorator_() self._closure(block0) self.name_last_node('decorators') self._name_() self.name_last_node('name') self._cut() with self._optional(): with self._choice(): with self._option(): self._token('::') self._cut() self._params_() self.name_last_node('params') self._define(['params'], []) with self._option(): self._token('(') self._cut() with self._group(): with self._choice(): with self._option(): self._kwparams_() self.name_last_node('kwparams') with self._option(): self._params_() self.name_last_node('params') self._token(',') self._cut() self._kwparams_() self.name_last_node('kwparams') self._define(['kwparams', 'params'], []) with self._option(): self._params_() self.name_last_node('params') self._error( 'expecting one of: ' ' ' ) self._token(')') self._define(['kwparams', 'params'], []) self._error( 'expecting one of: ' "'(' '::'" ) with self._optional(): self._token('<') self._cut() self._known_name_() self.name_last_node('base') self._define(['base'], []) self._token('=') self._cut() self._expre_() self.name_last_node('exp') self._token(';') self._cut() self._define(['base', 'decorators', 'exp', 'kwparams', 'name', 'params'], []) @tatsumasu() def _decorator_(self): self._token('@') with self._ifnot(): self._token('@') self._cut() with self._group(): with self._choice(): with 
self._option(): self._token('override') with self._option(): self._token('name') with self._option(): self._token('nomemo') self._error( 'expecting one of: ' "'name' 'nomemo' 'override'" ) self.name_last_node('@') @tatsumasu() def _params_(self): self._first_param_() self.add_last_node_to_name('@') def block0(): self._token(',') self._literal_() self.add_last_node_to_name('@') with self._ifnot(): self._token('=') self._cut() self._closure(block0) @tatsumasu() def _first_param_(self): with self._choice(): with self._option(): self._path_() with self._option(): self._literal_() self._error( 'expecting one of: ' '(?!\\d)\\w+(?:::(?!\\d)\\w+)+ ' ' ' ' ' ) @tatsumasu() def _kwparams_(self): def sep0(): self._token(',') def block1(): self._pair_() self._positive_gather(block1, sep0) @tatsumasu() def _pair_(self): self._word_() self.add_last_node_to_name('@') self._token('=') self._cut() self._literal_() self.add_last_node_to_name('@') @tatsumasu() def _expre_(self): with self._choice(): with self._option(): self._choice_() with self._option(): self._sequence_() self._error( 'expecting one of: ' "'|'