===== sphinx-a4doc-1.6.0/.github/workflows/build-prod.yml =====
name: Build and publish to PyPI
on:
push:
tags:
- 'v*'
jobs:
build:
name: Build and publish to PyPI
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 20
- run: "git fetch --depth=1 origin +refs/tags/*:refs/tags/* || :"
- name: Get the version
id: get_version
run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\/v/}
- name: Set up Python 3.9
uses: actions/setup-python@v1
with:
python-version: 3.9
- name: Install pep517 and twine
run: python -m pip install pep517 twine --user
- name: Build a binary wheel and a source tarball
run: python -m pep517.build --source --binary --out-dir dist/ ./
- name: twine check
run: python -m twine check dist/*
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release v${{ steps.get_version.outputs.VERSION }}
draft: false
prerelease: false
- name: Upload source Asset
id: upload-source-asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: dist/sphinx-a4doc-${{ steps.get_version.outputs.VERSION }}.tar.gz
asset_name: sphinx-a4doc-${{ steps.get_version.outputs.VERSION }}.tar.gz
asset_content_type: application/tar+gzip
- name: Publish distribution to Test PyPI
uses: pypa/gh-action-pypi-publish@master
with:
password: ${{ secrets.TEST_PYPI_PASSWORD }}
repository_url: https://test.pypi.org/legacy/
- name: Publish distribution to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
password: ${{ secrets.PYPI_PASSWORD }}

===== sphinx-a4doc-1.6.0/.gitignore =====
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/

===== sphinx-a4doc-1.6.0/.readthedocs.yml =====
build:
image: latest
python:
version: 3.7
setup_py_install: true

===== sphinx-a4doc-1.6.0/LICENSE =====
MIT License
Copyright (c) 2018 Tamika Nomara
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

===== sphinx-a4doc-1.6.0/MANIFEST.in =====
recursive-include sphinx_a4doc/_static *
recursive-exclude statbox_abt_backend *.pyc

===== sphinx-a4doc-1.6.0/README.md =====
# Sphinx plugin for Antlr4
A4Doc is a sphinx plugin for documenting Antlr4 grammars.
Its primary goal is to provide an overview for DSL users
(the generated documentation may omit some nuances essential
for compiler developers).
A4Doc's features are:
- a new domain called ``a4`` with ``grammar`` and ``rule`` directives;
- directives for rendering railroad diagrams;
- a directive for extracting documentation comments and rendering docs and
  diagrams from `.g4` source files.
## Resources
- [Documentation](https://taminomara.github.io/sphinx-a4doc/)
- [Installation](https://taminomara.github.io/sphinx-a4doc/#installation)
- [Quickstart](https://taminomara.github.io/sphinx-a4doc/#quickstart)
- [Example output](https://taminomara.github.io/sphinx-a4doc/#example-output)
## Requirements
- python >= 3.7
- sphinx >= 1.8.0
## Installation
```sh
pip3 install sphinx-a4doc
```
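
Then enable the extension in `conf.py`. A minimal sketch (the `a4_base_path`
below is an example layout from the documentation; point it at your own
grammar directory, and set it only if you generate docs from `.g4` sources):

```python
# conf.py -- minimal sketch of enabling sphinx-a4doc
extensions = [
    'sphinx_a4doc',
]

# Assuming conf.py is in project/docs/source/conf.py
# and grammars are in project/project/grammars:
from os.path import dirname
a4_base_path = dirname(__file__) + '/../../project/grammars'
```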
## Use cases
- [Solidity specification](https://docs.soliditylang.org/en/latest/grammar.html)
- [7zip format specification](https://py7zr.readthedocs.io/en/latest/archive_format.html)
## Changelog
*v1.6.0*
- Support LaTeX builder.
*v1.5.0*
- Fixed position of text in diagram nodes in Firefox.
- Added an option to set custom classes to diagram nodes: `//@ doc:css-class`.
*v1.4.0*
- Fixed compatibility with `singlehtml` mode (see [#15](https://github.com/taminomara/sphinx-a4doc/issues/15)).
*v1.3.0*
- Fixed python 3.9 compatibility issue (by [@sandrotosi](https://github.com/sandrotosi)).
*v1.2.2, v1.2.3, v1.2.4*
- No functional changes, just setting up CI to push PyPI releases automatically.
*v1.2.1*
- Fixed integration with intersphinx.
*v1.2.0*:
- Renamed `conf.py` settings: `a4_autodoc_*` became `a4_autogrammar_*`.
- Added support for section comments in grammar files.
- Added flexible settings to control how literal lexer rules are rendered.
- Added setting to convert rule names from ``CamelCase`` to ``dash-case``.
- Fixed incorrect parsing of documentation comments in some cases.
*v1.0.1*:
- Fixed absence of `.css` file for railroad diagrams.
*v1.0.0*:
- Initial release.

===== sphinx-a4doc-1.6.0/docs/Makefile =====
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = A4Doc
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -E

===== sphinx-a4doc-1.6.0/docs/examples/Json.g4 =====
/**
* JSON (JavaScript Object Notation) is a lightweight data-interchange format.
*/
grammar Json;
/**
* On top level, JSON consists of a single value. That value can be either
* a complex structure (such as an `object` or an `array`) or a primitive
* type (a `STRING` in double quotes, a `NUMBER`,
* or ``true`` or ``false`` or ``null``).
*/
value
: object
| array
| STRING
| NUMBER
| TRUE
| FALSE
| NULL
;
/**
* Object is a collection of name/value pairs. In various languages,
* this is realized as an object, record, struct, dictionary,
* hash table, keyed list, or associative array.
*/
object
    : '{' (STRING ':' value (',' STRING ':' value)*)? '}'
;
/**
* Array is an ordered list of values. In most languages, this is realized as
* vector, list, array or sequence.
*/
array
: '[' (value (',' value)*)? ']'
;
/**
* A number is very much like a C or Java number,
* except that the octal and hexadecimal formats are not used.
*/
//@ doc:name number
NUMBER
: '-'? ('0' | [1-9] [0-9]*) ('.' [0-9]+)? EXPONENT?
;
//@ doc:inline
//@ doc:importance 0
fragment EXPONENT
    : ('e' | 'E') ('+' | '-')? [0-9]+
;
/**
* A string is a sequence of zero or more Unicode characters,
* wrapped in double quotes, using backslash escapes.
* A character is represented as a single character string.
* A string is very much like a C or Java string.
*
* .. railroad-diagram::
*
* - terminal: '"'
* - zero_or_more:
* - choice:
* - terminal: 'Any unicode character except " and \'
* - sequence:
* - terminal: '\'
* - choice:
* - [terminal: '"', comment: quotation mark]
* - [terminal: '\', comment: reverse solidus]
* - [terminal: '/', comment: solidus]
* - [terminal: 'b', comment: backspace]
* - [terminal: 'f', comment: formfeed]
* - [terminal: 'n', comment: newline]
* - [terminal: 'r', comment: carriage return]
* - [terminal: 't', comment: horizontal tab]
* - [terminal: 'u', comment: 4 hexadecimal digits]
* - terminal: '"'
*/
//@ doc:name string
//@ doc:no-diagram
STRING
: '"' (ESC | SAFECODEPOINT)* '"'
;
//@ doc:nodoc
fragment ESC
: '\\' (["\\/bfnrt] | UNICODE)
;
//@ doc:nodoc
fragment UNICODE
: 'u' HEX HEX HEX HEX
;
//@ doc:nodoc
fragment HEX
: [0-9a-fA-F]
;
//@ doc:nodoc
fragment SAFECODEPOINT
: ~ ["\\\u0000-\u001F]
;
//@ doc:nodoc
//@ doc:name true
TRUE
: 'true'
;
//@ doc:nodoc
//@ doc:name false
FALSE
: 'false'
;
//@ doc:nodoc
//@ doc:name null
NULL
: 'null'
;

===== sphinx-a4doc-1.6.0/docs/make.bat =====
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=A4Doc
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

===== sphinx-a4doc-1.6.0/docs/source/_templates/to_github.html =====

===== sphinx-a4doc-1.6.0/docs/source/conf.py =====
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# -- Project information -----------------------------------------------------
project = 'A4Doc'
copyright = '2021, Tamika Nomara'
author = 'Tamika Nomara'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_a4doc',
'sphinx_a4doc.contrib.rst_autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Role to be used by default.
default_role = 'any'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
html_sidebars = {
'**': [
'about.html',
'localtoc.html',
'relations.html',
'to_github.html',
'searchbox.html',
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'A4Docdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'A4Doc.tex', 'A4Doc Documentation',
'Tamika Nomara', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'a4doc', 'A4Doc Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'A4Doc', 'A4Doc Documentation',
author, 'A4Doc', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'rst': ('http://www.sphinx-doc.org', None),
}
# -- A4Doc settings
a4_base_path = os.path.dirname(__file__) + '/../examples'

===== sphinx-a4doc-1.6.0/docs/source/index.rst =====
Sphinx plugin for Antlr4
========================
A4Doc is a sphinx plugin for documenting Antlr4 grammars.
Its primary goal is to provide an overview for DSL users
(generated documentation may omit some nuances essential
for compiler developers).
See an example output: :a4:g:`Json`.
.. _features:
Features
--------
- A new domain called ``a4`` with ``grammar`` and ``rule`` directives.
- Directives for rendering railroad diagrams, such as this one:
.. railroad-diagram::
- choice:
- terminal: 'parser'
-
- terminal: 'lexer '
default: 1
- terminal: 'grammar'
- non_terminal: 'identifier'
- terminal: ';'
- Directive for extracting documentation comments and rendering docs and
diagrams from ``.g4`` source files.
Requirements
------------
A4Doc uses dataclasses to represent parsed ANTLR files, so ``python >= 3.7``
is required. This extension also requires ``sphinx >= 1.8.0`` because it
relies on some features added in that release.
Installation
------------
Install ``sphinx-a4doc`` with pip:
.. code-block:: sh
pip3 install sphinx-a4doc
Add ``sphinx_a4doc`` to the list of extensions in your ``conf.py``.
If you intend to generate documentation from sources, also specify the
location of your grammar files:
.. code-block:: python
extensions = [
'sphinx_a4doc',
]
# Assuming conf.py is in project/docs/source/conf.py
# and grammars are in project/project/grammars
from os.path import dirname
a4_base_path = dirname(__file__) + '/../../project/grammars'
Quickstart
----------
Use the :rst:dir:`a4:grammar` directive to declare a new grammar.
Within the grammar block, use the :rst:dir:`a4:rule` directive to declare a new rule:
.. code-block:: rst
.. a4:grammar:: MyGrammar
A grammar for my DSL.
.. a4:rule:: root
The root grammar rule.
The above code produces this output:
.. highlights::
.. a4:grammar:: MyGrammar
A grammar for my DSL.
.. a4:rule:: root
The root grammar rule.
Use the :rst:role:`a4:grammar` (or :rst:role:`a4:g` for short) and
:rst:role:`a4:rule` (or :rst:role:`a4:r`) roles to refer to the declared
grammars and rules:
.. code-block:: rst
Grammar :a4:g:`MyGrammar` has a root rule :a4:r:`MyGrammar.root`.
The above code produces this output:
.. highlights::
Grammar :a4:g:`MyGrammar` has a root rule :a4:r:`MyGrammar.root`.
Use :rst:dir:`railroad-diagram`, :rst:dir:`lexer-rule-diagram` and
:rst:dir:`parser-rule-diagram` directives to render diagrams:
.. code-block:: rst
.. parser-rule-diagram:: 'def' ID '(' (arg (',' arg)*)? ')' ':'
The above code produces this output:
.. highlights::
.. parser-rule-diagram:: 'def' ID '(' (arg (',' arg)*)? ')' ':'
Use :rst:dir:`a4:autogrammar` directive to generate documentation
from a grammar file.
RST reference
-------------
Declaring objects
~~~~~~~~~~~~~~~~~
.. rst:autodirective:: .. a4:grammar:: name
.. rst:autodirective:: .. a4:rule:: name
Cross-referencing objects
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rst:role:: any
:noindex:
All ``a4`` objects can be cross-referenced via the :rst:role:`any` role.
If given a full path, e.g. ``:any:`grammar_name.rule_name```,
   :rst:role:`any` will search for a rule called ``rule_name`` in the
grammar called ``grammar_name`` and then, should this search fail, in all
grammars that are imported from ``grammar_name``, recursively.
If given a relative path, e.g. ``:any:`name```,
:rst:role:`any` will perform a global search for a rule or a grammar with the
corresponding name.
.. rst:role:: a4:grammar
a4:g
Cross-reference a grammar by its name.
There's nothing special about this role, just specify the grammar name.
.. rst:role:: a4:rule
a4:r
   Cross-reference a rule by its name or full path.
If given a full path, e.g. ``:a4:r:`grammar_name.rule_name```,
the rule will be first searched in the corresponding grammar, then in
all imported grammars, recursively.
If given a rule name only, e.g. ``:a4:r:`rule_name```, the behavior depends
on context:
   - when used in a grammar declaration body, the rule will first be searched
     in that grammar, then in any imported grammar, and finally in the
     default grammar.
- when used without context, the rule will only be searched
in the default grammar.
   Prefixing the full path with a tilde works as expected
   (only the last path component is displayed).
Rendering diagrams
~~~~~~~~~~~~~~~~~~
.. rst:autodirective:: railroad-diagram
.. rst:autodirective:: lexer-rule-diagram
:no-options:
.. rst:autodirective:: parser-rule-diagram
:no-options:
Autodoc directive
~~~~~~~~~~~~~~~~~
.. rst:autodirective:: .. a4:autogrammar:: filename
:no-inherited-options:
:no-options-header:
.. rst:autodirective:: .. a4:autorule:: filename rulename
:no-inherited-options:
:no-options-header:
.. rst:autodirective:: docstring-marker
.. rst:autodirective:: members-marker
.. _grammar_comments:
Grammar comments and annotations
--------------------------------
The :rst:dir:`a4:autogrammar` directive does not treat every comment found
in a grammar file as documentation. Instead, it searches for specially
formatted 'documentation' comments. There are three types of such comments:
- documentation comments are multiline comments that start with ``/**``
  (that is, a slash followed by a double asterisk). These comments should contain
valid rst-formatted text.
  It is common to decorate documentation comments with an asterisk at the
  start of each line. Though this is completely optional, a4doc recognizes
  and handles this pattern.
Example:
.. code-block:: antlr
/**
* This is the grammar root.
*/
module: moduleItem* EOF
- control comments are inline comments that start with ``//@``. Control
  comments contain special commands that affect the rendering process.
Example:
.. code-block:: antlr
//@ doc:no-diagram
module: moduleItem* EOF
- section comments are comments that start with ``///``. They're used to
  render text between production rules and to split the grammar definition
  into sections.
Example:
.. code-block:: antlr
/// **Module definition**
///
/// This paragraph describes the ``Module definition``
/// section of the grammar.
module: moduleItem* EOF
moduleItem: import | symbol
.. versionadded:: 1.2.0
There are also restrictions on where documentation and control comments may
appear:
- documentation comments can be placed either at the beginning of the file,
  before the ``grammar`` keyword (in which case they document the whole
  grammar), or right before a production rule or a fragment declaration
  (in which case they are rendered as a rule description).
  They can also be embedded into the rule description, in which case they
  are rendered as part of the railroad diagram;
- control comments can only be placed before a production rule declaration.
They only affect rendering of that specific production rule;
- multiple documentation and control comments can appear before a rule. In this
  case, the first documentation comment will be rendered before the
  automatically generated railroad diagram, all subsequent documentation
  comments will be rendered after it, and all control comments will be applied
  before rendering the documentation comments;
- section comments can only be placed between rules in the main section
of a file.
.. _control_comments:
Control comments
~~~~~~~~~~~~~~~~
The list of control comments includes:
- ``//@ doc:nodoc`` -- exclude this rule from ``autogrammar`` output.
- ``//@ doc:inline`` -- exclude this rule from ``autogrammar`` output; any
  automatically generated railroad diagram that refers to this rule will
  include its contents instead of a single node.
  Useful for fragments and simple lexer rules.
  For example:
.. code-block:: antlr
NUMBER
: '-'? ('0' | [1-9] [0-9]*) ('.' [0-9]+)? EXPONENT?
;
//@ doc:inline
fragment EXPONENT
         : ('e' | 'E') ('+' | '-')? [0-9]+
;
  will produce the :a4:r:`Json.NUMBER` rule (note how the exponent is rendered
  inside the number diagram).
- ``//@ doc:no-diagram`` -- do not generate railroad diagram.
- ``//@ doc:importance <int>`` -- controls the 'importance' of a rule.
By default, all rules have importance of ``1``.
Rules with importance of ``0`` will be rendered off the main line in optional
groups:
.. parser-rule-diagram:: R1? R0?;
//@ doc:name Rule with importance 0
//@ doc:importance 0
R0 : EOF;
//@ doc:name Rule with importance 1
//@ doc:importance 1
R1 : EOF
  In alternative groups, the rule with the highest importance will be centered:
.. parser-rule-diagram:: (R0 | R1) (R2 | R1);
//@ doc:name Rule with importance 0
//@ doc:importance 0
R0 : EOF;
//@ doc:name Rule with importance 1
//@ doc:importance 1
R1 : EOF;
//@ doc:name Rule with importance 2
//@ doc:importance 2
R2 : EOF
- ``//@ doc:unimportant`` -- set importance to ``0``.
- ``//@ doc:name <name>`` -- set a human-readable name for this rule.
See :rst:opt:`a4:rule:name` option.
- ``//@ doc:css-class`` -- add a custom CSS class to all diagrams
referencing this rule.
.. versionadded:: 1.5.0
.. _config:
Configuration
-------------
.. _custom_style:
Customizing diagram style
~~~~~~~~~~~~~~~~~~~~~~~~~
To customize the diagram style, one can replace
`the default css file `_
by placing an ``a4_railroad_diagram.css`` file in the ``_static`` directory.
.. versionadded:: 1.6.0
   To customize how diagrams look in the LaTeX build, place an
   ``a4_railroad_diagram_latex.css`` file in the ``_static`` directory.
.. . .. _custom_lookup:
.. . Customizing process of grammar files lookup
.. . ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example output
--------------
This example was generated from
`Json.g4 `_.
.. a4:autogrammar:: ./Json
:only-reachable-from: value
Indices and tables
------------------
* :ref:`genindex`
* :ref:`search`

===== sphinx-a4doc-1.6.0/push_docs.py =====
#!/usr/bin/env python3
import contextlib
import os
import shutil
import tempfile
REPO = 'git@github.com:taminomara/sphinx-a4doc.git'
DOCS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'docs'))
BUILD_PATH = os.path.join(DOCS_PATH, 'build', 'html')
@contextlib.contextmanager
def tmpdir():
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
@contextlib.contextmanager
def cd(path):
old_path = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_path)
def build_html():
with cd(DOCS_PATH):
os.system('make html')
def push_html():
with tmpdir() as tmp:
path = os.path.join(tmp, 'html')
shutil.copytree(BUILD_PATH, path)
with cd(path):
shutil.rmtree('.doctrees', ignore_errors=True)
os.system('git init')
os.system('git add .')
os.system('git commit -m "update docs"')
os.system('git push -f ' + REPO + ' main:gh-pages')
if __name__ == '__main__':
build_html()
push_html()

===== sphinx-a4doc-1.6.0/pyproject.toml =====
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"

===== sphinx-a4doc-1.6.0/setup.cfg =====
[metadata]
name = sphinx-a4doc
description = Sphinx domain and autodoc for Antlr4 grammars
long_description = file: README.md
long_description_content_type = text/markdown
python_requires = >=3.7
author = Tamika Nomara
author_email = taminomara@gmail.com
url = https://github.com/taminomara/sphinx-a4doc
license = MIT
keywords = sphinx antlr4 autodoc
classifiers =
Development Status :: 5 - Production/Stable
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
License :: OSI Approved :: MIT License
Framework :: Sphinx
Framework :: Sphinx :: Extension
Topic :: Software Development :: Documentation
Topic :: Documentation
Topic :: Documentation :: Sphinx
[options]
zip_safe = False
include_package_data = True
install_requires =
sphinx>=1.8.0
antlr4-python3-runtime==4.7.1
PyYAML
svglib
setup_requires =
setuptools-scm>=3.5.0
setuptools>=42.0
wheel
pep517
packages = find:
[options.package_data]
sphinx_a4doc =
_static/a4_railroad_diagram.css

===== sphinx-a4doc-1.6.0/setup.py =====
from setuptools import setup
setup(
project_urls={
'Documentation': 'https://taminomara.github.io/sphinx-a4doc/',
'Source': 'https://github.com/taminomara/sphinx-a4doc',
'Tracker': 'https://github.com/taminomara/sphinx-a4doc/issues',
},
use_scm_version={
"local_scheme": "no-local-version"
}
)

===== sphinx-a4doc-1.6.0/sphinx_a4doc/__init__.py =====
import os
import sphinx.application
from sphinx_a4doc.domain import A4Domain
from sphinx_a4doc.diagram_directive import RailroadDiagramNode, RailroadDiagram, LexerRuleDiagram, ParserRuleDiagram
from sphinx_a4doc.settings import register_settings
from sphinx_a4doc.autodoc_directive import AutoGrammar, AutoRule
def config_inited(app, config):
static_path = os.path.join(os.path.dirname(__file__), '_static')
config.html_static_path.append(static_path)
def setup(app: sphinx.application.Sphinx):
app.setup_extension('sphinx_a4doc.contrib.marker_nodes')
app.add_domain(A4Domain)
app.add_node(RailroadDiagramNode,
text=(RailroadDiagramNode.visit_node_text,
RailroadDiagramNode.depart_node),
html=(RailroadDiagramNode.visit_node_html,
RailroadDiagramNode.depart_node),
latex=(RailroadDiagramNode.visit_node_latex,
RailroadDiagramNode.depart_node),
man=(RailroadDiagramNode.visit_node_man,
RailroadDiagramNode.depart_node))
app.add_directive('railroad-diagram', RailroadDiagram)
app.add_directive('lexer-rule-diagram', LexerRuleDiagram)
app.add_directive('parser-rule-diagram', ParserRuleDiagram)
app.add_directive_to_domain('a4', 'autogrammar', AutoGrammar)
app.add_directive_to_domain('a4', 'autorule', AutoRule)
register_settings(app)
app.add_css_file('a4_railroad_diagram.css')
app.connect('config-inited', config_inited)
return {
'version': '1.0.0',
'parallel_read_safe': True,
'parallel_write_safe': True,
}

===== sphinx-a4doc-1.6.0/sphinx_a4doc/_static/a4_railroad_diagram.css =====
.railroad-diagram path {
stroke-width: 1.5;
stroke: black;
fill: none;
}
.railroad-diagram text {
font-size: 14px;
font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
text-anchor: middle;
dominant-baseline: central;
font-weight: bold;
}
.railroad-diagram a {
text-decoration: none;
}
.railroad-diagram rect {
stroke-width: 1.5;
stroke: black;
fill: none;
}
.railroad-diagram g.comment text {
font-weight: normal;
font-style: italic;
}
.railroad-diagram g.comment rect {
stroke-width: 0;
}

===== sphinx-a4doc-1.6.0/sphinx_a4doc/_static/a4_railroad_diagram_latex.css =====
.railroad-diagram path {
stroke-width: 1.5;
stroke: black;
fill: none;
}
.railroad-diagram text {
font-size: 14px;
font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
text-anchor: middle;
dy: 4;
}
.railroad-diagram a {
text-decoration: none;
}
.railroad-diagram rect {
stroke-width: 1.5;
stroke: black;
fill: none;
}
.railroad-diagram g.comment text {
font-weight: normal;
font-style: italic;
}
.railroad-diagram g.comment rect {
stroke-width: 0;
}

===== sphinx-a4doc-1.6.0/sphinx_a4doc/autodoc_directive.py =====
import os
import dataclasses
import docutils.parsers.rst
import docutils.statemachine
import docutils.nodes
import sphinx.addnodes
import sphinx.util.docutils
import sphinx.util.nodes
from sphinx_a4doc.settings import GrammarType, OrderSettings, GroupingSettings, EndClass
from sphinx_a4doc.settings import global_namespace, autogrammar_namespace, autorule_namespace
from sphinx_a4doc.domain import Grammar, Rule
from sphinx_a4doc.diagram_directive import RailroadDiagramNode
from sphinx_a4doc.model.model import ModelCache, Model, RuleBase
from sphinx_a4doc.model.reachable_finder import find_reachable_rules
from sphinx_a4doc.model.model_renderer import Renderer, cc_to_dash
from sphinx_a4doc.contrib.marker_nodes import find_or_add_marker
from typing import *
class ModelLoaderMixin:
used_models: Optional[Set[Model]] = None
def load_model(self, name: str) -> Model:
# TODO: use grammar resolver
base_path = global_namespace.load_global_settings(self.env).base_path
if not name.endswith('.g4'):
name += '.g4'
name = os.path.normpath(os.path.expanduser(name))
path = os.path.join(base_path, name)
model = ModelCache.instance().from_file(path)
if self.used_models is None:
self.used_models = set()
self.used_models.add(model)
return model
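
    # Resolution example: with ``a4_base_path = 'docs/examples'`` (as in this
    # project's own conf.py), ``load_model('./Json')`` -- which is what
    # ``.. a4:autogrammar:: ./Json`` ends up calling -- appends the ``.g4``
    # extension, normalizes the path, and loads ``docs/examples/Json.g4``
    # through the shared ModelCache.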
def register_deps(self):
if self.used_models is None:
return
seen = set()
models = self.used_models.copy()
while models:
model = models.pop()
if model in seen:
continue
if not model.is_in_memory():
self.state.document.settings.record_dependencies.add(model.get_path())
models.update(model.get_imports())
seen.add(model)
class DocsRendererMixin:
def render_docs(self, path: str, docs: List[Tuple[int, str]], node, titles=False):
docs = docs or []
for line, doc in docs:
lines = doc.splitlines()
items = [(path, line + i - 1) for i in range(len(lines))]
content = docutils.statemachine.StringList(lines, items=items)
with sphinx.util.docutils.switch_source_input(self.state, content):
if titles:
sphinx.util.nodes.nested_parse_with_titles(self.state, content, node)
else:
self.state.nested_parse(content, 0, node)
class AutoGrammar(Grammar, ModelLoaderMixin, DocsRendererMixin):
"""
Autogrammar directive generates a grammar description from a ``.g4`` file.
    Its only argument, ``name``, should contain the path of the grammar file
    relative to ``a4_base_path``. The file extension may be omitted.
.. TODO: reference to global settings
.. TODO: mention grammar resolver (once it's implemented).
Autogrammar will read a ``.g4`` file and extract grammar name (which will
be used for cross-referencing), grammar-level documentation comments,
set of production rules, their documentation and contents. It will then
generate railroad diagrams and render extracted information.
    See more on how to write documentation comments and control the look of
    the automatically generated railroad diagrams in the
    ':ref:`grammar_comments`' section.
Like :rst:dir:`autoclass` and other default autodoc directives,
``autogrammar`` can have contents on its own. These contents will
be merged with the automatically generated description.
Use :rst:dir:`docstring-marker` and :rst:dir:`members-marker` to control
merging process.
**Options:**
.. rst:option:: name
type
imports
noindex
diagram-*
Inherited from :rst:dir:`a4:grammar` directive.
If not given, :rst:opt:`:type: ` and
:rst:opt:`:imports: `
will be extracted from grammar file.
.. members-marker::
"""
required_arguments = 1
has_content = True
settings = autogrammar_namespace.for_directive()
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.root_rule: Optional[RuleBase] = None
def run(self):
self.name = 'a4:grammar'
if 'cc-to-dash' in self.options and 'diagram-cc-to-dash' not in self.options:
self.options['diagram-cc-to-dash'] = self.options['cc-to-dash']
# Load model from file
model = self.load_model(self.arguments[0])
# Early exit
if model.has_errors():
self.register_deps()
return [
self.state_machine.reporter.error(
'unable to document this grammar',
line=self.lineno
)
]
# Update settings from model
if 'imports' not in self.options:
self.options['imports'] = [
i.get_name() for i in model.get_imports() if i.get_name()
]
if 'type' not in self.options and model.get_type():
self.options['type'] = GrammarType[model.get_type().upper()]
self.arguments = [model.get_name()]
self.env.temp_data.setdefault('a4:autogrammar_ctx', []).append(model.get_path())
try:
# Create a skeleton of the grammar description
nodes = super(AutoGrammar, self).run()
# If user described some rules manually, we want that descriptions
# to replace ones obtained from the grammar file. We also want to
# remove all descriptions temporarily to rearrange them according
# to the `ordering` settings
desc_content, rule_nodes = self.cut_rule_descriptions(model, nodes)
# Set proper ref_context
self.before_content()
try:
# Find place where docstring should be rendered
doc_node = find_or_add_marker(desc_content, 'docstring')
# Render model docstring
self.render_docs(model.get_path(), model.get_model_docs(), doc_node)
# Insert docstring to the document
doc_node.replace_self(doc_node.children)
# Find place where to insert rule descriptions
rules_node = find_or_add_marker(desc_content, 'members')
# Arrange rules found in the grammar file and render them
last_section = None
for rule in self.make_order(model):
if (
self.settings.honor_sections and
self.settings.ordering is OrderSettings.BY_SOURCE and
last_section is not rule.section
):
last_section = rule.section
if last_section is not None:
self.render_docs(
rule.position.file,
last_section.docs,
rules_node,
True
)
# Manual description overrides autogenerated description
if rule.name in rule_nodes:
rules_node.append(rule_nodes.pop(rule.name))
else:
rules_node.extend(self.make_rule(rule))
# Add any rule that was described manually but that wasn't found
# in the grammar file
for rule in sorted(rule_nodes.values(), key=lambda x: x.line):
rules_node.append(rule)
# Insert rule descriptions to the document
rules_node.replace_self(rules_node.children)
finally:
self.after_content()
return nodes
finally:
self.env.temp_data['a4:autogrammar_ctx'].pop()
self.register_deps()
def cut_rule_descriptions(self, model, nodes):
desc_content = None
rule_nodes = {}
for node in nodes:
if not isinstance(node, sphinx.addnodes.desc):
continue
for content_node in node.children:
if isinstance(content_node, sphinx.addnodes.desc_content):
desc_content = content_node
break
else:
raise RuntimeError('no desc_content can be found')
for rule_node in node.traverse(
lambda x: (
isinstance(x, sphinx.addnodes.desc) and
x['domain'] == 'a4' and
x['objtype'] == 'rule'
)
):
sig = rule_node.next_node(sphinx.addnodes.desc_signature)
if sig is None:
continue
prefix = f'a4.{model.get_name()}.'
for ident in sig['ids']:
if ident.startswith(prefix):
rule_nodes[ident[len(prefix):]] = rule_node
rule_node.replace_self([])
break
assert desc_content is not None
return desc_content, rule_nodes
def make_order(self, model: Model) -> List[RuleBase]:
lexer_rules = []
if self.settings.lexer_rules:
lexer_rules = model.get_terminals()
if not self.settings.fragments:
lexer_rules = filter(lambda r: not r.is_fragment, lexer_rules)
if not self.settings.undocumented:
lexer_rules = filter(lambda r: r.documentation, lexer_rules)
lexer_rules = list(lexer_rules)
parser_rules = []
if self.settings.parser_rules:
parser_rules = model.get_non_terminals()
if not self.settings.undocumented:
parser_rules = filter(lambda r: r.documentation, parser_rules)
parser_rules = list(parser_rules)
precedence = {
OrderSettings.BY_SOURCE: lambda rule: rule.position,
OrderSettings.BY_NAME: lambda rule: rule.name.lower(),
}[self.settings.ordering]
if self.settings.grouping is GroupingSettings.MIXED:
all_rules = sorted(lexer_rules + parser_rules, key=precedence)
elif self.settings.grouping is GroupingSettings.LEXER_FIRST:
all_rules = sorted(lexer_rules, key=precedence) + sorted(parser_rules, key=precedence)
elif self.settings.grouping is GroupingSettings.PARSER_FIRST:
all_rules = sorted(parser_rules, key=precedence) + sorted(lexer_rules, key=precedence)
else:
raise RuntimeError('invalid grouping parameter')
if self.settings.only_reachable_from:
rule_name = self.settings.only_reachable_from
rule_model = model
if '.' in rule_name:
model_name, rule_name = rule_name.split('.', 1)
rule_model = self.load_model(model_name)
rule = rule_model.lookup(rule_name)
self.root_rule = rule
if rule is None:
return all_rules
reachable = find_reachable_rules(rule)
return [r for r in all_rules if r in reachable]
return all_rules
def make_rule(self, rule: RuleBase) -> List[docutils.nodes.Node]:
if rule.is_doxygen_nodoc or rule.is_doxygen_inline:
return [] # implicitly disabled
if not rule.documentation and rule.content is None:
return [] # nothing to document
options = {}
if 'noindex' in self.options:
options['noindex'] = None
if self.settings.cc_to_dash and not rule.display_name:
options['name'] = cc_to_dash(rule.name)
elif rule.display_name:
options['name'] = rule.display_name
rule_dir = Rule(
name='a4:rule',
arguments=[rule.name],
options=options,
content=docutils.statemachine.StringList(),
lineno=self.lineno,
content_offset=self.content_offset,
block_text=self.block_text,
state=self.state,
state_machine=self.state_machine
)
nodes = rule_dir.run()
for node in nodes:
if not isinstance(node, sphinx.addnodes.desc):
continue
for content_node in node.children:
if isinstance(content_node, sphinx.addnodes.desc_content):
desc_content = content_node
break
else:
raise RuntimeError('no desc_content can be found')
if rule.documentation:
self.render_docs(rule.position.file, rule.documentation[:1], desc_content)
docs = rule.documentation[1:]
else:
docs = rule.documentation
if not rule.is_doxygen_no_diagram:
env = self.env
grammar = env.ref_context.get('a4:grammar', '__default__')
renderer = Renderer(
self.diagram_settings.literal_rendering,
self.diagram_settings.cc_to_dash
)
dia = renderer.visit(rule.content)
settings = self.diagram_settings
if (
self.settings.mark_root_rule and
self.root_rule is not None and
rule.name == self.root_rule.name and
rule.model is self.root_rule.model
):
settings = dataclasses.replace(settings, end_class=EndClass.COMPLEX)
desc_content.append(
RailroadDiagramNode(
'', diagram=dia, options=settings, grammar=grammar
)
)
self.render_docs(rule.position.file, docs, desc_content)
break
return nodes
class AutoRule(Rule, ModelLoaderMixin, DocsRendererMixin):
"""
Autorule directive renders documentation for a single rule.
    It accepts two arguments: the first is a path to the grammar file
    relative to ``a4_base_path``, the second is the name of the rule that
    should be documented.
    Note that autorule can only be used within a grammar definition.
    The name of the enclosing grammar definition must match the name of the
    grammar from which the documented rule is imported.
**Options:**
.. rst:option:: name
noindex
diagram-*
Inherited from :rst:dir:`a4:rule` directive.
.. members-marker::
"""
settings = autorule_namespace.for_directive()
required_arguments = 1
optional_arguments = 2
has_content = True
def run(self):
self.name = 'a4:rule'
if len(self.arguments) == 2:
path, rule_name = self.arguments
else:
rule_name = self.arguments[0]
if self.env.temp_data.get('a4:autogrammar_ctx'):
path = self.env.temp_data['a4:autogrammar_ctx'][-1]
elif 'a4:grammar' in self.env.ref_context:
path = self.env.ref_context['a4:grammar']
else:
return [
self.state_machine.reporter.error(
'could not figure out grammar path for autorule directive',
line=self.lineno
)
]
model = self.load_model(path)
if model.has_errors():
self.register_deps()
return [
self.state_machine.reporter.error(
'unable to document this rule',
line=self.lineno
)
]
if self.env.ref_context.get('a4:grammar') != model.get_name():
return [
self.state_machine.reporter.error(
                    f'can only use autorule while within a proper '
f'grammar definition',
line=self.lineno
)
]
rule = model.lookup(rule_name)
if rule is None:
self.register_deps()
return [
self.state_machine.reporter.error(
f'unknown rule {rule_name!r}',
line=self.lineno
)
]
if rule.display_name and 'name' not in self.options:
self.options['name'] = rule.display_name
self.arguments = [rule.name]
self.env.temp_data.setdefault('a4:autogrammar_ctx', []).append(model.get_path())
try:
nodes = super(AutoRule, self).run()
desc_content = self.find_desc_content(nodes)
self.before_content()
try:
doc_node = find_or_add_marker(desc_content, 'docstring')
if rule.documentation:
self.render_docs(rule.position.file, rule.documentation[:1], doc_node)
docs = rule.documentation[1:]
else:
docs = rule.documentation
if not rule.is_doxygen_no_diagram:
env = self.env
grammar = env.ref_context.get('a4:grammar', '__default__')
renderer = Renderer(
self.diagram_settings.literal_rendering,
self.diagram_settings.cc_to_dash
)
dia = renderer.visit(rule.content)
settings = self.diagram_settings
doc_node.append(
RailroadDiagramNode(
'', diagram=dia, options=settings, grammar=grammar
)
)
self.render_docs(rule.position.file, docs, doc_node)
doc_node.replace_self(doc_node.children)
finally:
self.after_content()
return nodes
finally:
self.env.temp_data['a4:autogrammar_ctx'].pop()
self.register_deps()
def find_desc_content(self, nodes):
for node in nodes:
if not isinstance(node, sphinx.addnodes.desc):
continue
for content_node in node.children:
if isinstance(content_node, sphinx.addnodes.desc_content):
return content_node
break
raise RuntimeError('no desc_content can be found')

===== sphinx-a4doc-1.6.0/sphinx_a4doc/contrib/__init__.py =====

===== sphinx-a4doc-1.6.0/sphinx_a4doc/contrib/configurator.py =====
import dataclasses
import enum
import sphinx.application
import sphinx.environment
from typing import *
T = TypeVar('T')
class Converter:
"""
Converters are used to parse and validate directive and global options.
They are used as a more powerful substitute for helper functions declared
in `rst.directives`.
"""
def from_str(self, value: str):
"""
Parses string and returns a value.
Invoked when parsing directive arguments.
"""
raise NotImplementedError
def from_any(self, value: Any):
"""
Validate (and probably convert) object of any type.
Intended to validate values loaded from conf.py,
but currently not in use.
"""
raise NotImplementedError
def __call__(self, value: str):
"""
Calls `from_str()`.
With this method present, converters can be used in ``option_spec``.
"""
return self.from_str(value)
def __str__(self):
"""
String representation used as a value description in rst autodoc.
"""
return '...'
class StrConverter(Converter):
"""
    Generic converter for strings.
"""
def __init__(self, min_len=0, max_len=None, regex=None):
"""
:param min_len: if given, checks that string is at least this long.
:param max_len: if given, checks that string is at most this long.
:param regex: if given, string will be matched against this regular
expression via `re.match`.
"""
self.min_len = min_len
self.max_len = max_len
self.regex = regex
def from_str(self, value: str):
value = value.strip()
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, str):
raise ValueError(f'expected string, got {type(value)}')
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} symbols long')
if self.max_len is not None and len(value) > self.max_len:
            raise ValueError(f'should be at most {self.max_len} symbols long')
if self.regex is not None:
import re
if re.match(self.regex, value) is None:
raise ValueError(f'should match regex "{self.regex}"')
return value
def __str__(self):
        return '<str>'
class IntConverter(Converter):
"""
Generic converter for ints.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = int(value)
except ValueError:
raise ValueError('should be an integer')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, int):
raise ValueError(f'expected int, got {type(value)}')
if self.min_val is not None and value < self.min_val:
if self.min_val == 1:
raise ValueError(f'should be positive')
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == -1:
raise ValueError(f'should be negative')
if self.max_val == 0:
raise ValueError(f'should not be positive')
            raise ValueError(f'should be no greater than {self.max_val}')
return value
def __str__(self):
        return '<int>'
class FloatConverter(Converter):
"""
Generic converter for floats.
"""
def __init__(self, min_val=None, max_val=None):
"""
:param min_val: if given, checks that int is no less than this value.
:param max_val: if given, checks that int is no greater than this value.
"""
self.min_val = min_val
self.max_val = max_val
def from_str(self, value: str):
try:
value = float(value)
except ValueError:
raise ValueError('should be a float')
return self.from_any(value)
def from_any(self, value: Any):
if not isinstance(value, (float, int)):
raise ValueError(f'expected float, got {type(value)}')
value = float(value)
if self.min_val is not None and value < self.min_val:
if self.min_val == 0:
raise ValueError(f'should not be negative')
raise ValueError(f'should be no less than {self.min_val}')
if self.max_val is not None and value > self.max_val:
if self.max_val == 0:
raise ValueError(f'should not be positive')
            raise ValueError(f'should be no greater than {self.max_val}')
return value
def __str__(self):
        return '<float>'
class ListConverter(Converter):
"""
Parses space- or comma-separated lists, similar to `positive_int_list`.
"""
def __init__(self, u: Converter, min_len=0, max_len=None):
"""
:param u: nested converter which will be used to parse list elements.
:param min_len: if given, checks that list is at least this long.
:param max_len: if given, checks that list is at most this long.
"""
self.u = u
self.min_len = min_len
self.max_len = max_len
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected list, got {type(value)}')
self.check_len(value)
result = []
for i, v in enumerate(value):
result.append(self.u.from_any(v))
return result
def check_len(self, value):
if self.min_len is not None and len(value) < self.min_len:
raise ValueError(f'should be at least {self.min_len} elements long')
if self.max_len is not None and len(value) > self.max_len:
            raise ValueError(f'should be at most {self.max_len} elements long')
def __str__(self):
return f'{self.u}[, {self.u}[, ...]]'
class TupleConverter(Converter):
"""
Parses space- or comma-separated tuples.
"""
def __init__(self, *u: Converter):
"""
:param u: nested converters. Each tuple element will be parsed with the
corresponding converter.
"""
self.u = u
def from_str(self, value: str):
if ',' in value:
value = value.split(',')
else:
value = value.split()
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_str(v))
return result
def from_any(self, value: Any):
if not isinstance(value, (list, tuple)):
raise ValueError(f'expected tuple, got {type(value)}')
self.check_len(value)
result = []
for i, (v, u) in enumerate(zip(value, self.u)):
result.append(u.from_any(v))
return result
def check_len(self, value):
if len(value) != len(self.u):
raise ValueError(f'should contain exactly {len(self.u)} items')
def __str__(self):
return ', '.join(map(str, self.u))
class EnumConverter(Converter):
"""
Parses enums.
"""
def __init__(self, cls: Type[enum.Enum]):
"""
:param cls: enum class (from the standard python `enum` module).
"""
self.cls = cls
def from_str(self, value: str):
value_orig = value
value = value.strip().upper().replace('-', '_')
try:
return self.cls[value]
except KeyError:
items = ', '.join([repr(x.name) for x in self.cls])
raise ValueError(f'expected one of [{items}], got {value_orig!r} instead')
def from_any(self, value: Any):
if not isinstance(value, self.cls):
raise ValueError(f'expected {self.cls.__name__}, got {type(value)}')
return value
def __str__(self):
return '|'.join(map(lambda x: x.name.lower().replace('_', '-'), self.cls))
class BoolConverter(Converter):
"""
Converts ``'on'``, ``'off'``, ``'true'``, ``'false'`` strings.
"""
def from_str(self, value: str):
value = value.strip().lower()
if value in ['on', 'yes', 'true']:
return True
elif value in ['off', 'no', 'false']:
return False
else:
raise ValueError(f'expected one of [\'on\', \'yes\', \'true\', '
f'\'off\', \'no\', \'false\'], '
f'got {value!r} instead')
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return 'True|False'
class FlagConverter(Converter):
"""
Converts empty strings to ``True``.
"""
def from_str(self, value: str):
if value:
raise ValueError('value is not expected')
return True
def from_any(self, value: Any):
if not isinstance(value, bool):
raise ValueError(f'expected bool, got {type(value)}')
return value
def __str__(self):
return ''
def make_converter(tp) -> Converter:
if tp is str:
return StrConverter()
elif tp is bool:
return BoolConverter()
elif tp is int:
return IntConverter()
elif tp is float:
return FloatConverter()
elif tp is list:
return ListConverter(StrConverter())
elif getattr(tp, '__origin__', None) is list:
return ListConverter(make_converter(tp.__args__[0]))
elif getattr(tp, '__origin__', None) is tuple:
if ... in tp.__args__:
raise TypeError('variadic tuples are not supported')
return TupleConverter(*[make_converter(a) for a in tp.__args__])
elif getattr(tp, '__origin__', None) is Union:
if len(tp.__args__) != 2 or type(None) not in tp.__args__:
raise TypeError('unions are not supported (optionals are, though)')
if tp.__args__[0] is type(None):
return make_converter(tp.__args__[1])
else:
return make_converter(tp.__args__[0])
elif isinstance(tp, type) and issubclass(tp, enum.Enum):
return EnumConverter(tp)
else:
raise TypeError(f'unsupported type {tp}')
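
# A quick sketch of how ``make_converter`` maps type annotations to converters
# (the examples below are illustrative, not an exhaustive contract):
#
#     make_converter(int)              # -> IntConverter()
#     make_converter(List[str])        # -> ListConverter(StrConverter())
#     make_converter(Tuple[int, str])  # -> TupleConverter(IntConverter(),
#                                      #                   StrConverter())
#     make_converter(Optional[float])  # -> FloatConverter()
#     make_converter(dict)             # raises TypeError: unsupported type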
def make_option_spec(cls):
options = {}
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = field.name.replace('_', '-')
if 'converter' in field.metadata:
converter = field.metadata['converter']
elif field.type is bool:
converter = FlagConverter()
options['no-' + name] = FlagConverter()
else:
converter = make_converter(field.type)
options[name] = converter
return options
def _parse_options(cls, options, prefix=''):
result = {}
if prefix:
prefix += '-'
for field in dataclasses.fields(cls): # type: dataclasses.Field
name = prefix + field.name.replace('_', '-')
if name not in options and field.type is not bool:
continue
if field.type is bool:
if name in options:
result[field.name] = True
elif 'no-' + name in options:
result[field.name] = False
else:
result[field.name] = options[name]
return result
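
# ``make_option_spec`` and ``_parse_options`` round-trip directive options
# through a settings dataclass. A hypothetical example (this dataclass is not
# part of the real settings module):
#
#     @dataclasses.dataclass
#     class DiagramSettings:
#         padding: int = 1
#         alt: bool = False
#
#     make_option_spec(DiagramSettings)
#     # -> {'padding': IntConverter(), 'alt': FlagConverter(),
#     #     'no-alt': FlagConverter()}
#
#     # Values in ``options`` are already converted; ``_parse_options`` only
#     # selects and renames them, honoring ``no-`` flags for booleans:
#     _parse_options(DiagramSettings, {'padding': 3, 'no-alt': True})
#     # -> {'padding': 3, 'alt': False}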
class NamespaceHolder:
def __init__(self, namespace: 'Namespace', prefix: str):
self.namespace = namespace
self.prefix = prefix
class Namespace(Generic[T]):
_cls: Type[T] = None
_prefix: str = None
_loaded: Optional[T] = None
def __init__(self, global_prefix: str, cls: Type[T]):
"""
:param global_prefix: prefix to be used when adding options
to ``conf.py`` and to the build environment. The prefix should be
unique across all namespaces registered in all loaded plugins, so
it's best to use the plugin name or domain name as a prefix.
:param cls: dataclass that contains the settings.
"""
self._prefix = global_prefix
self._cls = cls
def fields(self) -> Iterator[dataclasses.Field]:
return dataclasses.fields(self._cls)
def no_global_fields(self) -> Iterator[dataclasses.Field]:
fields = self.fields()
return filter(lambda f: not f.metadata.get('no_global', False), fields)
def get_cls(self):
return self._cls
def make_option_spec(self, prefix: str = '') -> Dict[str, Converter]:
"""
Creates ``option_spec`` for use in rst directives.
For each boolean option, this function adds a corresponding ``no-``
option.
:param prefix: if given, each option name will be prefixed. This is
useful to add settings that are not directly used by the directive
but instead used to override default settings for nested directives
via `push_settings()`.
:return: dict with option names as keys and converters as values.
"""
option_spec = make_option_spec(self._cls)
if prefix:
prefix += '-'
return {prefix + k: v for k, v in option_spec.items()}
else:
return option_spec
def register_settings(self, app: sphinx.application.Sphinx):
"""
Registers settings so that they can be loaded from ``conf.py``.
:param app: current sphinx application.
"""
prefix = self._prefix
if prefix:
prefix += '_'
for field in self.no_global_fields():
default = field.default
if field.default_factory is not dataclasses.MISSING:
default = self._make_default_factory(field.default_factory)
if default is dataclasses.MISSING:
default = None
rebuild = field.metadata.get('rebuild', False)
app.add_config_value(prefix + field.name, default, rebuild)
@staticmethod
def _make_default_factory(default_factory):
def factory(_):
return default_factory()
return factory
def load_global_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings from ``conf.py``.
:param env: current build environment.
"""
prefix = self._prefix
if prefix:
prefix += '_'
if self._loaded is None:
options = {}
for field in self.no_global_fields():
options[field.name] = env.config[prefix + field.name]
self._loaded = self._cls(**options)
return self._loaded
def load_settings(self, env: sphinx.environment.BuildEnvironment) -> T:
"""
Loads settings local to the currently processed directive.
If settings stack is not empty, loads last pushed settings, otherwise
loads global settings.
See `push_settings()` and `pop_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
if not stack:
return self.load_global_settings(env)
else:
return stack[-1]
def push_settings(self, env: sphinx.environment.BuildEnvironment, s: T):
"""
Pushes settings to the local stack.
All calls to `load_settings()` will return the settings passed to this
function until a new portion of settings is pushed or these settings
are popped from the stack.
This function is intended to be called from `before_content()`
to redefine default settings for nested directives.
See `load_settings()` and `pop_settings()`.
:param env: current build environment.
:param s: new settings.
"""
stack = self._get_stack(env)
stack.append(s)
def pop_settings(self, env: sphinx.environment.BuildEnvironment):
"""
Pops settings from the local stack.
This function is intended to be called from `after_content()` to undo
all changes made by calling `push_settings()` from `before_content()`.
See `load_settings()` and `push_settings()`.
:param env: current build environment.
"""
stack = self._get_stack(env)
stack.pop()
def load_from_options(self, options: dict,
env: sphinx.environment.BuildEnvironment,
prefix: str = '') -> T:
"""
Load settings from parsed options and merge them with local settings.
Ignores every option that's not used by this namespace. One can add
options from multiple namespaces as long as all options have unique
names.
Honors ``no-`` options added by `make_option_spec()`.
:param options: parsed directive options.
:param env: current build environment.
:param prefix: prefix that was used in `make_option_spec()`.
:return: parsed settings.
"""
options = _parse_options(self._cls, options, prefix)
local_options = self.load_settings(env)
return dataclasses.replace(local_options, **options)
def _get_stack(self, env: sphinx.environment.BuildEnvironment):
namespaces = env.temp_data.setdefault('configurator_namespaces', {})
return namespaces.setdefault(self._prefix, [])
def for_directive(self, prefix='') -> T:
return NamespaceHolder(self, prefix)
class ManagedDirectiveType(type):
def __new__(mcs, name, bases, members):
option_spec = {}
namespace_attrs: Dict[str, NamespaceHolder] = {}
for base in bases:
new_namespace_attrs: Set[NamespaceHolder] = getattr(base, '_namespace_attrs_', set()) or set()
for new_ns in new_namespace_attrs:
if new_ns.prefix in namespace_attrs:
ns = namespace_attrs[new_ns.prefix]
raise TypeError(
f'cannot combine namespace '
f'{new_ns.namespace.get_cls()} and '
f'{ns.namespace.get_cls()}'
)
namespace_attrs[new_ns.prefix] = new_ns
option_spec.update(getattr(base, 'option_spec', {}) or {})
option_spec.update(members.get('option_spec', {}))
for name, member in list(members.items()):
if isinstance(member, NamespaceHolder):
new_ns = member.namespace
if member.prefix in namespace_attrs:
ns = namespace_attrs[member.prefix].namespace
if not issubclass(new_ns.__class__, ns.__class__):
raise TypeError(
f'cannot override namespace {ns} with '
f'namespace {new_ns}: the latter must be a subclass '
f'of the former'
)
namespace_attrs[member.prefix] = member
members[name] = mcs._make_settings_getter(
'_configurator_cache_' + name,
member.namespace,
member.prefix
)
option_spec.update(new_ns.make_option_spec(member.prefix))
members['option_spec'] = option_spec
members['_namespace_attrs_'] = set(namespace_attrs.values())
return super(ManagedDirectiveType, mcs).__new__(mcs, name, bases, members)
@staticmethod
def _make_settings_getter(name, namespace, prefix):
@property
def settings_getter(self):
if not hasattr(self, name):
settings = namespace.load_from_options(
self.options,
self.state.document.settings.env,
prefix
)
setattr(self, name, settings)
return getattr(self, name)
return settings_getter
class ManagedDirective(metaclass=ManagedDirectiveType):
def push_settings(self, namespace: Namespace[T], value: T):
namespace.push_settings(self.state.document.settings.env, value)
def pop_settings(self, namespace: Namespace):
namespace.pop_settings(self.state.document.settings.env)
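# Usage sketch (all names below are hypothetical):
#
#   @dataclasses.dataclass
#   class MySettings:
#       max_width: int = 500
#
#   my_namespace = Namespace('my_plugin', MySettings)
#
#   class MyDirective(docutils.parsers.rst.Directive, ManagedDirective):
#       settings = my_namespace.for_directive()
#
#       def run(self):
#           # self.settings is a MySettings instance assembled from the
#           # directive's options merged over global/stacked defaults.
#           ...
#
# For the defaults to be configurable from conf.py, call
# my_namespace.register_settings(app) in the extension's setup().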
sphinx-a4doc-1.6.0/sphinx_a4doc/contrib/marker_nodes.py 0000664 0000000 0000000 00000004216 14351074657 0023104 0 ustar 00root root 0000000 0000000 import docutils.nodes
import sphinx.util.docutils
import sphinx.application
class MarkerNode(docutils.nodes.Element):
pass
class DocstringMarker(sphinx.util.docutils.SphinxDirective):
"""
This marker allows customizing where grammar docstring will be rendered.
By default, the grammar docstring (i.e., the comment at the very top of
a grammar file) is added to the end of the autogrammar directive's output.
However, if a docstring marker is present, the grammar docstring
will be rendered in its place.
**Example:**
.. code-block:: rst
.. a4:autogrammar:: Json
(1) This is the description of the grammar.
.. docstring-marker::
(2) This is the continuation of the description.
In this case, the grammar docstring will be rendered
between ``(1)`` and ``(2)``.
"""
def run(self):
return [MarkerNode(marker='docstring')]
class MembersMarker(sphinx.util.docutils.SphinxDirective):
"""
This marker allows customizing where rule descriptions will be rendered.
See :rst:dir:`docstring-marker`.
"""
def run(self):
return [MarkerNode(marker='members')]
def find_marker(nodes, marker: str):
"""
Find a marker node with the given mark.
"""
for node in nodes:
if isinstance(node, MarkerNode) and node['marker'] == marker:
return node
return None
def find_or_add_marker(nodes, marker: str):
"""
Find a marker node with the given mark or insert one if not found.
"""
node = find_marker(nodes, marker)
if node is None:
node = MarkerNode(marker=marker)
nodes += node
return node
def remove_marker_nodes(app, doctree, fromdocname):
for node in doctree.traverse(MarkerNode):
node.parent.remove(node)
def setup(app: sphinx.application.Sphinx):
app.add_node(MarkerNode)
app.add_directive('docstring-marker', DocstringMarker)
app.add_directive('members-marker', MembersMarker)
app.connect('doctree-resolved', remove_marker_nodes)
return {
'version': '1.0.0',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
sphinx-a4doc-1.6.0/sphinx_a4doc/contrib/railroad_diagrams.py 0000664 0000000 0000000 00000131572 14351074657 0024105 0 ustar 00root root 0000000 0000000 import re
import io
import math
from dataclasses import dataclass, field
from sphinx_a4doc.settings import DiagramSettings, InternalAlignment, EndClass
from typing import *
try:
from typing.io import TextIO
except ImportError:
from typing import TextIO
__all__ = [
'Diagram',
'HrefResolver',
]
T = TypeVar('T')
ESCAPE_RE = re.compile(r"[*_`\[\]<&]", re.UNICODE)
def e(text):
return ESCAPE_RE.sub(lambda c: f'&#{ord(c[0])};', str(text))
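# For example, e('a<b') returns 'a&#60;b': every character matched by
# ESCAPE_RE is replaced with its numeric character reference so that it
# can be safely embedded in SVG markup.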
def group_by_subsequences(items: Iterable['DiagramItem'], linebreaks: Iterable[bool]):
subsequences: List[Tuple[int, List['DiagramItem']]] = []
subsequence = []
width = 0
for item, linebreak in zip(items, linebreaks):
subsequence.append(item)
width += item.width
if linebreak:
subsequences.append((width, subsequence))
subsequence = []
width = 0
if subsequence:
subsequences.append((width, subsequence))
return subsequences
def wrap_subsequence(items: Iterable['DiagramItem'], max_width: int):
new_items = []
sequence = []
width = 0
for item in items:
if width + item.width > max_width:
if sequence:
new_items.append(sequence)
width = item.width
sequence = [item]
else:
width += item.width
sequence.append(item)
if sequence:
new_items.append(sequence)
return new_items
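# For example (illustrative), with max_width=10 and items of widths
# [6, 6, 3], wrap_subsequence() greedily packs items into rows that fit
# the width budget and returns two rows of widths [6] and [6, 3].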
def ensure_type(name, x, *types):
if not isinstance(x, types):
types_str = ', '.join([t.__name__ for t in types])
raise ValueError(f'{name} should be {types_str}, '
f'got {type(x)} ({x!r}) instead')
def ensure_empty_dict(name, x):
if x:
keys = ', '.join(x.keys())
raise ValueError(f'{name} got unexpected parameters: {keys}')
class HrefResolver:
def resolve(self, text: str, href: Optional[str], title_is_weak: bool):
return text, href
@dataclass
class Diagram:
settings: DiagramSettings = field(default_factory=DiagramSettings)
"""
Settings used to render a diagram.
"""
href_resolver: HrefResolver = field(default_factory=HrefResolver)
"""
Class that manages adding hrefs to diagram nodes.
"""
def element(self, name: str, **kwargs) -> 'Element':
return Element(self, name, **kwargs)
def path(self, x: int, y: int) -> 'Path':
return Path(self, x, y)
def sequence(self, *items: 'DiagramItem',
autowrap: bool = False,
linebreaks: Optional[Iterable[bool]] = None) -> 'DiagramItem':
linebreaks = linebreaks or [False] * len(items)
assert len(items) == len(linebreaks)
seq = Sequence(self, list(items))
if autowrap and seq.width > self.settings.max_width:
subsequences = group_by_subsequences(items, linebreaks)
new_items = []
sequence = []
width = 0
for ss_width, ss in subsequences:
if width + ss_width > self.settings.max_width:
if sequence:
new_items.append(self.sequence(*sequence))
if ss_width > self.settings.max_width:
ssss = wrap_subsequence(ss, self.settings.max_width)
else:
ssss = [ss]
for sss in ssss:
new_items.append(self.sequence(*sss))
width = 0
sequence = []
else:
sequence.extend(ss)
width += ss_width
if sequence:
new_items.append(self.sequence(*sequence))
return self.stack(*new_items)
return seq
def stack(self, *items: 'DiagramItem') -> 'DiagramItem':
return Stack(self, list(items))
def choice(self, *items: 'DiagramItem', default: int = 0):
return Choice(self, default, list(items))
def optional(self, item: 'DiagramItem', skip: bool = False) -> 'DiagramItem':
return self.choice(self.skip(), item, default=0 if skip else 1)
def one_or_more(self, item: 'DiagramItem', repeat: Optional['DiagramItem'] = None) -> 'DiagramItem':
return OneOrMore(self, item, repeat)
def zero_or_more(self, item: 'DiagramItem', repeat: Optional['DiagramItem'] = None) -> 'DiagramItem':
return self.optional(self.one_or_more(item, repeat))
def start(self) -> 'DiagramItem':
return Start(self)
def end(self) -> 'DiagramItem':
return End(self)
def node(self, text: str, href: Optional[str] = None, css_class: str = '', radius: int = 0, padding: int = 20, resolve: bool = False, title_is_weak: bool = False) -> 'DiagramItem':
return Node(self, text, href, css_class, radius, padding, resolve, title_is_weak)
def terminal(self, text: str, href: Optional[str] = None, css_class: str = '', resolve: bool = True, title_is_weak: bool = False):
return self.node(text, href, 'node terminal ' + css_class, 10, 20, resolve, title_is_weak)
def non_terminal(self, text: str, href: Optional[str] = None, css_class: str = '', resolve: bool = True, title_is_weak: bool = False):
return self.node(text, href, 'node non-terminal ' + css_class, 0, 20, resolve, title_is_weak)
def comment(self, text: str, href: Optional[str] = None):
return self.node(text, href, 'node comment', 0, 5)
def literal(self, text: str):
return self.node(text, None, 'node literal', 10, 20)
def range(self, text: str):
return self.node(text, None, 'node range', 10, 20)
def charset(self, text: str):
return self.node(text, None, 'node charset', 10, 20)
def wildcard(self, text: str):
return self.node(text, None, 'node wildcard', 10, 20)
def negation(self, text: str):
return self.node(text, None, 'node negation', 10, 20)
def skip(self) -> 'DiagramItem':
return Skip(self)
def load(self, structure) -> 'DiagramItem':
"""
Load a diagram from a plain object (usually parsed YAML or JSON).
"""
if structure is None:
return self.skip()
elif isinstance(structure, str):
return self._load_terminal(structure, {})
elif isinstance(structure, list):
return self._load_sequence(structure, {})
elif isinstance(structure, dict):
ctors = {
'sequence': self._load_sequence,
'stack': self._load_stack,
'choice': self._load_choice,
'optional': self._load_optional,
'one_or_more': self._load_one_or_more,
'zero_or_more': self._load_zero_or_more,
'node': self._load_node,
'terminal': self._load_terminal,
'non_terminal': self._load_non_terminal,
'comment': self._load_comment,
'literal': self._load_literal,
'range': self._load_range,
'charset': self._load_charset,
'wildcard': self._load_wildcard,
'negation': self._load_negation,
}
ctors_found = []
for name in structure:
if name in ctors:
ctors_found.append(name)
if len(ctors_found) != 1:
raise ValueError(f'cannot determine type for {structure!r}')
name = ctors_found[0]
structure = structure.copy()
arg = structure.pop(name)
return ctors[name](arg, structure)
else:
raise ValueError(f'diagram item description should be string, '
f'list or object, got {type(structure)} instead')
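# A minimal usage sketch (this structure mirrors the YAML accepted by the
# railroad-diagram directive):
#
#   dia = Diagram()
#   item = dia.load({
#       'sequence': [
#           {'terminal': 'grammar'},
#           {'non_terminal': 'identifier'},
#           {'terminal': ';'},
#       ],
#   })
#   svg = dia.render(item)  # returns the SVG markup as a string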
def _load_sequence(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.sequence, (list, tuple,), self._from_list,
{
'autowrap': ((bool, ), None ),
'linebreaks': ((list, tuple, ), None ),
}
)
def _load_stack(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.stack, (list, tuple,), self._from_list,
{
}
)
def _load_choice(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.choice, (list, tuple,), self._from_list,
{
'default': ((int, ), None ),
}
)
def _load_optional(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.optional, (str, dict, list, tuple), self._from_dict,
{
'skip': ((bool, ), None ),
}
)
def _load_one_or_more(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.one_or_more, (str, dict, list, tuple), self._from_dict,
{
'repeat': ((str, dict, list, tuple), self.load ),
}
)
def _load_zero_or_more(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.zero_or_more, (str, dict, list, tuple), self._from_dict,
{
'repeat': ((str, dict, list, tuple), self.load ),
}
)
def _load_node(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.node, (str,), lambda s: ([s], {}),
{
'href': ((str, ), None ),
'css_class': ((str, ), None ),
'radius': ((int, ), None ),
'padding': ((int, ), None ),
'resolve': ((bool, ), None ),
'title_is_weak': ((bool, ), None ),
}
)
def _load_terminal(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.terminal, (str,), lambda s: ([s], {}),
{
'href': ((str, ), None ),
'css_class': ((str, ), None ),
'resolve': ((bool, ), None ),
'title_is_weak': ((bool, ), None ),
}
)
def _load_non_terminal(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.non_terminal, (str,), lambda s: ([s], {}),
{
'href': ((str, ), None ),
'css_class': ((str, ), None ),
'resolve': ((bool, ), None ),
'title_is_weak': ((bool, ), None ),
}
)
def _load_comment(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.comment, (str,), lambda s: ([s], {}),
{
'href': ((str, ), None ),
}
)
def _load_literal(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.literal, (str,), lambda s: ([s], {}),
{
}
)
def _load_range(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.range, (str,), lambda s: ([s], {}),
{
}
)
def _load_charset(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.charset, (str,), lambda s: ([s], {}),
{
}
)
def _load_wildcard(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.wildcard, (str,), lambda s: ([s], {}),
{
}
)
def _load_negation(self, a, kw) -> 'DiagramItem':
return self._load_generic(
a, kw, self.negation, (str,), lambda s: ([s], {}),
{
}
)
def _load_skip(self, a, kw) -> 'DiagramItem':
return self.skip()
def _load_generic(self, user_a, user_kw, ctor, primary_type, primary_loader,
spec: Dict[str, Tuple[tuple, callable]]):
ensure_type(f'{ctor.__name__} content', user_a, *primary_type)
a, kw = primary_loader(user_a)
user_kw = user_kw.copy()
for name, (types, loader) in spec.items():
if name not in user_kw:
continue
arg = user_kw.pop(name)
if arg is None:
continue
ensure_type(f'{ctor.__name__}\'s parameter {name}', arg, *types)
if loader is not None:
arg = loader(arg)
kw[name] = arg
ensure_empty_dict(ctor.__name__, user_kw)
return ctor(*a, **kw)
def _from_list(self, x):
return [self.load(i) for i in x], {}
def _from_dict(self, x):
return [self.load(x)], {}
@overload
def render(self, root: 'DiagramItem', output: None = None, style=None) -> str: ...
@overload
def render(self, root: 'DiagramItem', output: TextIO, style=None) -> None: ...
def render(self, root, output=None, style=None):
root = self.sequence(
self.start(),
root,
self.end()
)
# Root reference point
x = self.settings.padding[3]
y = self.settings.padding[2] + root.up
# SVG dimensions
width = self.settings.padding[1] + self.settings.padding[3] + root.width
height = self.settings.padding[0] + self.settings.padding[2] + root.height + root.up + root.down
svg = self.element('svg')
svg.attrs['width'] = str(width)
svg.attrs['height'] = str(height)
svg.attrs['viewBox'] = f'0 0 {width} {height}'
svg.attrs['class'] = 'railroad-diagram'
svg = svg.format()
if style:
style_r = self.element('style').format()
style_r.children.append(style)
style_r.add_to(svg)
g = self.element('g')
if self.settings.translate_half_pixel:
g.attrs['transform'] = 'translate(.5 .5)'
g = g.format().add_to(svg)
root.format(x, y, root.width, False, self.settings.internal_alignment).add_to(g)
if output is None:
output = io.StringIO()
svg.write_svg(output)
output.seek(0)
return output.read()
else:
svg.write_svg(output)
def __repr__(self):
return super().__repr__()
@dataclass
class FormattedItem:
diagram_item: 'DiagramItem'
"""Node that this element is formatted from"""
children: List[Union['FormattedItem', str]] = field(default_factory=list)
"""Children SVG nodes"""
def add_to(self, parent: 'FormattedItem') -> 'FormattedItem':
parent.children.append(self)
return self
def write_svg(self, f: TextIO):
f.write(f'<{self.diagram_item.name}')
for name, value in sorted(self.diagram_item.attrs.items()):
f.write(f' {name}="{e(value)}"')
f.write(f' data-dbg-cls="{self.diagram_item.__class__.__name__}"'
f' data-dbg-w="{self.diagram_item.width}"')
f.write('>')
for child in self.children:
if isinstance(child, FormattedItem):
child.write_svg(f)
else:
f.write(e(child))
f.write(f'</{self.diagram_item.name}>')
# TODO: make diagram items frozen
@dataclass
class DiagramItem:
diagram: Diagram
"""Diagram that this item is attached to"""
name: str
"""Name of SVG node"""
width: int = 0
"""Total width of the item"""
height: int = 0
"""Distance between the entry/exit lines"""
up: int = 0
"""Distance it projects above the entry line"""
down: int = 0
"""Distance it projects below the exit line"""
attrs: Dict[str, str] = field(default_factory=dict)
"""SVG node attributes"""
needs_space: bool = False
"""Add extra space around this element"""
@property
def settings(self) -> DiagramSettings:
return self.diagram.settings
@property
def href_resolver(self) -> HrefResolver:
return self.diagram.href_resolver
@property
def dia(self) -> Diagram:
return self.diagram
def format(self, x, y, width, reverse, alignment_override) -> FormattedItem:
"""
Prepare the component for rendering, populate children array.
- `x` and `y` determine the reference (top-left) point of the component.
- `width` determine total width available for rendering the component.
- `reverse` is true if the component should be mirrored along y axis.
For normal rendering (the reference point is marked with `#`)::
|<-----width----->|
+---------+ ---
| | up
--->#-------\ | --- < y
| /-----/ | height
| \-------|---> ---
| | down
+---------+ ---
^
x
For reverse rendering (the reference point is marked with `#`)::
|<-----width----->|
+---------+ ---
| | up
# /-------|<--- --- < y
| \-----\ | height
<---|-------/ | ---
| | down
+---------+ ---
^
x
"""
raise NotImplementedError()
def determine_gaps(self, outer, internal_alignment):
if internal_alignment == InternalAlignment.AUTO_LEFT:
internal_alignment = InternalAlignment.LEFT
elif internal_alignment == InternalAlignment.AUTO_RIGHT:
internal_alignment = InternalAlignment.RIGHT
diff = outer - self.width
if internal_alignment == InternalAlignment.LEFT:
return 0, diff
elif internal_alignment == InternalAlignment.RIGHT:
return diff, 0
else:
return math.floor(diff / 2), math.ceil(diff / 2)
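# For example, with outer=100 and self.width=80: LEFT yields gaps (0, 20),
# RIGHT yields (20, 0), and CENTER yields (10, 10).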
def alignment_override_center(self):
if self.settings.internal_alignment == InternalAlignment.AUTO_RIGHT:
return InternalAlignment.CENTER
if self.settings.internal_alignment == InternalAlignment.AUTO_LEFT:
return InternalAlignment.CENTER
return self.settings.internal_alignment
def alignment_override_reverse(self, reverse):
if not reverse:
return self.settings.internal_alignment
if self.settings.internal_alignment == InternalAlignment.AUTO_RIGHT:
return InternalAlignment.AUTO_LEFT
if self.settings.internal_alignment == InternalAlignment.AUTO_LEFT:
return InternalAlignment.AUTO_RIGHT
return self.settings.internal_alignment
@dataclass
class Element(DiagramItem):
def format(self, *args, **kwargs):
return FormattedItem(self)
@dataclass
class Path(DiagramItem):
def __init__(self, dia: Diagram, x: int, y: int):
super().__init__(dia, 'path')
self.attrs = {'d': f'M{x} {y}'}
def m(self, x, y):
self.attrs['d'] += f'm{x} {y}'
return self
def h(self, val):
self.attrs['d'] += f'h{val}'
return self
def right(self, val):
return self.h(max(0, val))
def left(self, val):
return self.h(-max(0, val))
def v(self, val):
self.attrs['d'] += f'v{val}'
return self
def arc(self, sweep):
arc_radius = self.settings.arc_radius
x = arc_radius
y = arc_radius
if sweep[0] == 'e' or sweep[1] == 'w':
x *= -1
if sweep[0] == 's' or sweep[1] == 'n':
y *= -1
cw = 1 if sweep in ['ne', 'es', 'sw', 'wn'] else 0
self.attrs['d'] += f'a{arc_radius} {arc_radius} 0 0 {cw} {x} {y}'
return self
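# For example, arc('ne') appends a quarter-circle of radius
# settings.arc_radius that displaces the pen by (+r, +r) with a clockwise
# sweep, while arc('se') displaces it by (+r, -r) counter-clockwise.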
def format(self, *args, **kwargs):
return FormattedItem(self)
@dataclass
class Stack(DiagramItem):
items: List[DiagramItem] = None
skipped: Set[int] = None
def __init__(self, dia: Diagram, items: List[DiagramItem]):
super().__init__(dia, 'g')
if len(items) < 1:
items = [self.dia.skip()]
self.skipped = set()
for i in range(len(items)):
if not isinstance(items[i], Sequence):
items[i] = self.dia.sequence(items[i])
if len(items) > 1:
for i in range(1, len(items)):
item = items[i]
if isinstance(item, Sequence) and len(item.items) == 1:
item = item.items[0]
if (
isinstance(item, Choice) and
len(item.items) == 2 and
(
isinstance(item.items[0], Skip) or
isinstance(item.items[1], Skip)
)
):
self.skipped.add(i)
if isinstance(item.items[0], Skip):
items[i] = item.items[1]
else:
items[i] = item.items[0]
self.items = items
self.up = items[0].up
self.down = items[-1].down
last = len(self.items) - 1
vertical_separation = self.settings.vertical_separation
arc_radius = self.settings.arc_radius
for i, item in enumerate(items):
self.width = max(self.width, item.width)
self.height += item.height
if i < last:
self.height += max(arc_radius * 2,
item.down + 2 * vertical_separation)
self.height += max(arc_radius * 2,
items[i + 1].up + 2 * vertical_separation)
elif i in self.skipped:
# In this case, the end of the stack will look like:
#
# v
# | +-----------+
# \---># last-elem |->--\
# | +-----------+ |
# \---------------------\--->
#
self.down = 0
self.height += max(arc_radius, item.down + vertical_separation)
self.height += arc_radius
if self.settings.internal_alignment == InternalAlignment.CENTER:
self.width += arc_radius
elif self.width < item.width + arc_radius:
self.width += arc_radius
if len(self.items) > 1:
self.width += self.settings.arc_radius * 2
# Add a little bit of extra space on edges ...
self.width += self.settings.horizontal_separation
# ... and bottom of the diagram
self.down += vertical_separation
def format(self, x, y, width, reverse, alignment_override):
fmt = FormattedItem(self)
left_gap, right_gap = self.determine_gaps(width, alignment_override)
alignment_override = self.settings.internal_alignment
if alignment_override != InternalAlignment.CENTER:
if reverse:
alignment_override = InternalAlignment.RIGHT
else:
alignment_override = InternalAlignment.LEFT
# Input line y coordinate
y_in = y
# Output line y coordinate
y_out = y + self.height
if reverse:
y_in, y_out = y_out, y_in
self.dia.path(x, y_in) \
.h(left_gap) \
.format() \
.add_to(fmt)
self.dia.path(x + left_gap + self.width, y_out) \
.h(right_gap) \
.format() \
.add_to(fmt)
x += left_gap
if len(self.items) > 1:
self.dia.path(x, y_in) \
.h(self.settings.arc_radius) \
.format() \
.add_to(fmt)
self.dia.path(x + self.width, y_out) \
.h(-self.settings.arc_radius) \
.format() \
.add_to(fmt)
inner_width = self.width - self.settings.arc_radius * 2
x += self.settings.arc_radius
if len(self.items) - 1 in self.skipped:
if self.settings.internal_alignment == InternalAlignment.CENTER:
# When the last element is skipped and the stack
# is centered, it looks like this:
#
# +----+
# -----| E1 |----\
# +----+ |
# /--------------/
# | +--------+
# \--| E2 |--\
# | +--------+ |
# \--------------\---
#
# | |
# ^
# This extra bit of space is what we're removing from
# the inner width.
if reverse:
x += self.settings.arc_radius
inner_width -= self.settings.arc_radius
else:
inner_width = self.width
current_y = y
last = len(self.items) - 1
vertical_separation = self.settings.vertical_separation
arc_radius = self.settings.arc_radius
for i, item in enumerate(self.items):
if self.settings.internal_alignment == InternalAlignment.CENTER:
elem_width = inner_width
elif len(self.items) > 1:
elem_width = item.width + self.settings.horizontal_separation
else:
elem_width = item.width
if reverse:
x_of = x + inner_width - elem_width
else:
x_of = x
item.format(x_of, current_y, elem_width, reverse, alignment_override) \
.add_to(fmt)
if i < last:
current_y += item.height
y_1 = current_y
current_y += max(arc_radius * 2,
item.down + 2 * vertical_separation)
y_2 = current_y
current_y += max(arc_radius * 2,
self.items[i + 1].up + 2 * vertical_separation)
y_3 = current_y
if reverse:
if i in self.skipped:
self.dia.path(x_of + elem_width + arc_radius, y_1 - item.height - arc_radius) \
.v(item.height + y_3 - y_1) \
.format() \
.add_to(fmt)
self.dia.path(x_of, y_1) \
.arc('nw') \
.v(y_2 - y_1 - 2 * arc_radius) \
.arc('ws') \
.h(elem_width) \
.arc('ne') \
.v(y_3 - y_2 - 2 * arc_radius) \
.arc('es') \
.format() \
.add_to(fmt)
else:
if i in self.skipped:
self.dia.path(x_of - arc_radius, y_1 - item.height - arc_radius) \
.v(item.height + y_3 - y_1) \
.format() \
.add_to(fmt)
self.dia.path(x_of + elem_width, y_1) \
.arc('ne') \
.v(y_2 - y_1 - 2 * arc_radius) \
.arc('es') \
.h(-elem_width) \
.arc('nw') \
.v(y_3 - y_2 - 2 * arc_radius) \
.arc('ws') \
.format() \
.add_to(fmt)
else:
if reverse:
if i in self.skipped:
self.dia.path(x_of + elem_width + arc_radius, current_y - arc_radius) \
.v(y_in - current_y) \
.arc('es') \
.h(-elem_width - arc_radius) \
.format() \
.add_to(fmt)
self.dia.path(x_of, current_y + item.height) \
.arc('nw') \
.v(y_in - current_y - 2 * arc_radius - item.height) \
.arc('es') \
.format() \
.add_to(fmt)
self.dia.path(x, y_in) \
.h(x_of - x) \
.format() \
.add_to(fmt)
else:
if i in self.skipped:
self.dia.path(x - arc_radius, current_y - arc_radius) \
.v(y_out - current_y) \
.arc('ws') \
.h(elem_width + arc_radius) \
.format() \
.add_to(fmt)
self.dia.path(x + elem_width, current_y + item.height) \
.arc('ne') \
.v(y_out - current_y - 2 * arc_radius - item.height) \
.arc('ws') \
.format() \
.add_to(fmt)
self.dia.path(x + elem_width, y_out) \
.h(inner_width - elem_width) \
.format() \
.add_to(fmt)
return fmt
@dataclass
class Sequence(DiagramItem):
items: List[DiagramItem] = None
def __init__(self, dia: Diagram, items: List[DiagramItem]):
super().__init__(dia, 'g')
if len(items) < 1:
items = [self.dia.skip()]
self.items = []
for item in items:
if isinstance(item, Sequence):
self.items.extend(item.items)
else:
self.items.append(item)
# Calculate vertical dimensions for when we're rendered normally:
height = 0
up = 0
down = 0
for item in self.items:
up = max(up, item.up - height)
height += item.height
down = max(down - item.height, item.down)
# Calculate vertical dimensions for when we're rendered in reverse:
revheight = 0
revup = 0
revdown = 0
for item in self.items[::-1]:
revup = max(revup, item.up - revheight)
revheight += item.height
revdown = max(revdown - item.height, item.down)
# Set up vertical dimensions:
self.height = height
self.up = max(up, revup)
self.down = max(down, revdown)
# Calculate width:
for item in self.items:
self.width += item.width
if item.needs_space:
self.width += self.settings.horizontal_separation * 2
if self.items[0].needs_space:
self.width -= self.settings.horizontal_separation
if self.items[-1].needs_space:
self.width -= self.settings.horizontal_separation
self.width = math.ceil(self.width)
def format(self, x, y, width, reverse, alignment_override):
fmt = FormattedItem(self)
left_gap, right_gap = self.determine_gaps(width, alignment_override)
alignment_override = self.alignment_override_center()
# Input line y coordinate
y_in = y
# Output line y coordinate
y_out = y + self.height
if reverse:
y_in, y_out = y_out, y_in
self.dia.path(x, y_in) \
.h(left_gap) \
.format() \
.add_to(fmt)
self.dia.path(x + left_gap + self.width, y_out) \
.h(right_gap) \
.format() \
.add_to(fmt)
x += left_gap
current_x = x
current_y = y_in
for i, item in enumerate(self.items[::-1 if reverse else 1]):
if item.needs_space and i > 0:
self.dia.path(current_x, current_y) \
.h(self.settings.horizontal_separation) \
.format() \
.add_to(fmt)
current_x += self.settings.horizontal_separation
if reverse:
ref_x = current_x
ref_y = current_y - item.height
else:
ref_x = current_x
ref_y = current_y
item.format(ref_x, ref_y, item.width, reverse, alignment_override) \
.add_to(fmt)
current_x += item.width
if reverse:
current_y -= item.height
else:
current_y += item.height
if item.needs_space and i < len(self.items) - 1:
self.dia.path(current_x, current_y) \
.h(self.settings.horizontal_separation) \
.format() \
.add_to(fmt)
current_x += self.settings.horizontal_separation
return fmt
@dataclass
class Choice(DiagramItem):
def __init__(self, dia: Diagram, default: int, items: List[DiagramItem]):
assert default < len(items)
assert len(items) >= 1
super().__init__(dia, 'g')
self.default = default
self.items = items
self.width = max(item.width for item in self.items)
self.width += self.settings.arc_radius * 4
self.height = self.items[default].height
# +------+ - <- top border
# /-># 0 | -
# | | |->\ -
# | +------+ | -
# | |
# | +------+ | -
# /-># 1 | | -
# | | |->\ -
# | +------+ | -
# | |
# | +------+ | -
# ----+-># 2 | | - <- main line
# | | def |->+- -
# | +------+ | -
# | |
# | +------+ | -
# \-># 3 | | -
# | | |->/ -
# | +------+ | -
# | |
# | +------+ | -
# \-># 4 | | -
# | |->/ -
# +------+ -
self.up += self.items[0].up
# Reference points along y axis for each child, relative to top border
child_refs = []
for i, item in enumerate(self.items):
if i in [default - 1, default + 1]:
arcs = self.settings.arc_radius * 2
else:
arcs = self.settings.arc_radius
if i < default:
child_refs.append(self.up)
up = self.items[i + 1].up + self.settings.vertical_separation + item.down
up = max(arcs, up)
up += item.height
self.up += up
elif i == default:
child_refs.append(self.up)
else:
down = self.items[i - 1].down + self.settings.vertical_separation + item.up
down = max(arcs, down)
# woof... that's asymmetric =(
child_refs.append(self.up + self.down + down + self.height)
down += item.height
self.down += down
self.down += self.items[-1].down
# Reference points along y axis for each child, relative to main line
self.child_refs = [c - self.up for c in child_refs]
self.width = math.ceil(self.width)
def format(self, x, y, width, reverse, alignment_override):
fmt = FormattedItem(self)
left_gap, right_gap = self.determine_gaps(width, alignment_override)
alignment_override = self.alignment_override_reverse(reverse)
# Input line y coordinate
y_in = y
# Output line y coordinate
y_out = y + self.height
if reverse:
y_in, y_out = y_out, y_in
self.dia.path(x, y_in) \
.h(left_gap) \
.format() \
.add_to(fmt)
self.dia.path(x + left_gap + self.width, y_out) \
.h(right_gap) \
.format() \
.add_to(fmt)
x += left_gap
inner_width = self.width - self.settings.arc_radius * 4
for i, (ref_y_rel, item) in enumerate(zip(self.child_refs, self.items)):
# Input line of the component
child_y_in = ref_y_rel + y
# Output line of the component
child_y_out = child_y_in + item.height
# Reference point of the component
ref_x = x + self.settings.arc_radius * 2
ref_y = child_y_in
if reverse:
child_y_in, child_y_out = child_y_out, child_y_in
if i == self.default:
self.dia.path(x, y_in) \
.right(self.settings.arc_radius * 2) \
.format() \
.add_to(fmt)
self.dia.path(ref_x + inner_width, y_out) \
.right(self.settings.arc_radius * 2) \
.format() \
.add_to(fmt)
else:
if i < self.default:
arcs = ['se', 'wn', 'ne', 'ws']
arcs_size = -self.settings.arc_radius * 2
else:
arcs = ['ne', 'ws', 'se', 'wn']
arcs_size = self.settings.arc_radius * 2
self.dia.path(x, y_in) \
.arc(arcs[0]) \
.v(child_y_in - y_in - arcs_size) \
.arc(arcs[1]) \
.format() \
.add_to(fmt)
self.dia.path(ref_x + inner_width, child_y_out) \
.arc(arcs[2]) \
.v(y_out - child_y_out + arcs_size) \
.arc(arcs[3]) \
.format() \
.add_to(fmt)
item.format(ref_x, ref_y, inner_width, reverse, alignment_override) \
.add_to(fmt)
return fmt
@dataclass
class OneOrMore(DiagramItem):
item: DiagramItem = None
repeat: DiagramItem = None
def __init__(self, dia: Diagram, item: DiagramItem, repeat: Optional[DiagramItem] = None):
super().__init__(dia, 'g')
self.item = item
self.repeat = repeat = repeat or self.dia.skip()
self.needs_space = True
self.width = max(item.width, repeat.width)
self.width += self.settings.arc_radius * 2
self.height = item.height
self.up = item.up
self.down = item.down + self.settings.vertical_separation + repeat.up
self.down = max(self.settings.arc_radius * 2, self.down)
self.down += repeat.height + repeat.down
self.width = math.ceil(self.width)
def format(self, x, y, width, reverse, alignment_override):
fmt = FormattedItem(self)
left_gap, right_gap = self.determine_gaps(width, alignment_override)
alignment_override = self.alignment_override_center()
inner_width = self.width - self.settings.arc_radius * 2
# +------+ -
# -/-># | - <- input line of the main component -------
# | | |->\- - <- output line of the main component --- ^
# | +------+ | - ^ |
# | | d_out=| |=d_in
# | +------+ | - v |
# | # |<-/ - <- input line of the repeat component --- v
# \<-| | - <- output line of the repeat component -------
# +------+ -
# Input line y coordinate
y_in = y
# Output line y coordinate
y_out = y + self.height
# Distance between input line of the main component
# and output line of the repeat component
d_in = self.height + self.down - self.repeat.down
# Distance between output line of the main component
# and input line of the repeat component
d_out = self.down - self.repeat.down - self.repeat.height
# Reference point of the main component
main_ref_x = x + self.settings.arc_radius + left_gap
main_ref_y = y
# Reference point of the repeat component
repeat_ref_x = x + self.settings.arc_radius + left_gap
repeat_ref_y = y_out + d_out
if reverse:
y_in, y_out = y_out, y_in
d_in, d_out = d_out, d_in
# note that reference points are not changed
self.dia.path(x, y_in) \
.h(left_gap) \
.format() \
.add_to(fmt)
self.dia.path(x + left_gap + self.width, y_out) \
.h(right_gap) \
.format() \
.add_to(fmt)
x += left_gap
# Draw main item
self.dia.path(x, y_in) \
.right(self.settings.arc_radius) \
.format() \
.add_to(fmt)
self.dia.path(x + self.width - self.settings.arc_radius, y_out) \
.right(self.settings.arc_radius) \
.format() \
.add_to(fmt)
self.item.format(main_ref_x, main_ref_y, inner_width, reverse, alignment_override) \
.add_to(fmt)
# Draw repeat item
self.dia.path(x + self.settings.arc_radius, y_in) \
.arc('nw') \
.v(d_in - 2 * self.settings.arc_radius) \
.arc('ws') \
.format() \
.add_to(fmt)
self.dia.path(x + self.width - self.settings.arc_radius, y_out) \
.arc('ne') \
.v(d_out - 2 * self.settings.arc_radius) \
.arc('es') \
.format() \
.add_to(fmt)
self.repeat.format(repeat_ref_x, repeat_ref_y, inner_width, not reverse, alignment_override) \
.add_to(fmt)
return fmt
@dataclass
class Start(DiagramItem):
end_class: EndClass = None
def __init__(self, dia: Diagram, end_class: Optional[EndClass] = None):
super().__init__(dia, 'g')
self.end_class = end_class or self.settings.end_class
self.width = 20
self.up = 10
self.down = 10
def format(self, x, y, width, reverse, alignment_override):
path = self.dia.path(x, y)
path.h(20)
if self.end_class == EndClass.SIMPLE:
path.m(-20, -10).v(20)
else:
path.m(-10, -10).v(20)
path.m(-10, -20).v(20)
return path.format()
@dataclass
class End(DiagramItem):
end_class: EndClass = None
def __init__(self, dia: Diagram, end_class: Optional[EndClass] = None):
super().__init__(dia, 'g')
self.end_class = end_class or self.settings.end_class
self.width = 20
self.up = 10
self.down = 10
def format(self, x, y, width, reverse, alignment_override):
path = self.dia.path(x, y)
path.h(20)
if self.end_class == EndClass.SIMPLE:
path.m(0, -10).v(20)
else:
path.m(0, -10).v(20)
path.m(-10, -20).v(20)
return path.format()
@dataclass
class Node(DiagramItem):
text: str = None
href: Optional[str] = None
radius: int = None
def __init__(self, dia: Diagram, text, href=None, css_class='', radius=0, padding=20, resolve=True, title_is_weak=False):
super().__init__(dia, 'g')
self.text = text
self.href = href
self.radius = radius
self.resolve = resolve
self.title_is_weak = title_is_weak
if self.resolve:
self.text, self.href = self.href_resolver.resolve(
self.text, self.href, self.title_is_weak
)
self.attrs = {'class': css_class}
self.needs_space = True
self.up = 11
self.down = 11
self.width = len(self.text) * self.settings.character_advance + padding
self.width = math.ceil(self.width)
def format(self, x, y, width, reverse, alignment_override):
fmt = FormattedItem(self)
left_gap, right_gap = self.determine_gaps(width, alignment_override)
self.dia.path(x, y).h(left_gap).format().add_to(fmt)
self.dia.path(x + left_gap + self.width, y).h(right_gap).format().add_to(fmt)
rect_attrs = {
'x': x + left_gap,
'y': y - self.up,
'width': self.width,
'height': self.up + self.down,
'rx': self.radius,
'ry': self.radius
}
self.dia.element('rect', attrs=rect_attrs).format().add_to(fmt)
text_attrs = {
'x': x + left_gap + self.width / 2,
'y': y
}
text = self.dia.element('text', attrs=text_attrs).format()
text.children.append(self.text)
if self.href is not None:
a = self.dia.element('a', attrs={'xlink:href': self.href}).format()
text.add_to(a)
a.add_to(fmt)
else:
text.add_to(fmt)
return fmt
@dataclass
class Skip(DiagramItem):
def __init__(self, dia: Diagram):
super().__init__(dia, 'g')
def format(self, x, y, width, reverse, alignment_override):
return self.dia.path(x, y).right(width).format()
sphinx-a4doc-1.6.0/sphinx_a4doc/contrib/rst_autodoc.py 0000664 0000000 0000000 00000026462 14351074657 0022770 0 ustar 00root root 0000000 0000000 import re
import dataclasses
import textwrap
import sphinx.addnodes
import sphinx.domains
import sphinx.roles
import sphinx.util.docfields
from docutils.parsers.rst.languages import en
import docutils.statemachine
import docutils.nodes
import sphinx.util.docutils
import sphinx.ext.autodoc
import sphinx.errors
import sphinx.domains.rst
import sphinx.addnodes
import sphinx.application
from sphinx.locale import _
from sphinx.pycode import ModuleAnalyzer
from .configurator import Namespace, NamespaceHolder, ManagedDirective, make_converter
from .marker_nodes import find_or_add_marker
from typing import *
class ReSTDirective(sphinx.domains.rst.ReSTDirective):
def before_content(self):
super().before_content()
self.env.ref_context['rst:directive'] = self.names
def after_content(self):
super().after_content()
self.env.ref_context['rst:directive'] = None
class ReSTOption(sphinx.domains.rst.ReSTMarkup):
def handle_signature(self, sig: str, signode: sphinx.addnodes.desc_signature) -> str:
directive: Optional[List[str]] = self.env.ref_context.get('rst:directive', None)
if directive is None:
raise ValueError('an rst option cannot be documented '
'outside of a directive')
sig = sig.strip()
match = re.match(r'^([^ ]+)(( .*)?)', sig)
if match is None:
raise ValueError(f'invalid option name {sig}')
name, value_desc = match.group(1), match.group(2)
name, value_desc = name.strip(), value_desc.strip()
dispname = f':{name}:'
if value_desc:
dispname += ' '
signode += sphinx.addnodes.desc_name(dispname, dispname)
if value_desc:
signode += sphinx.addnodes.desc_addname(value_desc, value_desc)
return directive[0] + ':' + name
class OptRole(sphinx.roles.XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['rst:directive'] = env.ref_context.get('rst:directive', None)
return super().process_link(env, refnode, has_explicit_title, title, target)
class ExtendedReSTDomain(sphinx.domains.rst.ReSTDomain):
object_types = sphinx.domains.rst.ReSTDomain.object_types.copy()
object_types['option'] = sphinx.domains.ObjType(_('option'), 'opt')
directives = sphinx.domains.rst.ReSTDomain.directives.copy()
directives['directive'] = ReSTDirective
directives['option'] = ReSTOption
roles = sphinx.domains.rst.ReSTDomain.roles.copy()
roles['opt'] = OptRole()
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if typ == 'opt':
if ':' not in target and node.get('rst:directive', None):
target = node['rst:directive'][0] + ':' + target
return super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
@dataclasses.dataclass
class AutoDirectiveSettings:
options: bool = True
"""
Generate documentation for directive options.
"""
prefixed_options: bool = False
"""
Generate documentation for directive options with a non-empty prefix.
"""
inherited_options: bool = True
"""
Generate documentation for inherited options (i.e., options that are
not in the namespace dataclass, but in its bases).
"""
prefix_filter: Optional[List[str]] = None
"""
Filter options documentation by option prefix.
"""
options_header: bool = True
"""
Controls whether directive should render a header for options section.
"""
namespace = Namespace('configurator', AutoDirectiveSettings)
class AutoDirective(ReSTDirective, ManagedDirective):
"""
Generates documentation for rst directives, including documentation for
its options.
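**Example:**
.. code-block:: rst
.. rst:autodirective:: railroad-diagram
The directive name may also be domain-qualified, e.g. ``a4:autogrammar``.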
"""
settings = namespace.for_directive()
def run(self):
self.name = 'rst:directive'
nodes = super().run()
try:
directive = self.load_directive()
except sphinx.errors.ExtensionError as e:
return [
self.state_machine.reporter.error(
str(e),
line=self.content_offset
)
]
if not issubclass(directive, docutils.parsers.rst.Directive):
return [
self.state_machine.reporter.error(
'cannot autodocument a directive that is not derived '
'from docutils.parsers.rst.Directive',
line=self.content_offset
)
]
for node in nodes:
if isinstance(node, sphinx.addnodes.desc):
for content_node in node.children:
if isinstance(content_node, sphinx.addnodes.desc_content):
self.render_directive(directive, content_node)
return nodes
else:
raise RuntimeError('no desc_content node can be found')
else:
raise RuntimeError('no desc node can be found')
def load_directive(self):
if len(self.names) < 1:
raise sphinx.errors.ExtensionError(
'should provide at least one signature'
)
directive_name = self.names[0]
if ':' in directive_name:
domain_name, directive_name = directive_name.split(':', 1)
if domain_name not in self.env.domains:
raise sphinx.errors.ExtensionError(
f'unknown domain {domain_name!r}'
)
domain = self.env.domains[domain_name]
if directive_name not in domain.directives:
raise sphinx.errors.ExtensionError(
f'unknown directive {directive_name!r} '
f'within domain {domain_name!r}'
)
return domain.directives[directive_name]
else:
directive, messages = sphinx.util.docutils.directives.directive(
directive_name,
en,
self.state.document
)
if directive is None:
raise sphinx.errors.ExtensionError(
f'unknown directive {directive_name!r}'
)
return directive
def render_directive(self, directive, nodes):
if getattr(directive, '__doc__', None):
doc_node = find_or_add_marker(nodes, 'docstring')
self.before_content()
try:
doc = self.canonize_docstring(directive.__doc__)
lines = docutils.statemachine.StringList(doc.splitlines())
self.state.nested_parse(lines, self.content_offset, doc_node)
finally:
self.after_content()
doc_node.replace_self(doc_node.children)
if not self.settings.options:
return
holders: Set[NamespaceHolder] = getattr(
directive,
'_namespace_attrs_',
set()
)
options: List[Tuple[str, Any, List[dataclasses.Field]]] = []
for holder in holders:
if holder.prefix:
if not self.settings.prefixed_options:
continue
if (self.settings.prefix_filter is not None and
holder.prefix not in self.settings.prefix_filter):
continue
prefix = holder.prefix
else:
prefix = ''
fields = holder.namespace.fields()
cls = holder.namespace.get_cls()
if fields:
options.append((prefix, cls, fields))
if not options:
return
opt_node = find_or_add_marker(nodes, 'members')
if self.settings.options_header:
# TODO: maybe add anchor?
p = docutils.nodes.paragraph('', '')
p += docutils.nodes.strong('Options:', _('Options:'))
opt_node += p
for p, cls, fields in sorted(options, key=lambda x: x[0]):
fields = filter(lambda x: x[0], [
(self.resolve_arg_doc_and_index(field.name, cls), field)
for field in fields
])
fields = sorted(fields, key=lambda x: (x[0][0], x[1].name))
for (i, doc), field in fields:
if p:
p += '-'
name = field.name.replace('_', '-')
names = [p + name]
if 'converter' in field.metadata:
value_desc = str(field.metadata['converter'])
elif field.type is bool:
value_desc = ''
names.append(p + 'no-' + name)
else:
value_desc = str(make_converter(field.type))
opt_node += self.render_option(names, value_desc, doc)
opt_node.replace_self(opt_node.children)
def render_option(self, names, value_desc, doc):
lines = docutils.statemachine.StringList(doc.splitlines())
directive = ReSTOption(
name='rst:option',
arguments=[
'\n'.join([f'{name} {value_desc}' for name in names])
],
options=self.options,
content=lines,
lineno=self.lineno,
content_offset=self.content_offset,
block_text=self.block_text,
state=self.state,
state_machine=self.state_machine
)
self.before_content()
try:
return directive.run()
finally:
self.after_content()
@staticmethod
def canonize_docstring(description):
if description is None:
return description
lines = description.split('\n')
lines = list(map(str.rstrip, lines))
# Handle trivial cases:
if len(lines) <= 1:
return '\n'.join(lines) + '\n\n'
# Ensure there is a blank line at the end of description:
if lines[-1]:
lines.append('')
# The first line follows immediately after the triple quote, so it has no
# leading indentation. We dedent the remaining lines but leave the first
# one as is.
body = lines[0] + '\n' + textwrap.dedent('\n'.join(lines[1:]))
# Remove any leading newlines and ensure that
# there is only one trailing newline.
body = body.strip('\n') + '\n\n'
return body
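# For example:
#
#   canonize_docstring('First line.\n    indented body\n')
#
# returns 'First line.\nindented body\n\n': the body is dedented relative
# to the first line and exactly one trailing blank line is kept.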
def resolve_arg_doc_and_index(self, name, dataclass: type) -> Optional[Tuple[Tuple[int, int], str]]:
if self.settings.inherited_options:
bases = dataclass.__mro__
else:
bases = [dataclass]
for i, base in enumerate(bases):
analyzer = ModuleAnalyzer.for_module(base.__module__)
docs = analyzer.find_attr_docs()
if (base.__qualname__, name) in docs:
tag = analyzer.tagorder[f'{base.__qualname__}.{name}']
return (-i, tag), self.canonize_docstring(
'\n'.join(docs[base.__qualname__, name])
)
return None
def setup(app: sphinx.application.Sphinx):
app.setup_extension('sphinx_a4doc.contrib.marker_nodes')
app.add_domain(ExtendedReSTDomain, override=True)
namespace.register_settings(app)
app.add_directive_to_domain('rst', 'autodirective', AutoDirective)
return {
'version': '1.0.0',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
sphinx-a4doc-1.6.0/sphinx_a4doc/diagram_directive.py 0000664 0000000 0000000 00000036170 14351074657 0022441 0 ustar 00root root 0000000 0000000 import json
import os.path
import docutils.parsers.rst
import docutils.nodes
import docutils.utils
import sphinx.addnodes
import sphinx.util.docutils
import sphinx.writers.html
import sphinx.writers.text
import sphinx.writers.latex
import sphinx.writers.manpage
import sphinx.util.docutils
import sphinx.util.logging
import sphinx.environment
import yaml
import yaml.error
from sphinx_a4doc.contrib.configurator import ManagedDirective
from sphinx_a4doc.contrib.railroad_diagrams import Diagram, HrefResolver
from sphinx_a4doc.model.model import ModelCache
from sphinx_a4doc.model.model_renderer import Renderer
from sphinx_a4doc.settings import diagram_namespace, DiagramSettings
from typing import *
logger = sphinx.util.logging.getLogger(__name__)
class DomainResolver(HrefResolver):
def __init__(self, builder, grammar: str):
self.builder = builder
self.grammar = grammar
def resolve(self, text: str, href: Optional[str], title_is_weak: bool):
# There are three alternative situations when resolving rules:
# - href is not passed. In this case we resolve the rule as if a role
# without an explicit title was invoked, i.e. we treat text as both
# title and target. If rule resolution succeeds and the resolved rule
# has a human-readable name set, we replace the title with it;
# - href is passed explicitly. In this case we simulate invocation
# of a role with an explicit title, i.e. we use href to resolve the rule
# and we don't touch the title at all;
# - title_is_weak is set. This means that the title comes from an
# autogenerated rule which could've been overridden in an .rst file.
# In this case, if rule resolution succeeds and the resolved rule
# has a human-readable name set, we replace the title with it.
title = text
if href is None:
target = text
explicit_title = False
else:
target = href
explicit_title = not title_is_weak
builder = self.builder
env = builder.env
domain = env.get_domain('a4')
if hasattr(builder, 'current_docname'):
docname = builder.current_docname
else:
docname = None
xref = sphinx.addnodes.pending_xref(
'',
reftype='rule',
refdomain='a4',
refexplicit=explicit_title
)
xref['a4:grammar'] = self.grammar
try:
node: docutils.nodes.Element = domain.resolve_xref(
env,
docname,
builder,
'rule',
target,
xref,
docutils.nodes.literal(title, title)
)
except sphinx.environment.NoUri:
node = None
if node is None:
return title, None
reference = node.next_node(docutils.nodes.reference, include_self=True)
assert reference is not None
literal = node.next_node(docutils.nodes.literal, include_self=True)
assert literal is not None
if 'refuri' in reference:
return literal.astext(), reference['refuri']
else:
return literal.astext(), '#' + reference['refid']
class RailroadDiagramNode(docutils.nodes.Element, docutils.nodes.General):
def __init__(
self,
rawsource='',
*args,
diagram: dict,
options: DiagramSettings,
grammar: str,
**kwargs
):
super().__init__(
rawsource,
*args,
diagram=diagram,
options=options,
grammar=grammar,
**kwargs
)
@staticmethod
def node_to_svg(self: sphinx.util.docutils.SphinxTranslator, node, add_style=False):
resolver = DomainResolver(self.builder, node['grammar'])
dia = Diagram(settings=node['options'], href_resolver=resolver)
style = None
if add_style:
for basedir in self.config.html_static_path:
path = os.path.join(self.builder.confdir, basedir, 'a4_railroad_diagram_latex.css')
if os.path.exists(path):
with open(path, 'r') as f:
style = f.read()
break
try:
data = dia.load(node['diagram'])
return dia.render(data, style=style)
except Exception as e:
logger.exception(f'{node.source}:{node.line}: WARNING: {e}')
@staticmethod
def visit_node_html(self: sphinx.writers.html.HTMLTranslator, node):
svg = RailroadDiagramNode.node_to_svg(self, node)
if svg:
self.body.append('<p>')  # open a wrapper element around the inline SVG (assumed to be a plain <p>)
self.body.append(svg)
self.body.append('</p>')
@staticmethod
def visit_node_text(self: sphinx.writers.text.TextTranslator, node):
if node['options'].alt:
self.add_text('{}'.format(node['options'].alt))
else:
self.add_text(yaml.dump(node['diagram']))
@staticmethod
def visit_node_latex(self: sphinx.writers.latex.LaTeXTranslator, node):
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPDF
import io
import hashlib
outdir = os.path.join(self.builder.outdir, 'railroad_diagrams')
os.makedirs(outdir, exist_ok=True)
hash = hashlib.sha256()
hash.update(
yaml.safe_dump(node['diagram'], sort_keys=True, canonical=True).encode())
hash.update(
repr(node['options']).encode())
pdf_file = f'diagram:{node["grammar"]}:{hash.hexdigest()}.pdf'
pdf_file = os.path.join(outdir, pdf_file)
svg = RailroadDiagramNode.node_to_svg(self, node, add_style=True)
svg_file = io.StringIO(svg)
rlg = svg2rlg(svg_file)
renderPDF.drawToFile(rlg, pdf_file)
self.body.append(
f'\n\n\\includegraphics[scale=0.6]{{{pdf_file}}}\n\n'
)
@staticmethod
def visit_node_man(self: sphinx.writers.manpage.ManualPageTranslator, node):
if node['options'].alt:
self.body.append('{}'.format(node['options'].alt))
else:
self.body.append(yaml.dump(node['diagram']))
def depart_node(self, node):
pass
class RailroadDiagram(sphinx.util.docutils.SphinxDirective, ManagedDirective):
"""
This is the most flexible directive for rendering railroad diagrams.
Its content should be a valid `YAML <https://yaml.org/>`_
document containing the diagram item description.
The diagram item description itself has a recursive definition.
It can be one of the next things:
- ``None`` (denoted as tilde in YAML) will produce a line without objects:
.. code-block:: rst
.. railroad-diagram:: ~
.. highlights::
.. railroad-diagram:: ~
- a string will produce a terminal node:
.. code-block:: rst
.. railroad-diagram:: just some string
.. highlights::
.. railroad-diagram:: just some string
- a list of diagram item descriptions will produce these items rendered one
next to another:
.. code-block:: rst
.. railroad-diagram::
- terminal 1
- terminal 2
.. highlights::
.. railroad-diagram::
- terminal 1
- terminal 2
- a dict with ``stack`` key produces a vertically stacked sequence.
The main value (i.e. the one that corresponds to the ``stack`` key)
should contain a list of diagram item descriptions.
These items will be rendered vertically:
.. code-block:: rst
.. railroad-diagram::
stack:
- terminal 1
-
- terminal 2
- terminal 3
.. highlights::
.. railroad-diagram::
stack:
- terminal 1
-
- terminal 2
- terminal 3
- a dict with ``choice`` key produces an alternative.
The main value should contain a list of diagram item descriptions:
.. code-block:: rst
.. railroad-diagram::
choice:
- terminal 1
-
- terminal 2
- terminal 3
.. highlights::
.. railroad-diagram::
choice:
- terminal 1
-
- terminal 2
- terminal 3
- a dict with ``optional`` key will produce an optional item.
The main value should contain a single diagram item description.
Additionally, the ``skip`` key with a boolean value may be added.
If equal to true, the element will be rendered off the main line:
.. code-block:: rst
.. railroad-diagram::
optional:
- terminal 1
- optional:
- terminal 2
skip: true
.. highlights::
.. railroad-diagram::
optional:
- terminal 1
- optional:
- terminal 2
skip: true
- a dict with ``one_or_more`` key will produce a loop.
The ``one_or_more`` element of the dict should contain a single diagram
item description.
Additionally, the ``repeat`` key with another diagram item description
may be added to place nodes on the loop's backward (repeat) connection.
.. code-block:: rst
.. railroad-diagram::
one_or_more:
- terminal 1
- terminal 2
repeat:
- terminal 3
- terminal 4
.. highlights::
.. railroad-diagram::
one_or_more:
- terminal 1
- terminal 2
repeat:
- terminal 3
- terminal 4
- a dict with ``zero_or_more`` key works like ``one_or_more`` except that
the produced item is optional:
.. code-block:: rst
.. railroad-diagram::
zero_or_more:
- terminal 1
- terminal 2
repeat:
- terminal 3
- terminal 4
.. highlights::
.. railroad-diagram::
zero_or_more:
- terminal 1
- terminal 2
repeat:
- terminal 3
- terminal 4
- a dict with ``node`` key produces a textual node of configurable shape.
The main value should contain text which will be rendered in the node.
Optional keys include ``href``, ``css_class``, ``radius`` and ``padding``.
.. code-block:: rst
.. railroad-diagram::
node: go to google
href: https://www.google.com/
css_class: terminal
radius: 3
padding: 50
.. highlights::
.. railroad-diagram::
node: go to google
href: https://www.google.com/
css_class: terminal
radius: 3
padding: 50
- a dict with ``terminal`` key produces a terminal node.
It works like ``node``, except that the only optional key is ``href``.
- a dict with ``non_terminal`` key produces a non-terminal node.
It works like ``node``, except that the only optional key is ``href``.
- a dict with ``comment`` key produces a comment node.
It works like ``node``, except that the only optional key is ``href``.
**Example:**
This example renders a diagram from the :ref:`features <features>` section:
.. code-block:: rst
.. railroad-diagram::
- choice:
- terminal: 'parser'
-
- terminal: 'lexer '
default: 1
- terminal: 'grammar'
- non_terminal: 'identifier'
- terminal: ';'
which translates to:
.. highlights::
.. railroad-diagram::
- choice:
- terminal: 'parser'
-
- terminal: 'lexer '
default: 1
- terminal: 'grammar'
- non_terminal: 'identifier'
- terminal: ';'
**Customization:**
See more on how to customize diagram style in the ':ref:`custom_style`'
section.
"""
has_content = True
settings = diagram_namespace.for_directive()
def run(self):
grammar = self.env.ref_context.get('a4:grammar', '__default__')
try:
content = self.get_content()
except Exception as e:
return [
self.state_machine.reporter.error(
str(e),
line=self.lineno
)
]
return [
RailroadDiagramNode(
diagram=content, options=self.settings, grammar=grammar
)
]
def get_content(self):
return yaml.safe_load('\n'.join(self.content))
class AntlrDiagram(RailroadDiagram):
def get_imports(self):
if self.env.temp_data.get('a4:autogrammar_ctx'):
path = self.env.temp_data['a4:autogrammar_ctx'][-1]
return [ModelCache.instance().from_file(path)]
else:
return []
class LexerRuleDiagram(AntlrDiagram):
"""
The body of this directive should contain a valid Antlr4 lexer rule
description.
For example:
.. code-block:: rst
.. lexer-rule-diagram:: ('+' | '-')? [1-9] [0-9]*
translates to:
.. highlights::
.. lexer-rule-diagram:: ('+' | '-')? [1-9] [0-9]*
**Options:**
Options are inherited from the :rst:dir:`railroad-diagram` directive.
"""
def get_content(self):
raw = "\n".join(self.content)
content = f'grammar X; ROOT : {raw} ;'
model = ModelCache.instance().from_text(
content, (self.state_machine.reporter.source, self.content_offset),
self.get_imports())
tree = model.lookup('ROOT')
if tree is None or tree.content is None:
raise RuntimeError('cannot parse the rule')
renderer = Renderer(
self.settings.literal_rendering,
self.settings.cc_to_dash
)
return renderer.visit(tree.content)
class ParserRuleDiagram(AntlrDiagram):
"""
The body of this directive should contain a valid Antlr4 parser rule
description.
For example:
.. code-block:: rst
.. parser-rule-diagram::
SELECT DISTINCT?
('*' | expression (AS row_name)?
(',' expression (AS row_name)?)*)
translates to:
.. highlights::
.. parser-rule-diagram::
SELECT DISTINCT?
('*' | expression (AS row_name)?
(',' expression (AS row_name)?)*)
**Options:**
Options are inherited from the :rst:dir:`railroad-diagram` directive.
"""
def get_content(self):
raw = "\n".join(self.content)
content = f'grammar X; root : {raw} ;'
model = ModelCache.instance().from_text(
content, (self.state_machine.reporter.source, self.content_offset),
self.get_imports())
tree = model.lookup('root')
if tree is None or tree.content is None:
raise RuntimeError('cannot parse the rule')
renderer = Renderer(
self.settings.literal_rendering,
self.settings.cc_to_dash
)
return renderer.visit(tree.content)
sphinx-a4doc-1.6.0/sphinx_a4doc/domain.py 0000664 0000000 0000000 00000034566 14351074657 0020255 0 ustar 00root root 0000000 0000000 import re
from dataclasses import dataclass
import sphinx.addnodes
import sphinx.util.docutils
import sphinx.util.logging
import sphinx.util.nodes
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.locale import _
from sphinx_a4doc.settings import grammar_namespace, rule_namespace, diagram_namespace, GrammarType
from sphinx_a4doc.contrib.configurator import ManagedDirective
from typing import *
ID_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*$', re.UNICODE)
class A4ObjectDescription(ObjectDescription, ManagedDirective):
"""
Common base for rule and grammar directives.
"""
diagram_settings = diagram_namespace.for_directive('diagram')
"""
We can redefine rendering settings for all diagrams within a single object.
"""
def get_fqn(self, name: str) -> str:
"""
Get fully qualified name for the given object.
"""
raise NotImplementedError
def get_display_name(self) -> Optional[str]:
"""
Get the display name which will be used instead of the plain name.
"""
raise NotImplementedError
def get_type(self) -> Optional[str]:
"""
Get the object type which will be used in the signature and index entry.
"""
raise NotImplementedError
def before_content(self):
self.push_settings(diagram_namespace, self.diagram_settings)
if self.names:
self.env.ref_context['a4:' + self.objtype] = self.names[0]
def after_content(self):
self.pop_settings(diagram_namespace)
if self.names:
self.env.ref_context.pop('a4:' + self.objtype)
def signature_fail(self, msg):
self.state_machine.reporter.warning(msg, line=self.lineno)
raise ValueError()
def handle_signature(self, sig, signode):
if ID_RE.match(sig) is None:
self.signature_fail(f'entity name {sig!r} is invalid')
subtype = self.get_type()
display_name = self.get_display_name()
if subtype:
ann = f'{subtype} {self.objtype} '
else:
ann = f'{self.objtype} '
signode += sphinx.addnodes.desc_annotation(ann, ann)
if display_name:
signode += sphinx.addnodes.desc_name(display_name, display_name)
else:
signode += sphinx.addnodes.desc_name(sig, sig)
return sig
def add_target_and_index(self, name, sig, signode):
fqn = self.get_fqn(name)
anchor = 'a4.' + fqn
if anchor not in self.state.document.ids:
signode['names'].append(anchor)
signode['ids'].append(anchor)
signode['first'] = not self.names
self.state.document.note_explicit_target(signode)
domain = self.env.domains[A4Domain.name]
assert isinstance(domain, A4Domain)
if fqn in domain.index:
path = self.env.doc2path(domain.index[fqn].docname)
self.state_machine.reporter.warning(
f'duplicate Antlr4 object description of {name}, '
f'other instance in {path}',
line=self.lineno)
self.add_target(name, fqn, anchor, domain)
self.add_index(name, fqn, anchor)
def add_target(self, name, fqn, anchor, domain):
raise NotImplementedError
def add_index(self, name, fqn, anchor):
subtype = self.get_type()
objtype = A4Domain.object_types[self.objtype].lname
display_name = self.get_display_name() or name
# TODO: translate
if subtype:
indextext = f'{display_name} (Antlr4 {subtype} {objtype})'
else:
indextext = f'{display_name} (Antlr4 {objtype})'
self.indexnode['entries'].append(
('single', indextext, anchor, '', None)
)
class Grammar(A4ObjectDescription):
"""
Declare a new grammar with the given name.
Grammar names should be unique within the project.
.. members-marker::
.. rst:option:: noindex
A standard sphinx option to disable indexing for this grammar.
.. rst:option:: diagram-*
One can override any option for all
:rst:dir:`railroad diagrams <railroad-diagram>` within this grammar.
Prefix the desired option with ``diagram-`` and add it to the
grammar description.
For example:
.. code-block:: rst
.. a4:grammar:: Test
:diagram-end-class: complex
All diagrams rendered inside this grammar
will have 'end-class' set to 'complex'.
"""
settings = grammar_namespace.for_directive()
def get_fqn(self, name: str) -> str:
return name
def get_display_name(self) -> Optional[str]:
return self.settings.name
def get_type(self) -> Optional[str]:
if self.settings.type is GrammarType.MIXED:
return None
else:
return self.settings.type.name.lower()
def add_target(self, name, fqn, anchor, domain):
domain.register_grammar(
docname=self.env.docname,
name=name,
fqn=fqn,
display_name=self.get_display_name(),
relations=self.settings.imports
)
def handle_signature(self, sig, signode):
if 'a4:rule' in self.env.ref_context:
self.signature_fail('defining grammars within a rule body is not allowed')
if 'a4:grammar' in self.env.ref_context:
self.signature_fail('defining nested grammars is not allowed')
return super().handle_signature(sig, signode)
class Rule(A4ObjectDescription):
"""
Declare a new production rule with the given name.
If placed within an :rst:dir:`a4:grammar` body, the rule will be added to
that grammar. It can then be referenced by a full path which will include
the grammar name and the rule name concatenated with a dot symbol.
If placed outside any grammar directive, the rule will be added to
an implicitly declared "default" grammar. In this case, the rule's full
path will only include its name.
In either case, the rule name should be unique within its grammar.
.. members-marker::
.. rst:option:: noindex
A standard sphinx option to disable indexing for this rule.
.. rst:option:: diagram-*
One can override any option for all
:rst:dir:`railroad diagrams <railroad-diagram>`
within this rule. Refer to the corresponding option of the
:rst:dir:`a4:grammar` directive for more info.
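For example (``myRule`` here is a placeholder name):
.. code-block:: rst
.. a4:rule:: myRule
:diagram-end-class: complex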
"""
settings = rule_namespace.for_directive()
def get_fqn(self, name: str) -> str:
grammar = self.env.ref_context.get(
'a4:grammar', A4Domain.DEFAULT_GRAMMAR.name
)
return grammar + '.' + name
def get_display_name(self) -> Optional[str]:
return self.settings.name
def get_type(self) -> Optional[str]:
return None
def add_target(self, name, fqn, anchor, domain):
domain.register_rule(
docname=self.env.docname,
name=name,
fqn=fqn,
display_name=self.get_display_name(),
)
def handle_signature(self, sig, signode):
if 'a4:rule' in self.env.ref_context:
self.signature_fail('defining nested rules is not allowed')
return super().handle_signature(sig, signode)
class A4XRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['a4:grammar'] = env.ref_context.get(
'a4:grammar', A4Domain.DEFAULT_GRAMMAR.name
)
# This is the standard tilde handling, copied from ``c`` domain:
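# e.g. a reference written as ``:a4:r:`~MyGrammar.myRule``` resolves
# against the full target but displays just ``myRule`` as its title.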
target = target.lstrip('~')
if not has_explicit_title:
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
return super().process_link(env, refnode, has_explicit_title, title, target)
class A4Domain(Domain):
@dataclass
class IndexEntry:
docname: str
"""
Name of the document in which this entry was indexed.
"""
objtype: str
"""
Object type, either ``'grammar'`` or ``'rule'``.
"""
name: str
"""
Object name.
"""
fqn: str
"""
Fully qualified name.
"""
display_name: Optional[str] = None
"""
Human-readable name which should replace the default name in crossrefs.
"""
relations: Optional[List[str]] = None
"""
For grammar objects, contains the list of imported grammars.
"""
DEFAULT_GRAMMAR = IndexEntry(
docname='',
objtype='grammar',
name='__default__',
fqn='__default__',
display_name=None,
relations=[]
)
name = 'a4'
label = 'Antlr4'
object_types = {
'grammar': ObjType(_('grammar'), 'grammar', 'g'),
'rule': ObjType(_('production rule'), 'rule', 'r'),
}
directives = {
'grammar': Grammar,
'rule': Rule,
}
roles = {
'grammar': A4XRefRole(),
'g': A4XRefRole(),
'rule': A4XRefRole(),
'r': A4XRefRole(),
}
initial_data = {
'objects': {}, # fullname -> index entry
}
def register_grammar(self, docname, name, fqn, display_name, relations):
self.index[fqn] = A4Domain.IndexEntry(
docname=docname,
objtype='grammar',
name=name,
fqn=fqn,
display_name=display_name,
relations=relations
)
def register_rule(self, docname, name, fqn, display_name):
self.index[fqn] = A4Domain.IndexEntry(
docname=docname,
objtype='rule',
name=name,
fqn=fqn,
display_name=display_name,
relations=None
)
@property
def index(self) -> Dict[str, IndexEntry]:
return self.data['objects']
def lookup(self, fqn, objtype):
if fqn not in self.index:
return None
if self.index[fqn].objtype != objtype:
return None
return self.index[fqn]
def lookup_grammar(self, fqn):
return self.lookup(fqn, 'grammar')
def lookup_rule(self, fqn):
return self.lookup(fqn, 'rule')
def traverse_grammars(self, roots, add_default_grammar):
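# Iterative depth-first walk over the grammar import graph; the `seen` set
# keeps the traversal terminating even when imports are cyclic.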
stack = list(roots)
seen = set()
while stack:
grammar_name = stack.pop()
if grammar_name in seen:
continue
seen.add(grammar_name)
grammar = self.lookup_grammar(grammar_name)
if grammar is not None:
yield grammar
stack.extend(grammar.relations or [])
# else:
# self.env.warn_node(
# f'cannot resolve grammar {grammar_name!r}', node
# )
if add_default_grammar:
yield self.DEFAULT_GRAMMAR
def clear_doc(self, docname):
for fqn, entry in list(self.index.items()):
if entry.docname == docname:
self.index.pop(fqn)
def merge_domaindata(self, docnames, otherdata):
objects: Dict[str, A4Domain.IndexEntry] = otherdata['objects']
objects = {k: v for k, v in objects.items() if v.docname in docnames}
self.index.update(objects)
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if typ in ['grammar', 'g']:
resolver = self.resolve_grammar
elif typ in ['rule', 'r']:
resolver = self.resolve_rule
else:
raise RuntimeError(f'unknown object type {typ}')
return resolver(env, fromdocname, builder, target, node, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
results = []
as_grammar = self.resolve_grammar(env, fromdocname, builder, target, node, contnode)
if as_grammar is not None:
results.append(('a4:grammar', as_grammar))
as_rule = self.resolve_rule(env, fromdocname, builder, target, node, contnode, True)
for r in as_rule:
results.append(('a4:rule', r))
return results
def resolve_grammar(self, env, fromdocname, builder, target, node, contnode):
obj = self.lookup_grammar(target)
if obj is not None:
return self.make_refnode(fromdocname, builder, node, contnode, obj)
def resolve_rule(self, env, fromdocname, builder, target, node, contnode, allow_multiple=False):
if '.' in target:
# Got fully qualified rule reference.
add_default_grammar = False
grammar_name, rule_name = target.rsplit('.', 1)
roots = [grammar_name]
elif 'a4:grammar' in node:
# Got rule reference made by A4XRefRole.
add_default_grammar = True
if node['a4:grammar'] == self.DEFAULT_GRAMMAR.name:
roots = []
else:
roots = [node['a4:grammar']]
rule_name = target
else:
# Got rule reference made by AnyXRefRole.
add_default_grammar = True
roots = [k for k, v in self.index.items() if v.objtype == 'grammar']
rule_name = target
results = []
for grammar in self.traverse_grammars(roots, add_default_grammar):
fqn = f'{grammar.name}.{rule_name}'
obj = self.lookup_rule(fqn)
if obj is not None:
refnode = self.make_refnode(fromdocname, builder, node, contnode, obj)
if allow_multiple:
results.append(refnode)
else:
return refnode
if allow_multiple:
return results
else:
return None
def make_refnode(self, fromdocname, builder, node, contnode, obj):
if not node['refexplicit'] and obj.display_name:
contnode = contnode.deepcopy()
contnode.clear()
contnode += sphinx.util.docutils.nodes.Text(obj.display_name)
return sphinx.util.nodes.make_refnode(
builder, fromdocname, obj.docname, 'a4.' + obj.fqn, contnode, obj.fqn
)
def get_objects(self):
for fqn, entry in self.index.items():
display_name = entry.display_name or entry.name or fqn
yield (fqn, display_name, entry.objtype, entry.docname, 'a4.' + fqn, 1)
sphinx-a4doc-1.6.0/sphinx_a4doc/model/ 0000775 0000000 0000000 00000000000 14351074657 0017516 5 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/model/__init__.py 0000664 0000000 0000000 00000000000 14351074657 0021615 0 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/model/impl.py 0000664 0000000 0000000 00000056322 14351074657 0021041 0 ustar 00root root 0000000 0000000 import os
import re
import textwrap
from typing import *
from antlr4 import CommonTokenStream, InputStream
from antlr4.error.ErrorListener import ErrorListener
from sphinx_a4doc.model.model import ModelCache, Model, Position, RuleBase, LexerRule, ParserRule, Section
from sphinx_a4doc.syntax import Lexer, Parser, ParserVisitor
import sphinx.util.logging
__all__ = [
'ModelCacheImpl',
'ModelImpl',
'MetaLoader',
'RuleLoader',
'LexerRuleLoader',
'ParserRuleLoader',
]
logger = sphinx.util.logging.getLogger(__name__)
CMD_RE = re.compile(r'''
//@\s*doc\s*:\s*(?P<cmd>[a-zA-Z0-9_-]+)\s*(?P<ctx>.*)
''', re.UNICODE | re.VERBOSE)
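# Illustrative matches (not from the original source):
#   '//@ doc:name Fancy Rule' -> cmd='name', ctx='Fancy Rule'
#   '//@ doc:importance 2'    -> cmd='importance', ctx='2'
#   '//@ doc:nodoc'           -> cmd='nodoc', ctx=''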
class LoggingErrorListener(ErrorListener):
def __init__(self, path: str, offset: int):
self._path = path
self._offset = offset
def syntaxError(self, recognizer, offending_symbol, line, column, msg, e):
logger.error(f'{self._path}:{line + self._offset}: WARNING: {msg}')
class ModelCacheImpl(ModelCache):
def __init__(self):
self._loaded: Dict[str, Model] = {}
def from_file(self, path: Union[str, Tuple[str, int]]) -> 'Model':
if isinstance(path, tuple):
path, offset = path
else:
path, offset = path, 0
path = os.path.abspath(os.path.normpath(path))
if path in self._loaded:
return self._loaded[path]
if not os.path.exists(path):
logger.error(f'unable to load {path!r}: file not found')
model = self._loaded[path] = ModelImpl(path, offset, False, True)
return model
with open(path, 'r', encoding='utf-8', errors='strict') as f:
self._loaded[path] = self._do_load(f.read(), path, offset, False, [])
return self._loaded[path]
def from_text(self, text: str, path: Union[str, Tuple[str, int]] = '', imports: List['Model'] = None) -> 'Model':
if isinstance(path, tuple):
path, offset = path
else:
path, offset = path, 0
return self._do_load(text, path, offset, True, imports)
def _do_load(self, text: str, path: str, offset: int, in_memory: bool, imports: List['Model']) -> 'Model':
content = InputStream(text)
lexer = Lexer(content)
lexer.removeErrorListeners()
lexer.addErrorListener(LoggingErrorListener(path, offset))
tokens = CommonTokenStream(lexer)
parser = Parser(tokens)
parser.removeErrorListeners()
parser.addErrorListener(LoggingErrorListener(path, offset))
tree = parser.grammarSpec()
if parser.getNumberOfSyntaxErrors():
return ModelImpl(path, offset, in_memory, True)
model = ModelImpl(path, offset, in_memory, False)
for im in imports or []:
model.add_import(im)
MetaLoader(model, self).visit(tree)
LexerRuleLoader(model).visit(tree)
ParserRuleLoader(model).visit(tree)
return model
class ModelImpl(Model):
def __init__(self, path: str, offset: int, in_memory: bool, has_errors: bool):
self._path = path
self._in_memory = in_memory
self._offset = offset
self._has_errors = has_errors
self._lexer_rules: Dict[str, LexerRule] = {}
self._parser_rules: Dict[str, ParserRule] = {}
self._imports: Set[Model] = set()
self._type: Optional[str] = None
self._name: Optional[str] = None
self._docs: Optional[List[Tuple[int, str]]] = None
def has_errors(self) -> bool:
return self._has_errors
def get_type(self) -> Optional[str]:
return self._type
def set_type(self, t: str):
self._type = t
def get_name(self) -> str:
return self._name
def set_name(self, n: str):
self._name = n
def is_in_memory(self):
return self._in_memory
def get_path(self) -> str:
return self._path
def get_model_docs(self) -> Optional[List[Tuple[int, str]]]:
return self._docs
def set_model_docs(self, docs: Optional[List[Tuple[int, str]]]):
self._docs = docs
def get_offset(self) -> int:
return self._offset
def add_import(self, model: 'Model'):
self._imports.add(model)
def set_lexer_rule(self, name: str, rule: LexerRule):
self._lexer_rules[name] = rule
def set_parser_rule(self, name: str, rule: ParserRule):
self._parser_rules[name] = rule
def lookup_local(self, name: str) -> Optional[RuleBase]:
if name in self._lexer_rules:
return self._lexer_rules[name]
if name in self._parser_rules:
return self._parser_rules[name]
return None
def get_imports(self) -> Iterable[Model]:
return iter(self._imports)
def get_terminals(self) -> Iterable[LexerRule]:
return iter(set(self._lexer_rules.values()))
def get_non_terminals(self) -> Iterable[ParserRule]:
return iter(set(self._parser_rules.values()))
class MetaLoader(ParserVisitor):
def __init__(self, model: ModelImpl, cache: ModelCacheImpl):
self._model = model
self._cache = cache
if self._model.is_in_memory():
self._basedir = None
else:
self._basedir = os.path.dirname(self._model.get_path())
def add_import(self, name: str, position: Position):
if self._model.is_in_memory():
logger.error(f'{position}: WARNING: imports are not allowed for in-memory grammars')
else:
model = self._cache.from_file(os.path.join(self._basedir, name + '.g4'))
self._model.add_import(model)
def visitGrammarSpec(self, ctx):
t = ctx.gtype.getText()
if 'lexer' in t: # that's nasty =(
t = 'lexer' # in fact, the whole file is nasty =(
elif 'parser' in t:
t = 'parser'
else:
t = None
self._model.set_name(ctx.gname.getText())
self._model.set_type(t)
if ctx.docs:
docs = load_docs(self._model, ctx.docs, allow_cmd=False)
self._model.set_model_docs(docs['documentation'])
return super(MetaLoader, self).visitGrammarSpec(ctx)
def visitParserRuleSpec(self, ctx: Parser.ParserRuleSpecContext):
return None # do not recurse into this
def visitLexerRuleSpec(self, ctx: Parser.LexerRuleSpecContext):
return None # do not recurse into this
def visitModeSpec(self, ctx: Parser.ModeSpecContext):
return None # do not recurse into this
def visitOption(self, ctx: Parser.OptionContext):
if ctx.name.getText() == 'tokenVocab':
self.add_import(ctx.value.getText(),
Position(self._model.get_path(), ctx.start.line + self._model.get_offset()))
def visitDelegateGrammar(self, ctx: Parser.DelegateGrammarContext):
self.add_import(ctx.value.getText(),
Position(self._model.get_path(), ctx.start.line + self._model.get_offset()))
def visitTokensSpec(self, ctx: Parser.TokensSpecContext):
tokens: List[Parser.IdentifierContext] = ctx.defs.defs
for token in tokens:
rule = LexerRule(
name=token.getText(),
display_name=None,
model=self._model,
position=Position(self._model.get_path(), token.start.line + self._model.get_offset()),
is_literal=False,
is_fragment=False,
content=None,
is_doxygen_nodoc=True,
is_doxygen_inline=True,
is_doxygen_no_diagram=True,
css_class=None,
importance=1,
documentation='',
section=None,
)
self._model.set_lexer_rule(rule.name, rule)
class RuleLoader(ParserVisitor):
rule_class: Union[Type[RuleBase], Type[LexerRule], Type[ParserRule]] = None
def __init__(self, model: ModelImpl):
self._model = model
self._current_section: Optional[Section] = None
def wrap_suffix(self, element, suffix):
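# Maps EBNF suffixes onto AST wrappers: 'x?' -> Maybe, 'x+' -> OnePlus,
# 'x*' -> ZeroPlus. Only the first character is inspected, so non-greedy
# suffixes ('??', '+?', '*?') render the same as their greedy forms.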
if element == self.rule_class.EMPTY:
return element
if suffix is None:
return element
suffix: str = suffix.getText()
if suffix.startswith('?'):
if isinstance(element, self.rule_class.Maybe):
return element
else:
return self.rule_class.Maybe(child=element)
if suffix.startswith('+'):
return self.rule_class.OnePlus(child=element)
if suffix.startswith('*'):
return self.rule_class.ZeroPlus(child=element)
return element
def make_alt_rule(self, content):
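# Flattens nested alternatives and folds empty branches into a Maybe
# wrapper; e.g. an alternative list like `a | | b` (one branch empty)
# becomes Maybe(Alternative(a, b)).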
has_empty_alt = False
alts = []
for alt in [self.visit(alt) for alt in content]:
if isinstance(alt, self.rule_class.Maybe):
has_empty_alt = True
alt = alt.child
if alt == self.rule_class.EMPTY:
has_empty_alt = True
elif isinstance(alt, self.rule_class.Alternative):
alts.extend(alt.children)
else:
alts.append(alt)
if len(alts) == 0:
return self.rule_class.EMPTY
elif len(alts) == 1 and has_empty_alt:
return self.rule_class.Maybe(child=alts[0])
elif len(alts) == 1:
return alts[0]
rule = self.rule_class.Alternative(children=tuple(alts))
if has_empty_alt:
rule = self.rule_class.Maybe(rule)
return rule
def make_seq_rule(self, content):
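# Flattens nested sequences into a single Sequence node, recording in
# `linebreaks` the positions after which wrapping the rendered diagram is
# preferable (after each element that was a separate top-level item).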
elements = []
linebreaks = set()
for element in [self.visit(element) for element in content]:
if isinstance(element, self.rule_class.Sequence):
elements.extend(element.children)
else:
elements.append(element)
linebreaks.add(len(elements) - 1)
if len(elements) == 1:
return elements[0]
linebreaks = tuple(True if i in linebreaks else False
for i in range(len(elements)))
return self.rule_class.Sequence(tuple(elements), linebreaks)
def visitRuleSpec(self, ctx: Parser.RuleSpecContext):
docs: List[Tuple[int, str]] = []
start_line = None
cur_line = None
cur_doc: List[str] = []
for token in ctx.headers:
text: str = token.text.lstrip('/').strip()
line: int = token.line + self._model.get_offset()
if start_line is None:
start_line = line
if cur_line is None or cur_line == line - 1:
cur_doc.append(text)
else:
docs.append((start_line, '\n'.join(cur_doc)))
start_line = line
cur_doc = [text]
cur_line = line
if cur_doc:
docs.append((start_line, '\n'.join(cur_doc)))
if docs:
self._current_section = Section(docs)
else:
self._current_section = None
super(RuleLoader, self).visitRuleSpec(ctx)
class LexerRuleLoader(RuleLoader):
rule_class = LexerRule
def visitParserRuleSpec(self, ctx: Parser.ParserRuleSpecContext):
return None # do not recurse into this
def visitPrequelConstruct(self, ctx: Parser.PrequelConstructContext):
return None # do not recurse into this
def visitLexerRuleSpec(self, ctx: Parser.LexerRuleSpecContext):
content: LexerRule.RuleContent = self.visit(ctx.lexerRuleBlock())
doc_info = load_docs(self._model, ctx.docs)
if isinstance(content, LexerRule.Literal):
is_literal = True
literal = content.content
else:
is_literal = False
literal = ''
rule = LexerRule(
name=ctx.name.text,
display_name=doc_info['name'] or None,
model=self._model,
position=Position(self._model.get_path(), ctx.start.line + self._model.get_offset()),
content=content,
is_doxygen_nodoc=doc_info['is_doxygen_nodoc'],
is_doxygen_inline=doc_info['is_doxygen_inline'],
is_doxygen_no_diagram=doc_info['is_doxygen_no_diagram'],
css_class=doc_info['css_class'],
importance=doc_info['importance'],
documentation=doc_info['documentation'],
is_fragment=bool(ctx.frag),
is_literal=is_literal,
section=self._current_section,
)
self._model.set_lexer_rule(rule.name, rule)
if is_literal:
self._model.set_lexer_rule(literal, rule)
def visitLexerAltList(self, ctx: Parser.LexerAltListContext):
return self.make_alt_rule(ctx.alts)
def visitLexerAlt(self, ctx: Parser.LexerAltContext):
return self.visit(ctx.lexerElements())
def visitLexerElements(self, ctx: Parser.LexerElementsContext):
return self.make_seq_rule(ctx.elements)
def visitLexerElementLabeled(self, ctx: Parser.LexerElementLabeledContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitLexerElementAtom(self, ctx: Parser.LexerElementAtomContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitLexerElementBlock(self, ctx: Parser.LexerElementBlockContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitLexerElementAction(self, ctx: Parser.LexerElementActionContext):
return LexerRule.EMPTY
def visitLabeledLexerElement(self, ctx: Parser.LabeledLexerElementContext):
return self.visit(ctx.lexerAtom() or ctx.lexerBlock())
def visitLexerBlock(self, ctx: Parser.LexerBlockContext):
return self.visit(ctx.lexerAltList())
def visitCharacterRange(self, ctx: Parser.CharacterRangeContext):
return LexerRule.Range(start=ctx.start.text, end=ctx.end.text)
def visitTerminalRef(self, ctx: Parser.TerminalRefContext):
return LexerRule.Reference(model=self._model, name=ctx.value.text)
def visitTerminalLit(self, ctx: Parser.TerminalLitContext):
content = ctx.value.text
if content == "''":
return LexerRule.EMPTY
else:
return LexerRule.Literal(content=ctx.value.text)
def visitLexerAtomCharSet(self, ctx: Parser.LexerAtomCharSetContext):
content = ctx.value.text
if content == '[]':
return LexerRule.EMPTY
else:
return LexerRule.CharSet(content=content)
def visitLexerAtomWildcard(self, ctx: Parser.LexerAtomWildcardContext):
return LexerRule.WILDCARD
def visitLexerAtomDoc(self, ctx: Parser.LexerAtomDocContext):
docs = load_docs(self._model, [ctx.value], False)['documentation']
return LexerRule.Doc(value='\n'.join(d[1] for d in docs))
def visitNotElement(self, ctx: Parser.NotElementContext):
return LexerRule.Negation(child=self.visit(ctx.value))
def visitNotBlock(self, ctx: Parser.NotBlockContext):
return LexerRule.Negation(child=self.visit(ctx.value))
def visitBlockSet(self, ctx: Parser.BlockSetContext):
return self.make_alt_rule(ctx.elements)
def visitSetElementRef(self, ctx: Parser.SetElementRefContext):
return LexerRule.Reference(model=self._model, name=ctx.value.text)
def visitSetElementLit(self, ctx: Parser.SetElementLitContext):
content = ctx.value.text
if content == "''":
return LexerRule.EMPTY
else:
return LexerRule.Literal(content=ctx.value.text)
def visitSetElementCharSet(self, ctx: Parser.SetElementCharSetContext):
content = ctx.value.text
if content == '[]':
return LexerRule.EMPTY
else:
return LexerRule.CharSet(content=content)
class ParserRuleLoader(RuleLoader):
rule_class = ParserRule
def visitParserRuleSpec(self, ctx: Parser.ParserRuleSpecContext):
content: ParserRule.RuleContent = self.visit(ctx.ruleBlock())
doc_info = load_docs(self._model, ctx.docs)
rule = ParserRule(
name=ctx.name.text,
display_name=doc_info['name'] or None,
model=self._model,
position=Position(self._model.get_path(), ctx.start.line + self._model.get_offset()),
content=content,
is_doxygen_nodoc=doc_info['is_doxygen_nodoc'],
is_doxygen_inline=doc_info['is_doxygen_inline'],
is_doxygen_no_diagram=doc_info['is_doxygen_no_diagram'],
css_class=doc_info['css_class'],
importance=doc_info['importance'],
documentation=doc_info['documentation'],
section=self._current_section,
)
self._model.set_parser_rule(rule.name, rule)
def visitPrequelConstruct(self, ctx: Parser.PrequelConstructContext):
return None # do not recurse into this
def visitLexerRuleSpec(self, ctx: Parser.LexerRuleSpecContext):
return None # do not recurse into this
def visitModeSpec(self, ctx: Parser.ModeSpecContext):
return None # do not recurse into this
def visitRuleAltList(self, ctx: Parser.RuleAltListContext):
return self.make_alt_rule(ctx.alts)
def visitAltList(self, ctx: Parser.AltListContext):
return self.make_alt_rule(ctx.alts)
def visitLabeledAlt(self, ctx: Parser.LabeledAltContext):
return self.visit(ctx.alternative())
def visitAlternative(self, ctx: Parser.AlternativeContext):
return self.make_seq_rule(ctx.elements)
def visitParserElementLabeled(self, ctx: Parser.ParserElementLabeledContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitParserElementAtom(self, ctx: Parser.ParserElementAtomContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitParserElementBlock(self, ctx: Parser.ParserElementBlockContext):
return self.wrap_suffix(self.visit(ctx.value), ctx.suffix)
def visitParserElementAction(self, ctx: Parser.ParserElementActionContext):
return ParserRule.EMPTY
def visitParserInlineDoc(self, ctx: Parser.ParserInlineDocContext):
docs = load_docs(self._model, [ctx.value], False)['documentation']
return ParserRule.Doc(value='\n'.join(d[1] for d in docs))
def visitLabeledElement(self, ctx: Parser.LabeledElementContext):
return self.visit(ctx.atom() or ctx.block())
def visitBlock(self, ctx: Parser.BlockContext):
return self.visit(ctx.altList())
def visitAtomWildcard(self, ctx: Parser.AtomWildcardContext):
return ParserRule.WILDCARD
def visitTerminalRef(self, ctx: Parser.TerminalRefContext):
return ParserRule.Reference(model=self._model, name=ctx.value.text)
def visitTerminalLit(self, ctx: Parser.TerminalLitContext):
return ParserRule.Reference(model=self._model, name=ctx.value.text)
def visitRuleref(self, ctx: Parser.RulerefContext):
return ParserRule.Reference(model=self._model, name=ctx.value.text)
def visitNotElement(self, ctx: Parser.NotElementContext):
return ParserRule.Negation(child=self.visit(ctx.value))
def visitNotBlock(self, ctx: Parser.NotBlockContext):
return ParserRule.Negation(child=self.visit(ctx.value))
def visitBlockSet(self, ctx: Parser.BlockSetContext):
return self.make_alt_rule(ctx.elements)
def visitSetElementRef(self, ctx: Parser.SetElementRefContext):
return ParserRule.Reference(model=self._model, name=ctx.value.text)
def visitSetElementLit(self, ctx: Parser.SetElementLitContext):
return ParserRule.Reference(model=self._model, name=ctx.value.text)
def visitSetElementCharSet(self, ctx: Parser.SetElementCharSetContext):
# Char sets are not allowed in parser rules,
# yet our grammar can match them...
return ParserRule.EMPTY
def visitCharacterRange(self, ctx: Parser.CharacterRangeContext):
# This also makes no sense...
return ParserRule.EMPTY
def load_docs(model, tokens, allow_cmd=True):
is_doxygen_nodoc = False
is_doxygen_inline = False
is_doxygen_no_diagram = False
css_class = None
importance = 1
name = None
docs: List[Tuple[int, str]] = []
for token in tokens:
text: str = token.text
position = Position(model.get_path(), token.line + model.get_offset())
if text.startswith('//@'):
match = CMD_RE.match(text)
if match is None:
logger.error(f'{position}: WARNING: invalid command {text!r}')
continue
if not allow_cmd:
logger.error(f'{position}: WARNING: commands not allowed here')
continue
cmd = match['cmd']
if cmd == 'nodoc':
is_doxygen_nodoc = True
elif cmd == 'inline':
is_doxygen_inline = True
elif cmd == 'no-diagram':
is_doxygen_no_diagram = True
elif cmd == 'unimportant':
importance = 0
elif cmd == 'importance':
try:
val = int(match['ctx'].strip())
except ValueError:
logger.error(f'{position}: WARNING: importance requires an integer argument')
continue
if val < 0:
logger.error(f'{position}: WARNING: importance should not be negative')
continue
importance = val
elif cmd == 'name':
name = match['ctx'].strip()
if not name:
logger.error(f'{position}: WARNING: name command requires an argument')
continue
elif cmd == 'css-class':
css_class = match['ctx'].strip()
if not css_class:
logger.error(f'{position}: WARNING: css-class command requires an argument')
continue
else:
logger.error(f'{position}: WARNING: unknown command {cmd!r}')
if cmd not in ['name', 'css-class', 'importance'] and match['ctx']:
logger.warning(f'argument for {cmd!r} command is ignored')
else:
documentation_lines = []
lines = text.splitlines()
if len(lines) == 1:
documentation_lines.append(lines[0][3:-2].strip())
else:
first_line = lines[0]
lines = lines[1:]
first_line = first_line[3:].strip()
documentation_lines.append(first_line)
lines[-1] = lines[-1][:-2].rstrip()
if not lines[-1].lstrip():
lines.pop()
if all(line.lstrip().startswith('*') for line in lines):
lines = [line.lstrip()[1:] for line in lines]
text = textwrap.dedent('\n'.join(lines))
documentation_lines.append(text)
docs.append((position.line, '\n'.join(documentation_lines)))
return dict(
importance=importance,
is_doxygen_inline=is_doxygen_inline,
is_doxygen_nodoc=is_doxygen_nodoc,
is_doxygen_no_diagram=is_doxygen_no_diagram,
css_class=css_class,
name=name,
documentation=docs
)
sphinx-a4doc-1.6.0/sphinx_a4doc/model/model.py 0000664 0000000 0000000 00000040714 14351074657 0021176 0 ustar 00root root 0000000 0000000 from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field, replace
from typing import *
try:
from typing.io import TextIO
except ImportError:
from typing import TextIO
__all__ = [
'ModelCache',
'Model',
'Position',
'RuleBase',
'ParserRule',
'LexerRule',
]
_global_cache = None
class ModelCache(metaclass=ABCMeta):
@staticmethod
def create() -> 'ModelCache':
"""
Create the default cache implementation.
"""
from sphinx_a4doc.model.impl import ModelCacheImpl
return ModelCacheImpl()
@staticmethod
def instance() -> 'ModelCache':
"""
Get a global cache instance.
"""
global _global_cache
if _global_cache is None:
_global_cache = ModelCache.create()
return _global_cache
@abstractmethod
def from_file(self, path: Union[str, Tuple[str, int]]) -> 'Model':
"""
Load a model from a file. If the file is not found, returns an empty model.
Models are cached by absolute path.
"""
@abstractmethod
def from_text(self, text: str, path: Union[str, Tuple[str, int]] = '', imports: List['Model'] = None) -> 'Model':
"""
Load model from text.
Models loaded from text are not cached; they also cannot use ``import``
statements, although pre-loaded imports may be supplied via the
``imports`` parameter. The ``path`` parameter is used purely for
error reporting.
"""
class Model(metaclass=ABCMeta):
@abstractmethod
def has_errors(self) -> bool:
"""
Returns true if any error occurred while parsing the model.
"""
@abstractmethod
def get_type(self) -> Optional[str]:
"""
Get grammar type: lexer or parser. Returns ``None`` for mixed grammars.
"""
@abstractmethod
def get_name(self) -> Optional[str]:
"""
Get grammar name. May be ``None`` for in-memory models or in case of
parsing failure.
"""
@abstractmethod
def is_in_memory(self) -> bool:
"""
Indicates that this model was loaded from memory and there is no real
file associated with it.
"""
@abstractmethod
def get_path(self) -> str:
"""
Get path for the file that this model was loaded from.
If model is in-memory, returns a placeholder
suitable for error reporting.
"""
@abstractmethod
def get_model_docs(self) -> Optional[List[Tuple[int, str]]]:
"""
Get documentation that appear on top of the model.
The returned list contains one item per documentation comment.
The first element of this item is a line number at which the comment
started, the second element is the comment itself.
"""
@abstractmethod
def lookup_local(self, name: str) -> Optional['RuleBase']:
"""
Look up a symbol with the given name.
Imported models are not checked.
"""
def lookup(self, name: str) -> Optional['RuleBase']:
"""
Look up a symbol with the given name.
Symbols in this model are checked first, then imported models.
To lookup literal tokens, pass contents of the literal,
e.g. `model.lookup("'literal'")`.
Returns `None` if symbol cannot be found.
If there are duplicate symbols, it is unspecified which one is returned.
"""
models = set()
visited = set()
models.add(self)
while models:
model = models.pop()
if model in visited:
continue
symbol = model.lookup_local(name)
if symbol is not None:
return symbol
models.update(model.get_imports())
visited.add(model)
return None
@abstractmethod
def get_imports(self) -> Iterable['Model']:
"""
Get all imported models.
No order of iteration is specified.
Note: cyclic imports are allowed in the model.
"""
@abstractmethod
def get_terminals(self) -> Iterable['LexerRule']:
"""
Get all terminals (including fragments) declared in this model.
Terminals declared in imported models are not included.
No iteration order is specified; sorting by position
must be performed manually.
"""
@abstractmethod
def get_non_terminals(self) -> Iterable['ParserRule']:
"""
Get all non-terminals (parser rules) declared in this model.
Non-terminals declared in imported models are not included.
No iteration order is specified; sorting by position
must be performed manually.
"""
@dataclass(order=True, frozen=True)
class Position:
file: str
"""Absolute path to the file in which this rule is declared"""
line: int
"""Line at which this rule is declared"""
def as_tuple(self):
return self.file, self.line
def __repr__(self):
return 'Position({!r}, {!r})'.format(self.file, self.line)
def __str__(self):
return '{}:{}'.format(self.file, self.line)
def meta(**kwargs):
"""
Decorator that sets meta for the given AST node.
"""
def wrapper(cls: 'RuleBase.RuleContent'):
cls.__meta__ = replace(cls.__meta__, **kwargs)
return cls
return wrapper
@dataclass(eq=False, frozen=True)
class Section:
"""
Represents a single section header, i.e. a group of comments that start
with a triple slash.
"""
docs: List[Tuple[int, str]]
"""List of documentation lines in the section description"""
@dataclass(eq=False, frozen=True)
class RuleBase:
"""
Base class for parser and lexer rules.
"""
name: str
"""Name of this parser rule"""
display_name: Optional[str]
"""Display name from `doc:name` command"""
model: Model
"""Reference to the model in which this rule was declared"""
position: Position
"""A position at which this rule is declared"""
content: Optional['RuleBase.RuleContent']
"""Body of the token or rule definition.
May be omitted for implicitly declared tokens or tokens that were declared
in the `tokens` section of a lexer.
"""
is_doxygen_nodoc: bool
"""Indicates that the ``'nodoc'`` flag is set for this rule.
If true, generators should not output any content for this rule.
"""
is_doxygen_no_diagram: bool
"""Indicates that the ``'no_diagram'`` flag is set.
If true, generators should not produce railroad diagram for this rule.
"""
css_class: Optional[str]
"""Custom css class set via `//@ doc:css_class`.
"""
is_doxygen_inline: bool
"""Indicates that the `'inline'` flag is set for this rule.
If true, generators should not output any content for this rule.
They should also inline contents of this rule when rendering
documentation for any other rule that refers this rule.
"""
importance: int
"""Importance of the rule"""
documentation: Optional[List[Tuple[int, str]]]
"""Documentation for this rule"""
section: Optional[Section]
"""Which section this rule belong to?"""
def __str__(self):
lines = [self.name]
if self.content is None:
lines.append(' <none>')  # '<none>' is an assumed placeholder; the original literal was stripped with other angle-bracketed text
else:
if isinstance(self.content, self.Alternative):
alts = self.content.children
else:
alts = self.content,
for i, alt in enumerate(alts):
if i == 0:
lines.append(' : ' + str(alt))
else:
lines.append(' | ' + str(alt))
lines.append(' ;')
return '\n'.join(lines)
class RuleContent:
"""
Base class for AST nodes that form lexer and parser rules.
"""
@dataclass(frozen=True)
class Meta:
precedence: int = 0
visitor_relay: str = 'visit_default'
formatter: Callable = field(default=lambda x, _: repr(x))
__meta__ = Meta()
def __str__(self):
p = self.__meta__.precedence
return self.__meta__.formatter(
self,
lambda x: f'{x}' if x.__meta__.precedence > p else f'({x})'
)
@dataclass(frozen=True)
@meta(visitor_relay='visit_reference')
@meta(precedence=4, formatter=lambda x, f: f'{x.name}')
class Reference(RuleContent):
"""
Refers another parser or lexer rule.
"""
model: Model
"""Reference to the model in which the rule is used"""
name: str
"""Referenced rule name"""
def get_reference(self) -> Optional['RuleBase']:
"""
Look up and return the actual rule object.
Returns ``None`` if the reference is invalid.
"""
return self.model.lookup(self.name)
@dataclass(frozen=True)
@meta(visitor_relay='visit_doc')
@meta(precedence=4, formatter=lambda x, f: f'/** {x.value} */')
class Doc(RuleContent):
"""
Inline documentation.
"""
value: str
@dataclass(frozen=True)
@meta(visitor_relay='visit_wildcard')
@meta(precedence=4, formatter=lambda x, f: f'.')
class Wildcard(RuleContent):
"""
Matches any token.
"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_negation')
@meta(precedence=3, formatter=lambda x, f: f'~{f(x.child)}')
class Negation(RuleContent):
"""
Matches anything but the child rule.
"""
child: 'RuleBase.RuleContent'
"""Rules that will be negated"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_zero_plus')
@meta(precedence=3, formatter=lambda x, f: f'{f(x.child)}*')
class ZeroPlus(RuleContent):
"""
Matches the child zero or more times.
"""
child: 'RuleBase.RuleContent'
"""Rule which will be parsed zero or more times"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_one_plus')
@meta(precedence=3, formatter=lambda x, f: f'{f(x.child)}+')
class OnePlus(RuleContent):
"""
Matches the child one or more times.
"""
child: 'RuleBase.RuleContent'
"""Rule which will be parsed one or more times"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_maybe')
@meta(precedence=3, formatter=lambda x, f: f'{f(x.child)}?')
class Maybe(RuleContent):
"""
Matches the child or nothing.
"""
child: 'RuleBase.RuleContent'
"""Rule which will be parsed"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_sequence')
@meta(precedence=1, formatter=lambda x, f: ' '.join(map(f, x.children)))
class Sequence(RuleContent):
"""
Matches a sequence of elements.
"""
children: Tuple['RuleBase.RuleContent', ...]
"""Children rules that will be parsed in order"""
linebreaks: Optional[Tuple[bool, ...]] = field(
default=None, compare=False, repr=False)
"""Bitmask which describes where it is preferable to wrap sequence"""
def __post_init__(self):
assert self.linebreaks is None or \
len(self.linebreaks) == len(self.children)
def get_linebreaks(self):
if self.linebreaks is not None:
return self.linebreaks
else:
return tuple([False] * len(self.children))
@dataclass(frozen=True)
@meta(visitor_relay='visit_alternative')
@meta(precedence=0, formatter=lambda x, f: ' | '.join(map(f, x.children)))
class Alternative(RuleContent):
"""
Matches any one of the children.
"""
children: Tuple['RuleBase.RuleContent', ...]
"""Children rules"""
@dataclass(eq=False, frozen=True)
class LexerRule(RuleBase):
content: Optional['LexerRule.RuleContent']
is_literal: bool
"""Indicates that this token is a literal token.
Literal tokens are tokens with a single fixed-string literal element.
"""
is_fragment: bool
"""Indicates that this rule is a fragment"""
@dataclass(frozen=True)
class RuleContent(RuleBase.RuleContent):
"""
Lexer rule definition syntax tree node.
"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_literal')
@meta(precedence=4, formatter=lambda x, f: f'{x.content}')
class Literal(RuleContent):
"""
A sequence of symbols (e.g. `'kwd'`).
"""
content: str
"""Formatted content of the literal, with special symbols escaped"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_range')
@meta(precedence=4, formatter=lambda x, f: f'{x.start}..{x.end}')
class Range(RuleContent):
"""
A range of symbols (e.g. `a..b`).
"""
start: str
"""Range first symbol"""
end: str
"""Range last symbol"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_charset')
@meta(precedence=4, formatter=lambda x, f: f'{x.content}')
class CharSet(RuleContent):
"""
A character set (e.g. `[a-zA-Z]`).
"""
content: str
"""Character set description, bracks included"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_reference')
class Reference(RuleContent, RuleBase.Reference):
def get_reference(self) -> Optional['LexerRule']:
rule = super().get_reference()
if rule is not None:
assert isinstance(rule, LexerRule)
return rule
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_doc')
class Doc(RuleContent, RuleBase.Doc):
pass
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_wildcard')
class Wildcard(RuleContent, RuleBase.Wildcard):
pass
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_negation')
class Negation(RuleContent, RuleBase.Negation):
child: 'LexerRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_zero_plus')
class ZeroPlus(RuleContent, RuleBase.ZeroPlus):
child: 'LexerRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_one_plus')
class OnePlus(RuleContent, RuleBase.OnePlus):
child: 'LexerRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_maybe')
class Maybe(RuleContent, RuleBase.Maybe):
child: 'LexerRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_sequence')
class Sequence(RuleContent, RuleBase.Sequence):
children: Tuple['LexerRule.RuleContent', ...]
@dataclass(frozen=True)
@meta(visitor_relay='visit_lexer_alternative')
class Alternative(RuleContent, RuleBase.Alternative):
children: Tuple['LexerRule.RuleContent', ...]
WILDCARD = Wildcard()
EMPTY = Sequence(())
@dataclass(eq=False, frozen=True)
class ParserRule(RuleBase):
content: Optional['ParserRule.RuleContent']
@dataclass(frozen=True)
class RuleContent(RuleBase.RuleContent):
"""
Parser rule definition syntax tree node.
"""
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_reference')
class Reference(RuleContent, RuleBase.Reference):
pass
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_doc')
class Doc(RuleContent, RuleBase.Doc):
pass
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_wildcard')
class Wildcard(RuleContent, RuleBase.Wildcard):
pass
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_negation')
class Negation(RuleContent, RuleBase.Negation):
child: 'ParserRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_zero_plus')
class ZeroPlus(RuleContent, RuleBase.ZeroPlus):
child: 'ParserRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_one_plus')
class OnePlus(RuleContent, RuleBase.OnePlus):
child: 'ParserRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_maybe')
class Maybe(RuleContent, RuleBase.Maybe):
child: 'ParserRule.RuleContent'
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_sequence')
class Sequence(RuleContent, RuleBase.Sequence):
children: Tuple['ParserRule.RuleContent', ...]
@dataclass(frozen=True)
@meta(visitor_relay='visit_parser_alternative')
class Alternative(RuleContent, RuleBase.Alternative):
children: Tuple['ParserRule.RuleContent', ...]
WILDCARD = Wildcard()
EMPTY = Sequence(())
sphinx-a4doc-1.6.0/sphinx_a4doc/model/model_renderer.py 0000664 0000000 0000000 00000023173 14351074657 0023064 0 ustar 00root root 0000000 0000000 from typing import *
import re
from sphinx_a4doc.model.model import RuleBase, LexerRule, ParserRule
from sphinx_a4doc.model.visitor import *
from sphinx_a4doc.settings import LiteralRendering
def cc_to_dash(name: str) -> str:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
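# For illustration: the two passes insert a dash before each word boundary
# and lowercase the result, e.g.
#   cc_to_dash('ParserRuleSpec') -> 'parser-rule-spec'
#   cc_to_dash('XMLParser') -> 'xml-parser'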
class ImportanceProvider(CachedRuleContentVisitor[int]):
"""
Given a rule content item, calculates its importance.
"""
def visit_literal(self, r: LexerRule.Literal) -> int:
return 1
def visit_range(self, r: LexerRule.Range) -> int:
return 1
def visit_charset(self, r: LexerRule.CharSet) -> int:
return 1
def visit_reference(self, r: RuleBase.Reference) -> int:
rule = r.get_reference()
if rule is None:
return 1
else:
return rule.importance
def visit_doc(self, r: RuleBase.Doc) -> int:
return 0
def visit_wildcard(self, r: RuleBase.Wildcard) -> int:
return 1
def visit_negation(self, r: RuleBase.Negation) -> int:
return self.visit(r.child)
def visit_zero_plus(self, r: RuleBase.ZeroPlus) -> int:
return self.visit(r.child)
def visit_one_plus(self, r: RuleBase.OnePlus) -> int:
return self.visit(r.child)
def visit_maybe(self, r: RuleBase.Maybe) -> int:
return self.visit(r.child)
def visit_sequence(self, r: RuleBase.Sequence) -> int:
return max(self.visit(c) for c in r.children)
def visit_alternative(self, r: RuleBase.Alternative) -> int:
return max(self.visit(c) for c in r.children)
class Renderer(CachedRuleContentVisitor[dict]):
def __init__(
self,
literal_rendering: LiteralRendering = LiteralRendering.CONTENTS_UNQUOTED,
do_cc_to_dash: bool = False,
importance_provider: ImportanceProvider = ImportanceProvider()
):
super().__init__()
self._do_cc_to_dash = do_cc_to_dash
self.literal_rendering = literal_rendering
self.importance_provider = importance_provider
@staticmethod
def _sequence(*items, linebreaks):
return dict(sequence=items, autowrap=True, linebreaks=linebreaks)
@staticmethod
def _stack(*items):
return dict(stack=items)
@staticmethod
def _choice(*items, default: int = 0):
return dict(choice=items, default=default)
@staticmethod
def _optional(item, skip: bool = False):
return dict(optional=item, skip=skip)
@staticmethod
def _one_or_more(item, repeat=None):
return dict(one_or_more=item, repeat=repeat)
@staticmethod
def _zero_or_more(item, repeat=None):
return dict(zero_or_more=item, repeat=repeat)
@staticmethod
def _terminal(text: str, href: Optional[str]=None, resolve: bool = True, title_is_weak: bool = False, css_class: Optional[str] = None):
return dict(terminal=text, href=href, resolve=resolve, title_is_weak=title_is_weak, css_class=css_class)
@staticmethod
def _non_terminal(text: str, href: Optional[str]=None, resolve: bool = True, title_is_weak: bool = False, css_class: Optional[str] = None):
return dict(non_terminal=text, href=href, resolve=resolve, title_is_weak=title_is_weak, css_class=css_class)
@staticmethod
def _comment(text: str, href: Optional[str]=None):
return dict(comment=text, href=href)
@staticmethod
def _literal(text: str):
return dict(literal=text)
@staticmethod
def _range(text: str):
return dict(range=text)
@staticmethod
def _charset(text: str):
return dict(charset=text)
@staticmethod
def _wildcard(text: str):
return dict(wildcard=text)
@staticmethod
def _negation(text: str):
return dict(negation=text)
@staticmethod
def _skip():
return None
def visit_literal(self, r: LexerRule.Literal):
return self._literal(r.content)
def visit_range(self, r: LexerRule.Range):
return self._range(f'{r.start}..{r.end}')
def visit_charset(self, r: LexerRule.CharSet):
return self._charset(r.content)
def visit_reference(self, r: RuleBase.Reference):
rule = r.get_reference()
if rule is None:
if r.name and (r.name[0].isupper() or r.name.startswith('\'')):
if r.name.startswith('\'') and r.name.endswith('\''):
if self.literal_rendering is LiteralRendering.CONTENTS_UNQUOTED:
name = r.name[1:-1]
else:
name = r.name
else:
name = self._cc_to_dash(r.name)
return self._terminal(name)
else:
return self._non_terminal(self._cc_to_dash(r.name))
elif rule.is_doxygen_inline and rule.content is not None:
return self.visit(rule.content)
elif isinstance(rule, LexerRule):
path = f'{rule.model.get_name()}.{rule.name}'
if rule.is_literal and self.literal_rendering is not LiteralRendering.NAME:
literal = str(rule.content)
if self.literal_rendering is LiteralRendering.CONTENTS_UNQUOTED:
literal = literal[1:-1]
return self._terminal(literal, path, css_class=rule.css_class)
else:
name = rule.display_name or self._cc_to_dash(rule.name)
return self._terminal(name, path, title_is_weak=True, css_class=rule.css_class)
elif isinstance(rule, ParserRule):
return self._non_terminal(
rule.display_name or self._cc_to_dash(rule.name),
f'{rule.model.get_name()}.{rule.name}',
title_is_weak=True,
css_class=rule.css_class)
else:
assert False
def visit_doc(self, r: RuleBase.Doc):
return self._comment(r.value)
def visit_wildcard(self, r: RuleBase.Wildcard):
return self._wildcard('.')
def visit_negation(self, r: RuleBase.Negation):
return self._negation(str(r))
def visit_zero_plus(self, r: RuleBase.ZeroPlus):
skip = not self.importance_provider.visit(r.child)
return self._optional(self._one_or_more(self.visit(r.child)), skip=skip)
def visit_one_plus(self, r: RuleBase.OnePlus):
return self._one_or_more(self.visit(r.child))
def visit_maybe(self, r: RuleBase.Maybe):
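# Special case: an optional two-way alternative whose branches are equally
# important, e.g. `(a | b)?`, is drawn as a three-way choice with the skip
# line in the middle instead of an optional wrapped around a choice.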
if (
isinstance(r.child, RuleBase.Alternative) and
len(r.child.children) == 2 and
self.importance_provider.visit(r.child.children[0]) ==
self.importance_provider.visit(r.child.children[1])
):
return self._choice(
self.visit(r.child.children[0]),
self._skip(),
self.visit(r.child.children[1]),
default=1,
)
skip = not self.importance_provider.visit(r.child)
return self._optional(self.visit(r.child), skip=skip)
def visit_sequence(self, r: RuleBase.Sequence):
return self._optimize_sequence(list(r.children),
list(r.get_linebreaks()))
def visit_alternative(self, r: RuleBase.Alternative):
default = max(enumerate(r.children),
key=lambda x: self.importance_provider.visit(x[1]))[0]
return self._choice(*[self.visit(c) for c in r.children], default=default)
def _optimize_sequence(self, seq: List[RuleBase.RuleContent], lb: List[bool]):
assert len(seq) == len(lb)
# We are trying to find a sub-sequence of form `x y z (A B x y z)*`
# and replace it with a single 'OneOrMore(Seq(x, y, z), Seq(A, B))'.
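# For example, a rule written as `expr (',' expr)*` collapses into a single
# loop, roughly one_or_more(expr, repeat=',').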
for i in range(len(seq) - 1, -1, -1):
# Our ZeroPlus rule with a sequence inside:
star = seq[i]
if not isinstance(star, RuleBase.ZeroPlus):
continue
if not isinstance(star.child, RuleBase.Sequence):
continue
nested_seq = list(star.child.children)
nested_seq_lb = list(star.child.get_linebreaks())
for j in range(len(nested_seq) - 1, -1, -1):
k = i + j - len(nested_seq)
if k < 0 or seq[k] != nested_seq[j]:
# Index of the seq after which our sub-sequence start
# (e.g. 0 if the first element of our sub-sequence
# is the first element of the sequence):
seq_start = k + 1
# Index of the nested_seq which splits main part
# and the repeat part (e.g. for [A, B, x, y, z]
# the index is 2):
nested_seq_start = j + 1
break
else:
seq_start = i - len(nested_seq)
nested_seq_start = 0
if seq_start == i:
# matched no elements from the nested sequence
continue
repeat = self._optimize_sequence(nested_seq[:nested_seq_start],
nested_seq_lb[:nested_seq_start])
main = self._optimize_sequence(nested_seq[nested_seq_start:],
nested_seq_lb[nested_seq_start:])
item = self._one_or_more(main, repeat)
seq[seq_start:i + 1] = [item]
lb[seq_start:i + 1] = [any(lb[seq_start:i + 1])]
return self._optimize_sequence(seq, lb)
return self._sequence(*[
e if isinstance(e, dict) else self.visit(e) for e in seq
], linebreaks=lb)
def _cc_to_dash(self, name):
if self._do_cc_to_dash:
return cc_to_dash(name)
else:
return name
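
# A minimal sketch of the conversion this method toggles (assuming the
# `cc_to_dash` helper behaves as the settings documentation describes):
#
#     cc_to_dash('MyRuleName')  # -> 'my-rule-name'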
sphinx-a4doc-1.6.0/sphinx_a4doc/model/reachable_finder.py 0000664 0000000 0000000 00000003451 14351074657 0023330 0 ustar 00root root 0000000 0000000 from sphinx_a4doc.model.model import RuleBase
from sphinx_a4doc.model.visitor import RuleContentVisitor
from typing import *
class _ReachableFinder(RuleContentVisitor[Set[RuleBase]]):
def __init__(self):
super().__init__()
self._seen = set()
def visit_literal(self, r) -> Set[RuleBase]:
return set()
def visit_range(self, r) -> Set[RuleBase]:
return set()
def visit_charset(self, r) -> Set[RuleBase]:
return set()
def visit_reference(self, r: RuleBase.Reference) -> Set[RuleBase]:
ref = r.get_reference()
if ref is None:
return set()
elif ref in self._seen:
return set()
else:
self._seen.add(ref)
return {ref} | self.visit(ref.content)
    def visit_doc(self, r: RuleBase.Doc) -> Set[RuleBase]:
return set()
def visit_wildcard(self, r: RuleBase.Wildcard) -> Set[RuleBase]:
return set()
def visit_negation(self, r: RuleBase.Negation) -> Set[RuleBase]:
return self.visit(r.child)
def visit_zero_plus(self, r: RuleBase.ZeroPlus) -> Set[RuleBase]:
return self.visit(r.child)
def visit_one_plus(self, r: RuleBase.OnePlus) -> Set[RuleBase]:
return self.visit(r.child)
def visit_maybe(self, r: RuleBase.Maybe) -> Set[RuleBase]:
return self.visit(r.child)
def visit_sequence(self, r: RuleBase.Sequence) -> Set[RuleBase]:
return set().union(*[self.visit(c) for c in r.children])
def visit_alternative(self, r: RuleBase.Alternative) -> Set[RuleBase]:
return set().union(*[self.visit(c) for c in r.children])
def find_reachable_rules(r: RuleBase) -> Set[RuleBase]:
"""
Calculates a set of rules that are reachable from the root rule.
"""
    return {r} | _ReachableFinder().visit(r.content)
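
# A minimal usage sketch (`model.lookup_rule` and `all_rules` below are
# hypothetical stand-ins for the actual model API):
#
#     root = model.lookup_rule('root')
#     reachable = find_reachable_rules(root)
#     hidden = [rule for rule in all_rules if rule not in reachable]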
sphinx-a4doc-1.6.0/sphinx_a4doc/model/visitor.py 0000664 0000000 0000000 00000010155 14351074657 0021571 0 ustar 00root root 0000000 0000000 from weakref import WeakKeyDictionary
from sphinx_a4doc.model.model import RuleBase, LexerRule, ParserRule
from typing import *
__all__ = [
'RuleContentVisitor',
'CachedRuleContentVisitor',
]
T = TypeVar('T')
class RuleContentVisitor(Generic[T]):
"""
Generic visitor for rule contents.
"""
def visit(self, r: RuleBase.RuleContent) -> T:
return getattr(self, r.__meta__.visitor_relay, self.visit_default)(r)
def visit_default(self, r: RuleBase.RuleContent) -> T:
raise RuntimeError(f'no visitor for {r.__class__.__name__!r}')
# Lexer
def visit_lexer_literal(self, r: LexerRule.Literal) -> T:
return self.visit_literal(r)
def visit_lexer_range(self, r: LexerRule.Range) -> T:
return self.visit_range(r)
def visit_lexer_charset(self, r: LexerRule.CharSet) -> T:
return self.visit_charset(r)
def visit_lexer_reference(self, r: LexerRule.Reference) -> T:
return self.visit_reference(r)
def visit_lexer_doc(self, r: LexerRule.Doc) -> T:
return self.visit_doc(r)
def visit_lexer_wildcard(self, r: LexerRule.Wildcard) -> T:
return self.visit_wildcard(r)
def visit_lexer_negation(self, r: LexerRule.Negation) -> T:
return self.visit_negation(r)
def visit_lexer_zero_plus(self, r: LexerRule.ZeroPlus) -> T:
return self.visit_zero_plus(r)
def visit_lexer_one_plus(self, r: LexerRule.OnePlus) -> T:
return self.visit_one_plus(r)
def visit_lexer_maybe(self, r: LexerRule.Maybe) -> T:
return self.visit_maybe(r)
def visit_lexer_sequence(self, r: LexerRule.Sequence) -> T:
return self.visit_sequence(r)
def visit_lexer_alternative(self, r: LexerRule.Alternative) -> T:
return self.visit_alternative(r)
# Parser
def visit_parser_reference(self, r: ParserRule.Reference) -> T:
return self.visit_reference(r)
def visit_parser_doc(self, r: ParserRule.Doc) -> T:
return self.visit_doc(r)
def visit_parser_wildcard(self, r: ParserRule.Wildcard) -> T:
return self.visit_wildcard(r)
def visit_parser_negation(self, r: ParserRule.Negation) -> T:
return self.visit_negation(r)
def visit_parser_zero_plus(self, r: ParserRule.ZeroPlus) -> T:
return self.visit_zero_plus(r)
def visit_parser_one_plus(self, r: ParserRule.OnePlus) -> T:
return self.visit_one_plus(r)
def visit_parser_maybe(self, r: ParserRule.Maybe) -> T:
return self.visit_maybe(r)
def visit_parser_sequence(self, r: ParserRule.Sequence) -> T:
return self.visit_sequence(r)
def visit_parser_alternative(self, r: ParserRule.Alternative) -> T:
return self.visit_alternative(r)
# Common
def visit_literal(self, r: LexerRule.Literal) -> T:
return self.visit_default(r)
def visit_range(self, r: LexerRule.Range) -> T:
return self.visit_default(r)
def visit_charset(self, r: LexerRule.CharSet) -> T:
return self.visit_default(r)
def visit_reference(self, r: RuleBase.Reference) -> T:
return self.visit_default(r)
def visit_doc(self, r: RuleBase.Doc) -> T:
return self.visit_default(r)
def visit_wildcard(self, r: RuleBase.Wildcard) -> T:
return self.visit_default(r)
def visit_negation(self, r: RuleBase.Negation) -> T:
return self.visit_default(r)
def visit_zero_plus(self, r: RuleBase.ZeroPlus) -> T:
return self.visit_default(r)
def visit_one_plus(self, r: RuleBase.OnePlus) -> T:
return self.visit_default(r)
def visit_maybe(self, r: RuleBase.Maybe) -> T:
return self.visit_default(r)
def visit_sequence(self, r: RuleBase.Sequence) -> T:
return self.visit_default(r)
def visit_alternative(self, r: RuleBase.Alternative) -> T:
return self.visit_default(r)
class CachedRuleContentVisitor(RuleContentVisitor[T]):
def __init__(self):
self._cache: Dict[RuleBase.RuleContent, T] = WeakKeyDictionary()
def visit(self, r: RuleBase.RuleContent) -> T:
if r not in self._cache:
self._cache[r] = super().visit(r)
return self._cache[r]
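
# A minimal sketch of a concrete visitor built on top of these classes
# (`LeafCounter` is illustrative and not part of the package):
#
#     class LeafCounter(CachedRuleContentVisitor[int]):
#         def visit_default(self, r) -> int:
#             return 1  # literals, references, wildcards, ...
#
#         def visit_sequence(self, r) -> int:
#             return sum(self.visit(c) for c in r.children)
#
#         def visit_alternative(self, r) -> int:
#             return sum(self.visit(c) for c in r.children)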
sphinx-a4doc-1.6.0/sphinx_a4doc/settings.py 0000664 0000000 0000000 00000030013 14351074657 0020625 0 ustar 00root root 0000000 0000000 from dataclasses import dataclass, field
from enum import Enum
from sphinx_a4doc.contrib.configurator import Namespace
from typing import *
class InternalAlignment(Enum):
"""
Controls how to align nodes within a single railroad.
See `DiagramSettings.internal_alignment` for documentation on elements.
"""
CENTER = 'CENTER'
LEFT = 'LEFT'
RIGHT = 'RIGHT'
AUTO_LEFT = 'AUTO_LEFT'
AUTO_RIGHT = 'AUTO_RIGHT'
class EndClass(Enum):
"""
    Controls how the start and the end of the diagram look.
See `DiagramSettings.end_class` for documentation on elements.
"""
SIMPLE = 'SIMPLE'
COMPLEX = 'COMPLEX'
class GrammarType(Enum):
"""
Antlr4 grammar types.
"""
MIXED = 'MIXED'
LEXER = 'LEXER'
PARSER = 'PARSER'
class OrderSettings(Enum):
"""
Controls how autodoc orders rules that are extracted from sources.
"""
BY_SOURCE = 'BY_SOURCE'
"""
Order by position in source file.
"""
BY_NAME = 'BY_NAME'
"""
Order by human-readable name.
"""
class GroupingSettings(Enum):
"""
Controls how autodoc groups rules that are extracted from sources.
"""
MIXED = 'MIXED'
"""
    Rules are not grouped.
"""
LEXER_FIRST = 'LEXER_FIRST'
"""
Lexer rules go first.
"""
PARSER_FIRST = 'PARSER_FIRST'
"""
Parser rules go first.
"""
class LiteralRendering(Enum):
"""
Controls how literal rules are rendered.
"""
NAME = 'NAME'
"""
Name of the rule is displayed.
"""
CONTENTS = 'CONTENTS'
"""
Contents of the rule are displayed.
"""
CONTENTS_UNQUOTED = 'CONTENTS_UNQUOTED'
"""
Contents of the rule are displayed, single quotes are stripped away.
"""
@dataclass(frozen=True)
class DiagramSettings:
"""
Settings for diagram directive.
"""
padding: Tuple[int, int, int, int] = (1, 1, 1, 1)
"""
Array of four positive integers denoting top, right, bottom and left
padding between the diagram and its container. By default, there is 1px
of padding on each side.
"""
vertical_separation: int = 8
"""
Vertical space between diagram lines.
"""
horizontal_separation: int = 10
"""
Horizontal space between items within a sequence.
"""
arc_radius: int = 10
"""
Arc radius of railroads. 10px by default.
"""
translate_half_pixel: bool = False
"""
If enabled, the diagram will be translated half-pixel in both directions.
May be used to deal with anti-aliasing issues when using odd stroke widths.
"""
internal_alignment: InternalAlignment = InternalAlignment.AUTO_LEFT
"""
    Determines how nodes are aligned within a single diagram line. Available
options are:
- ``center`` -- nodes are centered.
.. parser-rule-diagram:: (A B | C D E) (',' (A B | C D E))*
:internal-alignment: CENTER
- ``left`` -- nodes are flushed to left in all cases.
.. parser-rule-diagram:: (A B | C D E) (',' (A B | C D E))*
:internal-alignment: LEFT
- ``right`` -- nodes are flushed to right in all cases.
.. parser-rule-diagram:: (A B | C D E) (',' (A B | C D E))*
:internal-alignment: RIGHT
- ``auto_left`` -- nodes in choice groups are flushed left,
all other nodes are centered.
.. parser-rule-diagram:: (A B | C D E) (',' (A B | C D E))*
:internal-alignment: AUTO_LEFT
- ``auto_right`` -- nodes in choice groups are flushed right,
all other nodes are centered.
.. parser-rule-diagram:: (A B | C D E) (',' (A B | C D E))*
:internal-alignment: AUTO_RIGHT
"""
character_advance: float = 8.4
"""
    Average width of one character in the font used. Since SVG elements
    cannot expand and shrink dynamically, the length of a text node is
    calculated as the number of symbols multiplied by this constant.
"""
end_class: EndClass = EndClass.SIMPLE
"""
    Controls how the start and the end of the diagram look. Available options are:
- ``simple`` -- a simple ``T``-shaped ending.
.. parser-rule-diagram:: X
:end-class: SIMPLE
- ``complex`` -- a ``T``-shaped ending with vertical line doubled.
.. parser-rule-diagram:: X
:end-class: COMPLEX
"""
max_width: int = 500
"""
Max width after which a sequence will be wrapped. This option is used to
automatically convert sequences to stacks. Note that this is a suggestive
option, there is no guarantee that the diagram will
fit to its ``max_width``.
"""
literal_rendering: LiteralRendering = LiteralRendering.CONTENTS_UNQUOTED
"""
Controls how literal rules (i.e. lexer rules that only consist of one
string) are rendered. Available options are:
- ``name`` -- only name of the literal rule is displayed.
- ``contents`` -- quoted literal string is displayed.
.. parser-rule-diagram:: 'def' Id
:literal-rendering: contents
    - ``contents-unquoted`` -- literal string is displayed, quotes stripped
      away.
.. parser-rule-diagram:: 'def' Id
:literal-rendering: contents-unquoted
"""
cc_to_dash: bool = False
"""
    If a rule has no human-readable name set, convert its name from
``CamelCase`` to ``dash-case``.
"""
alt: Optional[str] = None
"""
    If the rendering engine does not support rendering the diagram contents,
    the specified string is used instead.
"""
@dataclass(frozen=True)
class GrammarSettings:
"""
Settings for grammar directive.
"""
name: Optional[str] = field(default=None, metadata=dict(no_global=True))
"""
Specifies a human-readable name for the grammar.
If given, the human-readable name will be rendered instead of the primary
grammar name. It will also replace the primary name in all cross references.
For example this code:
.. code-block:: rst
.. a4:grammar:: PrimaryName
:name: Human-readable name
    will render the following grammar description:
.. highlights::
.. a4:grammar:: PrimaryName
:noindex:
:name: Human-readable name
"""
type: GrammarType = field(default=GrammarType.MIXED, metadata=dict(no_global=True))
"""
Specifies a grammar type. The type will be displayed in the grammar
signature.
For example these three grammars:
.. code-block:: rst
.. a4:grammar:: Grammar1
.. a4:grammar:: Grammar2
:type: lexer
.. a4:grammar:: Grammar3
:type: parser
will be rendered differently:
.. highlights::
.. a4:grammar:: Grammar1
:noindex:
.. a4:grammar:: Grammar2
:noindex:
:type: lexer
.. a4:grammar:: Grammar3
:noindex:
:type: parser
"""
imports: List[str] = field(default_factory=list, metadata=dict(no_global=True))
"""
Specifies a list of imported grammars.
This option affects name resolution process for rule cross-references.
That is, if there is a reference to ``grammar.rule`` and there is no
``rule`` found in the ``grammar``, the imported grammars will be searched
as well.
Note that this setting is not passed through intersphinx.
"""
@dataclass(frozen=True)
class RuleSettings:
"""
Settings for rule directive.
"""
name: Optional[str] = field(default=None, metadata=dict(no_global=True))
"""
    Specifies a human-readable name for this rule. Refer to the corresponding
    option of the :rst:dir:`a4:grammar` directive for more info.
"""
@dataclass(frozen=True)
class AutogrammarSettings(GrammarSettings):
"""
Settings for autogrammar directive.
"""
only_reachable_from: Optional[str] = field(default=None, metadata=dict(no_global=True, rebuild=True))
"""
If given, autodoc will only render rules that are reachable from this root.
This is useful to exclude rules from imported grammars that are not used
by the primary grammar.
    The value should be either the name of a rule from the grammar that's
    being documented, or a full path which includes both the grammar name
    and the rule name.
For example, suppose there's ``Lexer.g4`` and ``Parser.g4``. To filter
lexer rules that are not used by parser grammar, use:
.. code-block:: rst
.. a4:autogrammar:: Parser
:only-reachable-from: Parser.root
.. a4:autogrammar:: Lexer
:only-reachable-from: Parser.root
"""
mark_root_rule: bool = field(default=True, metadata=dict(rebuild=True))
"""
If enabled, automatic diagram for the rule that's listed in
:rst:opt:`only-reachable-from` will use complex line endings
    (see the :rst:opt:`end-class` option
of the :rst:dir:`railroad-diagram` directive).
"""
lexer_rules: bool = field(default=True, metadata=dict(rebuild=True))
"""
Controls whether lexer rules should appear in documentation.
Enabled by default.
"""
parser_rules: bool = field(default=True, metadata=dict(rebuild=True))
"""
Controls whether parser rules should appear in documentation.
Enabled by default.
"""
fragments: bool = field(default=False, metadata=dict(rebuild=True))
"""
Controls whether fragments should appear in documentation.
Disabled by default.
"""
undocumented: bool = field(default=False, metadata=dict(rebuild=True))
"""
Controls whether undocumented rules should appear in documentation.
Disabled by default.
"""
grouping: GroupingSettings = field(default=GroupingSettings.MIXED, metadata=dict(rebuild=True))
"""
Controls how autodoc groups rules that are extracted from sources.
    - ``mixed`` -- there's one group that contains all rules.
    - ``lexer-first`` -- there are two groups: one for parser rules and one
      for lexer rules and fragments. The lexer group goes first.
    - ``parser-first`` -- like ``lexer-first``, but the parser group precedes
      the lexer group.
"""
ordering: OrderSettings = field(default=OrderSettings.BY_SOURCE, metadata=dict(rebuild=True))
"""
Controls how autodoc orders rules within each group
(see :rst:opt:`grouping` option).
- ``by-source`` -- rules are ordered as they appear in the grammar file.
- ``by-name`` -- rules are ordered lexicographically.
"""
honor_sections: bool = field(default=True, metadata=dict(rebuild=True))
"""
If true, render comments that start with a triple slash, treating them
    as paragraphs placed between rules.
This setting has no effect unless :rst:opt:`ordering` is ``by-source``.
.. versionadded:: 1.2.0
"""
cc_to_dash: bool = False
"""
    For rules without explicit human-readable names, generate ones by
    converting the rule name from ``CamelCase`` to ``dash-case``.
Setting this option will also set the ``diagram-cc-to-dash`` option, unless
the latter is specified explicitly.
"""
@dataclass(frozen=True)
class AutoruleSettings(GrammarSettings):
"""
Settings for autorule directive.
.. versionadded:: 1.2.0
"""
@dataclass(frozen=True)
class GlobalSettings:
"""
Global A4Doc settings. Each member of this dataclass will be added
to the global sphinx settings registry with prefix ``a4_``.
"""
base_path: str = field(default='.', metadata=dict(rebuild=True))
"""
    Path in which autodoc searches for grammar files.
"""
diagram_namespace = Namespace('a4_diagram', DiagramSettings)
grammar_namespace = Namespace('a4_grammar', GrammarSettings)
rule_namespace = Namespace('a4_rule', RuleSettings)
autogrammar_namespace = Namespace('a4_autogrammar', AutogrammarSettings)
autorule_namespace = Namespace('a4_autorule', AutoruleSettings)
global_namespace = Namespace('a4', GlobalSettings)
def register_settings(app):
diagram_namespace.register_settings(app)
grammar_namespace.register_settings(app)
rule_namespace.register_settings(app)
autogrammar_namespace.register_settings(app)
autorule_namespace.register_settings(app)
global_namespace.register_settings(app)
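
# A sketch of how these registrations are typically wired into a Sphinx
# extension entry point (the package's real `setup()` lives elsewhere):
#
#     def setup(app):
#         register_settings(app)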
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/ 0000775 0000000 0000000 00000000000 14351074657 0017744 5 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/ANTLRv4Lexer.g4 0000664 0000000 0000000 00000017241 14351074657 0022337 0 ustar 00root root 0000000 0000000 lexer grammar ANTLRv4Lexer;
options {
superClass = LexerAdaptor ;
}
import LexBasic; // Standard set of fragments
@header {
from sphinx_a4doc.syntax.lexer_adaptor import LexerAdaptor
}
tokens {
TOKEN_REF,
RULE_REF,
LEXER_CHAR_SET
}
channels {
OFF_CHANNEL // non-default channel for whitespace and comments
}
// ======================================================
// Lexer specification
//
// -------------------------
// Comments
DOC_COMMENT
: DocComment
;
HEADER
: '///' ~[\r\n]*
;
BLOCK_COMMENT
: BlockComment -> channel(OFF_CHANNEL)
;
LINE_COMMENT
: LineComment -> channel(OFF_CHANNEL)
;
// -------------------------
// Integer
//
INT : DecimalNumeral
;
// -------------------------
// Literal string
//
// ANTLR makes no distinction between a single character literal and a
// multi-character string. All literals are single quote delimited and
// may contain unicode escape sequences of the form \uxxxx, where each x
// is a valid hexadecimal digit (per the Unicode standard).
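// For example (illustrative literals, not rules of this grammar):
//   'while'    -- a multi-character string
//   '\u0041'   -- a unicode escape for 'A'
//   'it\'s'    -- an escaped single quote inside a literal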
STRING_LITERAL
: SQuoteLiteral
;
UNTERMINATED_STRING_LITERAL
: USQuoteLiteral
;
// -------------------------
// Arguments
//
// Certain argument lists, such as those specifying call parameters
// to a rule invocation, or input parameters to a rule specification
// are contained within square brackets.
BEGIN_ARGUMENT
: LBrack { self.handleBeginArgument() }
;
// -------------------------
// Actions
BEGIN_ACTION
: LBrace -> pushMode(Action)
;
// -------------------------
// Keywords
//
// Keywords may not be used as labels for rules or in any other context where
// they would be ambiguous with the keyword vs some other identifier. OPTIONS,
// TOKENS, & CHANNELS blocks are handled idiomatically in dedicated lexical modes.
OPTIONS : 'options' [ \t\f\n\r]* '{' -> pushMode(Options) ;
TOKENS : 'tokens' [ \t\f\n\r]* '{' -> pushMode(Tokens) ;
CHANNELS : 'channels' [ \t\f\n\r]* '{' -> pushMode(Channels) ;
IMPORT : 'import' ;
FRAGMENT : 'fragment' ;
LEXER : 'lexer' ;
PARSER : 'parser' ;
GRAMMAR : 'grammar' ;
PROTECTED : 'protected' ;
PUBLIC : 'public' ;
PRIVATE : 'private' ;
RETURNS : 'returns' ;
LOCALS : 'locals' ;
THROWS : 'throws' ;
CATCH : 'catch' ;
FINALLY : 'finally' ;
MODE : 'mode' ;
// -------------------------
// Punctuation
COLON : Colon ;
COLONCOLON : DColon ;
COMMA : Comma ;
SEMI : Semi ;
LPAREN : LParen ;
RPAREN : RParen ;
LBRACE : LBrace ;
RBRACE : RBrace ;
RARROW : RArrow ;
LT : Lt ;
GT : Gt ;
ASSIGN : Equal ;
QUESTION : Question ;
STAR : Star ;
PLUS_ASSIGN : PlusAssign ;
PLUS : Plus ;
OR : Pipe ;
DOLLAR : Dollar ;
RANGE : Range ;
DOT : Dot ;
AT : At ;
POUND : Pound ;
NOT : Tilde ;
// -------------------------
// Identifiers - allows unicode rule/token names
ID : Id
;
// -------------------------
// Whitespace
WS : Ws+ -> channel(OFF_CHANNEL) ;
// -------------------------
// Illegal Characters
//
// This is an illegal character trap which is always the last rule in the
// lexer specification. It matches a single character of any value and being
// the last rule in the file will match when no other rule knows what to do
// about the character. It is reported as an error but is not passed on to the
// parser. This means that the parser is still able to deal with the grammar
// file, but we will not try to analyse or generate code from a file with
// lexical errors.
//
// Comment this rule out to allow the error to be propagated to the parser
ERRCHAR
: . -> channel(HIDDEN)
;
// ======================================================
// Lexer modes
// -------------------------
// Arguments
mode Argument; // E.g., [int x, List a[]]
NESTED_ARGUMENT : LBrack -> type(ARGUMENT_CONTENT), pushMode(Argument) ;
ARGUMENT_ESCAPE : EscAny -> type(ARGUMENT_CONTENT) ;
ARGUMENT_STRING_LITERAL : DQuoteLiteral -> type(ARGUMENT_CONTENT) ;
ARGUMENT_CHAR_LITERAL : SQuoteLiteral -> type(ARGUMENT_CONTENT) ;
END_ARGUMENT : RBrack { self.handleEndArgument() } ;
// added this to return non-EOF token type here. EOF does something weird
UNTERMINATED_ARGUMENT : EOF -> popMode ;
ARGUMENT_CONTENT : . ;
// -------------------------
// Actions
//
// Many language targets use {} as block delimiters and so we
// must recursively match {} delimited blocks to balance the
// braces. Additionally, we must make some assumptions about
// literal string representation in the target language. We assume
// that they are delimited by ' or " and so consume these
// in their own alts so as not to inadvertently match {}.
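// For example, an action like { if (x) { y(); } } is consumed as one
// balanced block: each inner LBrace pushes this mode again and the matching
// RBrace pops it, so only the outermost RBrace ends the action.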
mode Action;
NESTED_ACTION : LBrace -> type(ACTION_CONTENT), pushMode(Action) ;
ACTION_ESCAPE : EscAny -> type(ACTION_CONTENT) ;
ACTION_STRING_LITERAL : DQuoteLiteral -> type(ACTION_CONTENT) ;
ACTION_CHAR_LITERAL : SQuoteLiteral -> type(ACTION_CONTENT) ;
ACTION_DOC_COMMENT : DocComment -> type(ACTION_CONTENT) ;
ACTION_BLOCK_COMMENT : BlockComment -> type(ACTION_CONTENT) ;
ACTION_LINE_COMMENT : LineComment -> type(ACTION_CONTENT) ;
END_ACTION : RBrace { self.handleEndAction() } ;
UNTERMINATED_ACTION : EOF -> popMode ;
ACTION_CONTENT : . ;
// -------------------------
mode Options;
OPT_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ;
OPT_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ;
OPT_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ;
OPT_LBRACE : LBrace -> type(LBRACE) ;
OPT_RBRACE : RBrace -> type(RBRACE), popMode ;
OPT_ID : Id -> type(ID) ;
OPT_DOT : Dot -> type(DOT) ;
OPT_ASSIGN : Equal -> type(ASSIGN) ;
OPT_STRING_LITERAL : SQuoteLiteral -> type(STRING_LITERAL) ;
OPT_INT : DecimalNumeral -> type(INT) ;
OPT_STAR : Star -> type(STAR) ;
OPT_SEMI : Semi -> type(SEMI) ;
OPT_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ;
// -------------------------
mode Tokens;
TOK_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ;
TOK_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ;
TOK_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ;
TOK_LBRACE : LBrace -> type(LBRACE) ;
TOK_RBRACE : RBrace -> type(RBRACE), popMode ;
TOK_ID : Id -> type(ID) ;
TOK_DOT : Dot -> type(DOT) ;
TOK_COMMA : Comma -> type(COMMA) ;
TOK_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ;
// -------------------------
mode Channels; // currently same as Tokens mode; distinguished by keyword
CHN_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ;
CHN_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ;
CHN_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ;
CHN_LBRACE : LBrace -> type(LBRACE) ;
CHN_RBRACE : RBrace -> type(RBRACE), popMode ;
CHN_ID : Id -> type(ID) ;
CHN_DOT : Dot -> type(DOT) ;
CHN_COMMA : Comma -> type(COMMA) ;
CHN_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ;
// -------------------------
mode LexerCharSet;
LEXER_CHAR_SET_BODY
: ( ~[\]\\]
| EscAny
)+ -> more
;
LEXER_CHAR_SET
: RBrack -> popMode
;
UNTERMINATED_CHAR_SET
: EOF -> popMode
;
// ------------------------------------------------------------------------------
// Grammar specific Keywords, Punctuation, etc.
fragment Id : NameStartChar NameChar*;
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/ANTLRv4Lexer.tokens 0000664 0000000 0000000 00000001612 14351074657 0023323 0 ustar 00root root 0000000 0000000 TOKEN_REF=1
RULE_REF=2
LEXER_CHAR_SET=3
DOC_COMMENT=4
HEADER=5
BLOCK_COMMENT=6
LINE_COMMENT=7
INT=8
STRING_LITERAL=9
UNTERMINATED_STRING_LITERAL=10
BEGIN_ARGUMENT=11
BEGIN_ACTION=12
OPTIONS=13
TOKENS=14
CHANNELS=15
IMPORT=16
FRAGMENT=17
LEXER=18
PARSER=19
GRAMMAR=20
PROTECTED=21
PUBLIC=22
PRIVATE=23
RETURNS=24
LOCALS=25
THROWS=26
CATCH=27
FINALLY=28
MODE=29
COLON=30
COLONCOLON=31
COMMA=32
SEMI=33
LPAREN=34
RPAREN=35
LBRACE=36
RBRACE=37
RARROW=38
LT=39
GT=40
ASSIGN=41
QUESTION=42
STAR=43
PLUS_ASSIGN=44
PLUS=45
OR=46
DOLLAR=47
RANGE=48
DOT=49
AT=50
POUND=51
NOT=52
ID=53
WS=54
ERRCHAR=55
END_ARGUMENT=56
UNTERMINATED_ARGUMENT=57
ARGUMENT_CONTENT=58
END_ACTION=59
UNTERMINATED_ACTION=60
ACTION_CONTENT=61
UNTERMINATED_CHAR_SET=62
'import'=16
'fragment'=17
'lexer'=18
'parser'=19
'grammar'=20
'protected'=21
'public'=22
'private'=23
'returns'=24
'locals'=25
'throws'=26
'catch'=27
'finally'=28
'mode'=29
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/ANTLRv4Parser.g4 0000664 0000000 0000000 00000020727 14351074657 0022517 0 ustar 00root root 0000000 0000000 /*
* [The "BSD license"]
* Copyright (c) 2012-2014 Terence Parr
* Copyright (c) 2012-2014 Sam Harwell
* Copyright (c) 2015 Gerald Rosenberg
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* A grammar for ANTLR v4 written in ANTLR v4.
*
* Modified 2018.08.13 taminomara -
* -- annotate some code parts with labels and named alternatives
* -- update tokens, options and channels spec to match new lexer behaviour
* Modified 2015.06.16 gbr -
* -- update for compatibility with Antlr v4.5
* -- add mode for channels
* -- moved members to LexerAdaptor
* -- move fragments to imports
*/
parser grammar ANTLRv4Parser;
options {
tokenVocab = ANTLRv4Lexer;
}
//@ doc:unimportant
/**
* The main entry point for parsing a v4 grammar.
*/
grammarSpec
: docs+=DOC_COMMENT* gtype=grammarType gname=identifier SEMI prequelConstruct* rules modeSpec* EOF
;
grammarType
: (LEXER GRAMMAR | PARSER GRAMMAR | GRAMMAR)
;
// This is the list of all constructs that can be declared before
// the set of rules that compose the grammar, and is matched 0..n
// times by the grammarSpec rule.
prequelConstruct
: optionsSpec
| delegateGrammars
| tokensSpec
| channelsSpec
| action
;
// ------------
// Options - things that affect analysis and/or code generation
optionsSpec
: OPTIONS /* LBRACE */ (option SEMI)* RBRACE
;
option
: name=identifier ASSIGN value=optionValue
;
optionValue
: value+=identifier (DOT value+=identifier)* # pathOption
| value=STRING_LITERAL # stringOption
| value=actionBlock # actionOption
| value=INT # intOption
;
// ------------
// Delegates
delegateGrammars
: IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI
;
delegateGrammar
: value=identifier
;
// ------------
// Tokens & Channels
tokensSpec
: TOKENS /* LBRACE */ defs=idList? RBRACE
;
channelsSpec
: CHANNELS /* LBRACE */ idList? RBRACE
;
idList
: defs+=identifier (COMMA defs+=identifier)* COMMA?
;
// Match stuff like @parser::members {int i;}
action
: AT (actionScopeName COLONCOLON)? identifier actionBlock
;
// Scope names could collide with keywords; allow them as ids for action scopes
actionScopeName
: identifier
| LEXER
| PARSER
;
actionBlock
: BEGIN_ACTION ACTION_CONTENT* END_ACTION
;
argActionBlock
: BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT
;
modeSpec
: MODE identifier SEMI lexerRuleSpec*
;
rules
: ruleSpec*
;
ruleSpec
: headers+=HEADER* (parserRuleSpec | lexerRuleSpec)
;
parserRuleSpec
: docs+=DOC_COMMENT* ruleModifiers? name=RULE_REF argActionBlock? ruleReturns? throwsSpec? localsSpec? rulePrequel* COLON ruleBlock SEMI exceptionGroup
;
exceptionGroup
: exceptionHandler* finallyClause?
;
exceptionHandler
: CATCH argActionBlock actionBlock
;
finallyClause
: FINALLY actionBlock
;
rulePrequel
: optionsSpec
| ruleAction
;
ruleReturns
: RETURNS argActionBlock
;
// --------------
// Exception spec
throwsSpec
: THROWS identifier (COMMA identifier)*
;
localsSpec
: LOCALS argActionBlock
;
/** Match stuff like @init {int i;} */
ruleAction
: AT identifier actionBlock
;
ruleModifiers
: ruleModifier +
;
// An individual access modifier for a rule. The 'fragment' modifier
// is an internal indication for lexer rules that they do not match
// from the input but are like subroutines for other lexer rules to
// reuse for certain lexical patterns. The other modifiers are passed
// to the code generation templates and may be ignored by the template
// if they are of no use in that language.
ruleModifier
: PUBLIC
| PRIVATE
| PROTECTED
| FRAGMENT
;
ruleBlock
: ruleAltList
;
ruleAltList
: alts+=labeledAlt (OR alts+=labeledAlt)*
;
labeledAlt
: alternative (POUND identifier)?
;
// --------------------
// Lexer rules
lexerRuleSpec
: docs+=DOC_COMMENT* frag=FRAGMENT? name=TOKEN_REF COLON lexerRuleBlock SEMI
;
lexerRuleBlock
: lexerAltList
;
lexerAltList
: alts+=lexerAlt (OR alts+=lexerAlt)*
;
lexerAlt
: lexerElements lexerCommands?
|
// explicitly allow empty alts
;
lexerElements
: elements+=lexerElement+
;
lexerElement
: value=labeledLexerElement suffix=ebnfSuffix? # lexerElementLabeled
| value=lexerAtom suffix=ebnfSuffix? # lexerElementAtom
| value=lexerBlock suffix=ebnfSuffix? # lexerElementBlock
| actionBlock QUESTION? # lexerElementAction
;
// but preds can be anywhere
labeledLexerElement
: identifier (ASSIGN | PLUS_ASSIGN) (lexerAtom | lexerBlock)
;
lexerBlock
: LPAREN lexerAltList RPAREN
;
// E.g., channel(HIDDEN), skip, more, mode(INSIDE), push(INSIDE), pop
lexerCommands
: RARROW lexerCommand (COMMA lexerCommand)*
;
lexerCommand
: lexerCommandName LPAREN lexerCommandExpr RPAREN
| lexerCommandName
;
lexerCommandName
: identifier
| MODE
;
lexerCommandExpr
: identifier
| INT
;
// --------------------
// Rule Alts
altList
: alts+=alternative (OR alts+=alternative)*
;
alternative
: elementOptions? elements+=element+
|
// explicitly allow empty alts
;
element
: value=labeledElement suffix=ebnfSuffix? # parserElementLabeled
| value=atom suffix=ebnfSuffix? # parserElementAtom
| value=block suffix=ebnfSuffix? # parserElementBlock
| actionBlock QUESTION? # parserElementAction
| value=DOC_COMMENT # parserInlineDoc
;
labeledElement
: identifier (ASSIGN | PLUS_ASSIGN) (atom | block)
;
// --------------------
// EBNF and blocks
ebnfSuffix
: QUESTION QUESTION?
| STAR QUESTION?
| PLUS QUESTION?
;
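// For example, ebnfSuffix matches the suffix in `expr?`, `expr??`, `expr*`,
// `expr*?`, `expr+` and `expr+?`; the extra `?` marks a non-greedy subrule.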
lexerAtom
: characterRange # lexerAtomRange
| terminal # lexerAtomTerminal
| notSet # lexerAtomNot
| value=LEXER_CHAR_SET # lexerAtomCharSet
| DOT elementOptions? # lexerAtomWildcard
| value=DOC_COMMENT # lexerAtomDoc
;
atom
: terminal # atomTerminal
| ruleref # atomRuleRef
| notSet # atomNot
| DOT elementOptions? # atomWildcard
;
// --------------------
// Inverted element set
notSet
: NOT value=setElement # notElement
| NOT value=blockSet # notBlock
;
blockSet
: LPAREN elements+=setElement (OR elements+=setElement)* RPAREN
;
setElement
: value=TOKEN_REF elementOptions? # setElementRef
| value=STRING_LITERAL elementOptions? # setElementLit
| characterRange # setElementRange
| value=LEXER_CHAR_SET # setElementCharSet
;
// -------------
// Grammar Block
block
: LPAREN (optionsSpec? ruleAction* COLON)? altList RPAREN
;
// ----------------
// Parser rule ref
ruleref
: value=RULE_REF argActionBlock? elementOptions?
;
// ---------------
// Character Range
characterRange
: start=STRING_LITERAL RANGE end=STRING_LITERAL
;
terminal
: value=TOKEN_REF elementOptions? # terminalRef
| value=STRING_LITERAL elementOptions? # terminalLit
;
// Terminals may be adorned with certain options when
// referenced in the grammar: TOK<,,,>
elementOptions
: LT elementOption (COMMA elementOption)* GT
;
elementOption
: identifier
| identifier ASSIGN (identifier | STRING_LITERAL)
;
identifier
: value=RULE_REF # ruleRefIdentifier
| value=TOKEN_REF # tokenRefIdentifier
;
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/LexBasic.g4 0000664 0000000 0000000 00000011161 14351074657 0021672 0 ustar 00root root 0000000 0000000 /*
* [The "BSD license"]
* Copyright (c) 2014-2015 Gerald Rosenberg
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
 * A generally reusable set of fragments for import into lexer grammars.
*
* Modified 2018.08.13 taminomara -
* -- treat `'//@' ~ [\r\n]*` as doc comments
* Modified 2015.06.16 gbr -
* -- generalized for inclusion into the ANTLRv4 grammar distribution
*
*/
lexer grammar LexBasic;
// ======================================================
// Lexer fragments
//
// -----------------------------------
// Whitespace & Comments
fragment Ws
: Hws | Vws
;
fragment Hws
: [ \t]
;
fragment Vws
: [\r\n\f]
;
fragment BlockComment
: '/*' .*? ('*/' | EOF)
;
fragment DocComment
: '/**' .*? ('*/' | EOF)
| '//@' ~ [\r\n]*
;
fragment LineComment
: '//' ~ [\r\n]*
;
// -----------------------------------
// Escapes
// Any kind of escaped character that we can embed within ANTLR literal strings.
fragment EscSeq
: Esc ([btnfr"'\\] | UnicodeEsc | . | EOF)
;
fragment EscAny
: Esc .
;
fragment UnicodeEsc
: 'u' (HexDigit (HexDigit (HexDigit HexDigit?)?)?)?
;
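// Note: zero to four hex digits are accepted, so `\u0041` as well as
// shorter, incomplete forms such as `\u00` match at the lexical level.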
// -----------------------------------
// Numerals
fragment DecimalNumeral
: '0' | [1-9] DecDigit*
;
// -----------------------------------
// Digits
fragment HexDigit
: [0-9a-fA-F]
;
fragment DecDigit
: [0-9]
;
// -----------------------------------
// Literals
fragment BoolLiteral
: 'true' | 'false'
;
fragment CharLiteral
: SQuote (EscSeq | ~ ['\r\n\\]) SQuote
;
fragment SQuoteLiteral
: SQuote (EscSeq | ~ ['\r\n\\])* SQuote
;
fragment DQuoteLiteral
: DQuote (EscSeq | ~ ["\r\n\\])* DQuote
;
fragment USQuoteLiteral
: SQuote (EscSeq | ~ ['\r\n\\])*
;
// -----------------------------------
// Character ranges
fragment NameChar
: NameStartChar | '0' .. '9' | Underscore | '\u00B7' | '\u0300' .. '\u036F' | '\u203F' .. '\u2040'
;
fragment NameStartChar
: 'A' .. 'Z' | 'a' .. 'z' | '\u00C0' .. '\u00D6' | '\u00D8' .. '\u00F6' | '\u00F8' .. '\u02FF' | '\u0370' .. '\u037D' | '\u037F' .. '\u1FFF' | '\u200C' .. '\u200D' | '\u2070' .. '\u218F' | '\u2C00' .. '\u2FEF' | '\u3001' .. '\uD7FF' | '\uF900' .. '\uFDCF' | '\uFDF0' .. '\uFFFD'
;
// ignores | ['\u10000-'\uEFFFF] ;
// -----------------------------------
// Types
fragment Int
: 'int'
;
// -----------------------------------
// Symbols
fragment Esc
: '\\'
;
fragment Colon
: ':'
;
fragment DColon
: '::'
;
fragment SQuote
: '\''
;
fragment DQuote
: '"'
;
fragment LParen
: '('
;
fragment RParen
: ')'
;
fragment LBrace
: '{'
;
fragment RBrace
: '}'
;
fragment LBrack
: '['
;
fragment RBrack
: ']'
;
fragment RArrow
: '->'
;
fragment Lt
: '<'
;
fragment Gt
: '>'
;
fragment Equal
: '='
;
fragment Question
: '?'
;
fragment Star
: '*'
;
fragment Plus
: '+'
;
fragment PlusAssign
: '+='
;
fragment Underscore
: '_'
;
fragment Pipe
: '|'
;
fragment Dollar
: '$'
;
fragment Comma
: ','
;
fragment Semi
: ';'
;
fragment Dot
: '.'
;
fragment Range
: '..'
;
fragment At
: '@'
;
fragment Pound
: '#'
;
fragment Tilde
: '~'
;
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/LexBasic.tokens 0000664 0000000 0000000 00000000000 14351074657 0022651 0 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/__init__.py 0000664 0000000 0000000 00000000567 14351074657 0022065 0 ustar 00root root 0000000 0000000 from .gen.syntax.ANTLRv4Lexer import ANTLRv4Lexer as Lexer
from .gen.syntax.ANTLRv4Parser import ANTLRv4Parser as Parser
from .gen.syntax.ANTLRv4ParserListener import ANTLRv4ParserListener as ParserListener
from .gen.syntax.ANTLRv4ParserVisitor import ANTLRv4ParserVisitor as ParserVisitor
__all__ = [
'Lexer',
'Parser',
'ParserListener',
'ParserVisitor',
]
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/ 0000775 0000000 0000000 00000000000 14351074657 0020515 5 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/__init__.py 0000664 0000000 0000000 00000000000 14351074657 0022614 0 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ 0000775 0000000 0000000 00000000000 14351074657 0022043 5 ustar 00root root 0000000 0000000 sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4Lexer.py 0000664 0000000 0000000 00000123417 14351074657 0024557 0 ustar 00root root 0000000 0000000 from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
from sphinx_a4doc.syntax.lexer_adaptor import LexerAdaptor
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2@")
buf.write("\u03f6\b\1\b\1\b\1\b\1\b\1\b\1\b\1\4\2\t\2\4\3\t\3\4\4")
buf.write("\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4")
buf.write("\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20")
buf.write("\4\21\t\21\4\22\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26")
buf.write("\t\26\4\27\t\27\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33")
buf.write("\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4")
buf.write("\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*")
buf.write("\t*\4+\t+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61")
buf.write("\4\62\t\62\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67")
buf.write("\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?")
buf.write("\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\t")
buf.write("H\4I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\t")
buf.write("Q\4R\tR\4S\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\t")
buf.write("Z\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4")
buf.write("c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k\tk\4")
buf.write("l\tl\4m\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4")
buf.write("u\tu\4v\tv\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4")
buf.write("~\t~\4\177\t\177\4\u0080\t\u0080\4\u0081\t\u0081\4\u0082")
buf.write("\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084\4\u0085\t\u0085")
buf.write("\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089")
buf.write("\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c")
buf.write("\4\u008d\t\u008d\4\u008e\t\u008e\4\u008f\t\u008f\4\u0090")
buf.write("\t\u0090\4\u0091\t\u0091\4\u0092\t\u0092\4\u0093\t\u0093")
buf.write("\4\u0094\t\u0094\4\u0095\t\u0095\4\u0096\t\u0096\4\u0097")
buf.write("\t\u0097\4\u0098\t\u0098\4\u0099\t\u0099\4\u009a\t\u009a")
buf.write("\3\2\3\2\3\3\3\3\3\3\3\3\3\3\7\3\u0143\n\3\f\3\16\3\u0146")
buf.write("\13\3\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\6\3\6\3\7\3\7")
buf.write("\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3")
buf.write("\13\3\13\3\13\3\13\3\13\3\13\7\13\u0166\n\13\f\13\16\13")
buf.write("\u0169\13\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\f\7\f\u0177\n\f\f\f\16\f\u017a\13\f\3\f\3\f\3")
buf.write("\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u018a")
buf.write("\n\r\f\r\16\r\u018d\13\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3")
buf.write("!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3)")
buf.write("\3)\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3\61")
buf.write("\3\61\3\62\3\62\3\63\3\63\3\64\6\64\u022b\n\64\r\64\16")
buf.write("\64\u022c\3\64\3\64\3\65\3\65\3\65\3\65\3\66\3\66\5\66")
buf.write("\u0237\n\66\3\67\3\67\38\38\39\39\39\39\79\u0241\n9\f")
buf.write("9\169\u0244\139\39\39\39\59\u0249\n9\3:\3:\3:\3:\3:\7")
buf.write(":\u0250\n:\f:\16:\u0253\13:\3:\3:\3:\5:\u0258\n:\3:\3")
buf.write(":\3:\3:\3:\7:\u025f\n:\f:\16:\u0262\13:\5:\u0264\n:\3")
buf.write(";\3;\3;\3;\7;\u026a\n;\f;\16;\u026d\13;\3<\3<\3<\3<\3")
buf.write("<\5<\u0274\n<\3=\3=\3=\3>\3>\3>\3>\3>\5>\u027e\n>\5>\u0280")
buf.write("\n>\5>\u0282\n>\5>\u0284\n>\3?\3?\3?\7?\u0289\n?\f?\16")
buf.write("?\u028c\13?\5?\u028e\n?\3@\3@\3A\3A\3B\3B\3B\3B\3B\3B")
buf.write("\3B\3B\3B\5B\u029d\nB\3C\3C\3C\5C\u02a2\nC\3C\3C\3D\3")
buf.write("D\3D\7D\u02a9\nD\fD\16D\u02ac\13D\3D\3D\3E\3E\3E\7E\u02b3")
buf.write("\nE\fE\16E\u02b6\13E\3E\3E\3F\3F\3F\7F\u02bd\nF\fF\16")
buf.write("F\u02c0\13F\3G\3G\3G\3G\5G\u02c6\nG\3H\3H\3I\3I\3I\3I")
buf.write("\3J\3J\3K\3K\3L\3L\3L\3M\3M\3N\3N\3O\3O\3P\3P\3Q\3Q\3")
buf.write("R\3R\3S\3S\3T\3T\3U\3U\3U\3V\3V\3W\3W\3X\3X\3Y\3Y\3Z\3")
buf.write("Z\3[\3[\3\\\3\\\3\\\3]\3]\3^\3^\3_\3_\3`\3`\3a\3a\3b\3")
buf.write("b\3c\3c\3c\3d\3d\3e\3e\3f\3f\3g\3g\3g\3g\3g\3h\3h\3h\3")
buf.write("h\3i\3i\3i\3i\3j\3j\3j\3j\3k\3k\3k\3l\3l\3l\3l\3m\3m\3")
buf.write("n\3n\3n\3n\3n\3o\3o\3o\3o\3p\3p\3p\3p\3q\3q\3q\3q\3r\3")
buf.write("r\3r\3r\3s\3s\3s\3s\3t\3t\3t\3t\3u\3u\3u\3v\3v\3v\3v\3")
buf.write("w\3w\3x\3x\3x\3x\3x\3y\3y\3y\3y\3y\3z\3z\3z\3z\3z\3{\3")
buf.write("{\3{\3{\3|\3|\3|\3|\3|\3}\3}\3}\3}\3~\3~\3~\3~\3\177\3")
buf.write("\177\3\177\3\177\3\u0080\3\u0080\3\u0080\3\u0080\3\u0081")
buf.write("\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0083\3\u0084\6\u0084\u0381")
buf.write("\n\u0084\r\u0084\16\u0084\u0382\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0086\3\u0086")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088\3\u0089\3\u0089")
buf.write("\3\u0089\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a")
buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008c\3\u008c\3\u008c")
buf.write("\3\u008c\3\u008d\6\u008d\u03ad\n\u008d\r\u008d\16\u008d")
buf.write("\u03ae\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f")
buf.write("\3\u0090\3\u0090\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091")
buf.write("\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0093\3\u0093\3\u0093\3\u0093\3\u0094\3\u0094\3\u0094")
buf.write("\3\u0094\3\u0095\3\u0095\3\u0095\3\u0095\3\u0096\6\u0096")
buf.write("\u03d9\n\u0096\r\u0096\16\u0096\u03da\3\u0096\3\u0096")
buf.write("\3\u0096\3\u0097\3\u0097\6\u0097\u03e2\n\u0097\r\u0097")
buf.write("\16\u0097\u03e3\3\u0097\3\u0097\3\u0098\3\u0098\3\u0098")
buf.write("\3\u0098\3\u0099\3\u0099\3\u0099\3\u0099\3\u009a\3\u009a")
buf.write("\7\u009a\u03f2\n\u009a\f\u009a\16\u009a\u03f5\13\u009a")
buf.write("\4\u0242\u0251\2\u009b\t\6\13\7\r\b\17\t\21\n\23\13\25")
buf.write("\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+")
buf.write("\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E")
buf.write("$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k")
buf.write("\67m8o9q\2s\2u\2w\2y\2{\2}\2\177\2\u0081\2\u0083\2\u0085")
buf.write("\2\u0087\2\u0089\2\u008b\2\u008d\2\u008f\2\u0091\2\u0093")
buf.write("\2\u0095\2\u0097\2\u0099\2\u009b\2\u009d\2\u009f\2\u00a1")
buf.write("\2\u00a3\2\u00a5\2\u00a7\2\u00a9\2\u00ab\2\u00ad\2\u00af")
buf.write("\2\u00b1\2\u00b3\2\u00b5\2\u00b7\2\u00b9\2\u00bb\2\u00bd")
buf.write("\2\u00bf\2\u00c1\2\u00c3\2\u00c5\2\u00c7\2\u00c9\2\u00cb")
buf.write("\2\u00cd\2\u00cf\2\u00d1\2\u00d3\2\u00d5\2\u00d7\2\u00d9")
buf.write("\2\u00db:\u00dd;\u00df<\u00e1\2\u00e3\2\u00e5\2\u00e7")
buf.write("\2\u00e9\2\u00eb\2\u00ed\2\u00ef=\u00f1>\u00f3?\u00f5")
buf.write("\2\u00f7\2\u00f9\2\u00fb\2\u00fd\2\u00ff\2\u0101\2\u0103")
buf.write("\2\u0105\2\u0107\2\u0109\2\u010b\2\u010d\2\u010f\2\u0111")
buf.write("\2\u0113\2\u0115\2\u0117\2\u0119\2\u011b\2\u011d\2\u011f")
buf.write("\2\u0121\2\u0123\2\u0125\2\u0127\2\u0129\2\u012b\2\u012d")
buf.write("\2\u012f\2\u0131\2\u0133\2\u0135\5\u0137@\u0139\2\t\2")
buf.write("\3\4\5\6\7\b\17\4\2\f\f\17\17\5\2\13\f\16\17\"\"\4\2\13")
buf.write("\13\"\"\4\2\f\f\16\17\n\2$$))^^ddhhppttvv\3\2\63;\5\2")
buf.write("\62;CHch\3\2\62;\6\2\f\f\17\17))^^\6\2\f\f\17\17$$^^\5")
buf.write("\2\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c|\u00c2")
buf.write("\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381\u2001")
buf.write("\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801\uf902")
buf.write("\ufdd1\ufdf2\uffff\3\2^_\2\u03e4\2\t\3\2\2\2\2\13\3\2")
buf.write("\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write("\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2")
buf.write("\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3")
buf.write("\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2")
buf.write("/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\3\u00d3")
buf.write("\3\2\2\2\3\u00d5\3\2\2\2\3\u00d7\3\2\2\2\3\u00d9\3\2\2")
buf.write("\2\3\u00db\3\2\2\2\3\u00dd\3\2\2\2\3\u00df\3\2\2\2\4\u00e1")
buf.write("\3\2\2\2\4\u00e3\3\2\2\2\4\u00e5\3\2\2\2\4\u00e7\3\2\2")
buf.write("\2\4\u00e9\3\2\2\2\4\u00eb\3\2\2\2\4\u00ed\3\2\2\2\4\u00ef")
buf.write("\3\2\2\2\4\u00f1\3\2\2\2\4\u00f3\3\2\2\2\5\u00f5\3\2\2")
buf.write("\2\5\u00f7\3\2\2\2\5\u00f9\3\2\2\2\5\u00fb\3\2\2\2\5\u00fd")
buf.write("\3\2\2\2\5\u00ff\3\2\2\2\5\u0101\3\2\2\2\5\u0103\3\2\2")
buf.write("\2\5\u0105\3\2\2\2\5\u0107\3\2\2\2\5\u0109\3\2\2\2\5\u010b")
buf.write("\3\2\2\2\5\u010d\3\2\2\2\6\u010f\3\2\2\2\6\u0111\3\2\2")
buf.write("\2\6\u0113\3\2\2\2\6\u0115\3\2\2\2\6\u0117\3\2\2\2\6\u0119")
buf.write("\3\2\2\2\6\u011b\3\2\2\2\6\u011d\3\2\2\2\6\u011f\3\2\2")
buf.write("\2\7\u0121\3\2\2\2\7\u0123\3\2\2\2\7\u0125\3\2\2\2\7\u0127")
buf.write("\3\2\2\2\7\u0129\3\2\2\2\7\u012b\3\2\2\2\7\u012d\3\2\2")
buf.write("\2\7\u012f\3\2\2\2\7\u0131\3\2\2\2\b\u0133\3\2\2\2\b\u0135")
buf.write("\3\2\2\2\b\u0137\3\2\2\2\t\u013b\3\2\2\2\13\u013d\3\2")
buf.write("\2\2\r\u0147\3\2\2\2\17\u014b\3\2\2\2\21\u014f\3\2\2\2")
buf.write("\23\u0151\3\2\2\2\25\u0153\3\2\2\2\27\u0155\3\2\2\2\31")
buf.write("\u0158\3\2\2\2\33\u015c\3\2\2\2\35\u016e\3\2\2\2\37\u017f")
buf.write("\3\2\2\2!\u0192\3\2\2\2#\u0199\3\2\2\2%\u01a2\3\2\2\2")
buf.write("\'\u01a8\3\2\2\2)\u01af\3\2\2\2+\u01b7\3\2\2\2-\u01c1")
buf.write("\3\2\2\2/\u01c8\3\2\2\2\61\u01d0\3\2\2\2\63\u01d8\3\2")
buf.write("\2\2\65\u01df\3\2\2\2\67\u01e6\3\2\2\29\u01ec\3\2\2\2")
buf.write(";\u01f4\3\2\2\2=\u01f9\3\2\2\2?\u01fb\3\2\2\2A\u01fd\3")
buf.write("\2\2\2C\u01ff\3\2\2\2E\u0201\3\2\2\2G\u0203\3\2\2\2I\u0205")
buf.write("\3\2\2\2K\u0207\3\2\2\2M\u0209\3\2\2\2O\u020b\3\2\2\2")
buf.write("Q\u020d\3\2\2\2S\u020f\3\2\2\2U\u0211\3\2\2\2W\u0213\3")
buf.write("\2\2\2Y\u0215\3\2\2\2[\u0217\3\2\2\2]\u0219\3\2\2\2_\u021b")
buf.write("\3\2\2\2a\u021d\3\2\2\2c\u021f\3\2\2\2e\u0221\3\2\2\2")
buf.write("g\u0223\3\2\2\2i\u0225\3\2\2\2k\u0227\3\2\2\2m\u022a\3")
buf.write("\2\2\2o\u0230\3\2\2\2q\u0236\3\2\2\2s\u0238\3\2\2\2u\u023a")
buf.write("\3\2\2\2w\u023c\3\2\2\2y\u0263\3\2\2\2{\u0265\3\2\2\2")
buf.write("}\u026e\3\2\2\2\177\u0275\3\2\2\2\u0081\u0278\3\2\2\2")
buf.write("\u0083\u028d\3\2\2\2\u0085\u028f\3\2\2\2\u0087\u0291\3")
buf.write("\2\2\2\u0089\u029c\3\2\2\2\u008b\u029e\3\2\2\2\u008d\u02a5")
buf.write("\3\2\2\2\u008f\u02af\3\2\2\2\u0091\u02b9\3\2\2\2\u0093")
buf.write("\u02c5\3\2\2\2\u0095\u02c7\3\2\2\2\u0097\u02c9\3\2\2\2")
buf.write("\u0099\u02cd\3\2\2\2\u009b\u02cf\3\2\2\2\u009d\u02d1\3")
buf.write("\2\2\2\u009f\u02d4\3\2\2\2\u00a1\u02d6\3\2\2\2\u00a3\u02d8")
buf.write("\3\2\2\2\u00a5\u02da\3\2\2\2\u00a7\u02dc\3\2\2\2\u00a9")
buf.write("\u02de\3\2\2\2\u00ab\u02e0\3\2\2\2\u00ad\u02e2\3\2\2\2")
buf.write("\u00af\u02e4\3\2\2\2\u00b1\u02e7\3\2\2\2\u00b3\u02e9\3")
buf.write("\2\2\2\u00b5\u02eb\3\2\2\2\u00b7\u02ed\3\2\2\2\u00b9\u02ef")
buf.write("\3\2\2\2\u00bb\u02f1\3\2\2\2\u00bd\u02f3\3\2\2\2\u00bf")
buf.write("\u02f6\3\2\2\2\u00c1\u02f8\3\2\2\2\u00c3\u02fa\3\2\2\2")
buf.write("\u00c5\u02fc\3\2\2\2\u00c7\u02fe\3\2\2\2\u00c9\u0300\3")
buf.write("\2\2\2\u00cb\u0302\3\2\2\2\u00cd\u0305\3\2\2\2\u00cf\u0307")
buf.write("\3\2\2\2\u00d1\u0309\3\2\2\2\u00d3\u030b\3\2\2\2\u00d5")
buf.write("\u0310\3\2\2\2\u00d7\u0314\3\2\2\2\u00d9\u0318\3\2\2\2")
buf.write("\u00db\u031c\3\2\2\2\u00dd\u031f\3\2\2\2\u00df\u0323\3")
buf.write("\2\2\2\u00e1\u0325\3\2\2\2\u00e3\u032a\3\2\2\2\u00e5\u032e")
buf.write("\3\2\2\2\u00e7\u0332\3\2\2\2\u00e9\u0336\3\2\2\2\u00eb")
buf.write("\u033a\3\2\2\2\u00ed\u033e\3\2\2\2\u00ef\u0342\3\2\2\2")
buf.write("\u00f1\u0345\3\2\2\2\u00f3\u0349\3\2\2\2\u00f5\u034b\3")
buf.write("\2\2\2\u00f7\u0350\3\2\2\2\u00f9\u0355\3\2\2\2\u00fb\u035a")
buf.write("\3\2\2\2\u00fd\u035e\3\2\2\2\u00ff\u0363\3\2\2\2\u0101")
buf.write("\u0367\3\2\2\2\u0103\u036b\3\2\2\2\u0105\u036f\3\2\2\2")
buf.write("\u0107\u0373\3\2\2\2\u0109\u0377\3\2\2\2\u010b\u037b\3")
buf.write("\2\2\2\u010d\u0380\3\2\2\2\u010f\u0387\3\2\2\2\u0111\u038c")
buf.write("\3\2\2\2\u0113\u0391\3\2\2\2\u0115\u0396\3\2\2\2\u0117")
buf.write("\u039a\3\2\2\2\u0119\u039f\3\2\2\2\u011b\u03a3\3\2\2\2")
buf.write("\u011d\u03a7\3\2\2\2\u011f\u03ac\3\2\2\2\u0121\u03b3\3")
buf.write("\2\2\2\u0123\u03b8\3\2\2\2\u0125\u03bd\3\2\2\2\u0127\u03c2")
buf.write("\3\2\2\2\u0129\u03c6\3\2\2\2\u012b\u03cb\3\2\2\2\u012d")
buf.write("\u03cf\3\2\2\2\u012f\u03d3\3\2\2\2\u0131\u03d8\3\2\2\2")
buf.write("\u0133\u03e1\3\2\2\2\u0135\u03e7\3\2\2\2\u0137\u03eb\3")
buf.write("\2\2\2\u0139\u03ef\3\2\2\2\u013b\u013c\5y:\2\u013c\n\3")
buf.write("\2\2\2\u013d\u013e\7\61\2\2\u013e\u013f\7\61\2\2\u013f")
buf.write("\u0140\7\61\2\2\u0140\u0144\3\2\2\2\u0141\u0143\n\2\2")
buf.write("\2\u0142\u0141\3\2\2\2\u0143\u0146\3\2\2\2\u0144\u0142")
buf.write("\3\2\2\2\u0144\u0145\3\2\2\2\u0145\f\3\2\2\2\u0146\u0144")
buf.write("\3\2\2\2\u0147\u0148\5w9\2\u0148\u0149\3\2\2\2\u0149\u014a")
buf.write("\b\4\2\2\u014a\16\3\2\2\2\u014b\u014c\5{;\2\u014c\u014d")
buf.write("\3\2\2\2\u014d\u014e\b\5\2\2\u014e\20\3\2\2\2\u014f\u0150")
buf.write("\5\u0083?\2\u0150\22\3\2\2\2\u0151\u0152\5\u008dD\2\u0152")
buf.write("\24\3\2\2\2\u0153\u0154\5\u0091F\2\u0154\26\3\2\2\2\u0155")
buf.write("\u0156\5\u00abS\2\u0156\u0157\b\t\3\2\u0157\30\3\2\2\2")
buf.write("\u0158\u0159\5\u00a7Q\2\u0159\u015a\3\2\2\2\u015a\u015b")
buf.write("\b\n\4\2\u015b\32\3\2\2\2\u015c\u015d\7q\2\2\u015d\u015e")
buf.write("\7r\2\2\u015e\u015f\7v\2\2\u015f\u0160\7k\2\2\u0160\u0161")
buf.write("\7q\2\2\u0161\u0162\7p\2\2\u0162\u0163\7u\2\2\u0163\u0167")
buf.write("\3\2\2\2\u0164\u0166\t\3\2\2\u0165\u0164\3\2\2\2\u0166")
buf.write("\u0169\3\2\2\2\u0167\u0165\3\2\2\2\u0167\u0168\3\2\2\2")
buf.write("\u0168\u016a\3\2\2\2\u0169\u0167\3\2\2\2\u016a\u016b\7")
buf.write("}\2\2\u016b\u016c\3\2\2\2\u016c\u016d\b\13\5\2\u016d\34")
buf.write("\3\2\2\2\u016e\u016f\7v\2\2\u016f\u0170\7q\2\2\u0170\u0171")
buf.write("\7m\2\2\u0171\u0172\7g\2\2\u0172\u0173\7p\2\2\u0173\u0174")
buf.write("\7u\2\2\u0174\u0178\3\2\2\2\u0175\u0177\t\3\2\2\u0176")
buf.write("\u0175\3\2\2\2\u0177\u017a\3\2\2\2\u0178\u0176\3\2\2\2")
buf.write("\u0178\u0179\3\2\2\2\u0179\u017b\3\2\2\2\u017a\u0178\3")
buf.write("\2\2\2\u017b\u017c\7}\2\2\u017c\u017d\3\2\2\2\u017d\u017e")
buf.write("\b\f\6\2\u017e\36\3\2\2\2\u017f\u0180\7e\2\2\u0180\u0181")
buf.write("\7j\2\2\u0181\u0182\7c\2\2\u0182\u0183\7p\2\2\u0183\u0184")
buf.write("\7p\2\2\u0184\u0185\7g\2\2\u0185\u0186\7n\2\2\u0186\u0187")
buf.write("\7u\2\2\u0187\u018b\3\2\2\2\u0188\u018a\t\3\2\2\u0189")
buf.write("\u0188\3\2\2\2\u018a\u018d\3\2\2\2\u018b\u0189\3\2\2\2")
buf.write("\u018b\u018c\3\2\2\2\u018c\u018e\3\2\2\2\u018d\u018b\3")
buf.write("\2\2\2\u018e\u018f\7}\2\2\u018f\u0190\3\2\2\2\u0190\u0191")
buf.write("\b\r\7\2\u0191 \3\2\2\2\u0192\u0193\7k\2\2\u0193\u0194")
buf.write("\7o\2\2\u0194\u0195\7r\2\2\u0195\u0196\7q\2\2\u0196\u0197")
buf.write("\7t\2\2\u0197\u0198\7v\2\2\u0198\"\3\2\2\2\u0199\u019a")
buf.write("\7h\2\2\u019a\u019b\7t\2\2\u019b\u019c\7c\2\2\u019c\u019d")
buf.write("\7i\2\2\u019d\u019e\7o\2\2\u019e\u019f\7g\2\2\u019f\u01a0")
buf.write("\7p\2\2\u01a0\u01a1\7v\2\2\u01a1$\3\2\2\2\u01a2\u01a3")
buf.write("\7n\2\2\u01a3\u01a4\7g\2\2\u01a4\u01a5\7z\2\2\u01a5\u01a6")
buf.write("\7g\2\2\u01a6\u01a7\7t\2\2\u01a7&\3\2\2\2\u01a8\u01a9")
buf.write("\7r\2\2\u01a9\u01aa\7c\2\2\u01aa\u01ab\7t\2\2\u01ab\u01ac")
buf.write("\7u\2\2\u01ac\u01ad\7g\2\2\u01ad\u01ae\7t\2\2\u01ae(\3")
buf.write("\2\2\2\u01af\u01b0\7i\2\2\u01b0\u01b1\7t\2\2\u01b1\u01b2")
buf.write("\7c\2\2\u01b2\u01b3\7o\2\2\u01b3\u01b4\7o\2\2\u01b4\u01b5")
buf.write("\7c\2\2\u01b5\u01b6\7t\2\2\u01b6*\3\2\2\2\u01b7\u01b8")
buf.write("\7r\2\2\u01b8\u01b9\7t\2\2\u01b9\u01ba\7q\2\2\u01ba\u01bb")
buf.write("\7v\2\2\u01bb\u01bc\7g\2\2\u01bc\u01bd\7e\2\2\u01bd\u01be")
buf.write("\7v\2\2\u01be\u01bf\7g\2\2\u01bf\u01c0\7f\2\2\u01c0,\3")
buf.write("\2\2\2\u01c1\u01c2\7r\2\2\u01c2\u01c3\7w\2\2\u01c3\u01c4")
buf.write("\7d\2\2\u01c4\u01c5\7n\2\2\u01c5\u01c6\7k\2\2\u01c6\u01c7")
buf.write("\7e\2\2\u01c7.\3\2\2\2\u01c8\u01c9\7r\2\2\u01c9\u01ca")
buf.write("\7t\2\2\u01ca\u01cb\7k\2\2\u01cb\u01cc\7x\2\2\u01cc\u01cd")
buf.write("\7c\2\2\u01cd\u01ce\7v\2\2\u01ce\u01cf\7g\2\2\u01cf\60")
buf.write("\3\2\2\2\u01d0\u01d1\7t\2\2\u01d1\u01d2\7g\2\2\u01d2\u01d3")
buf.write("\7v\2\2\u01d3\u01d4\7w\2\2\u01d4\u01d5\7t\2\2\u01d5\u01d6")
buf.write("\7p\2\2\u01d6\u01d7\7u\2\2\u01d7\62\3\2\2\2\u01d8\u01d9")
buf.write("\7n\2\2\u01d9\u01da\7q\2\2\u01da\u01db\7e\2\2\u01db\u01dc")
buf.write("\7c\2\2\u01dc\u01dd\7n\2\2\u01dd\u01de\7u\2\2\u01de\64")
buf.write("\3\2\2\2\u01df\u01e0\7v\2\2\u01e0\u01e1\7j\2\2\u01e1\u01e2")
buf.write("\7t\2\2\u01e2\u01e3\7q\2\2\u01e3\u01e4\7y\2\2\u01e4\u01e5")
buf.write("\7u\2\2\u01e5\66\3\2\2\2\u01e6\u01e7\7e\2\2\u01e7\u01e8")
buf.write("\7c\2\2\u01e8\u01e9\7v\2\2\u01e9\u01ea\7e\2\2\u01ea\u01eb")
buf.write("\7j\2\2\u01eb8\3\2\2\2\u01ec\u01ed\7h\2\2\u01ed\u01ee")
buf.write("\7k\2\2\u01ee\u01ef\7p\2\2\u01ef\u01f0\7c\2\2\u01f0\u01f1")
buf.write("\7n\2\2\u01f1\u01f2\7n\2\2\u01f2\u01f3\7{\2\2\u01f3:\3")
buf.write("\2\2\2\u01f4\u01f5\7o\2\2\u01f5\u01f6\7q\2\2\u01f6\u01f7")
buf.write("\7f\2\2\u01f7\u01f8\7g\2\2\u01f8<\3\2\2\2\u01f9\u01fa")
buf.write("\5\u009bK\2\u01fa>\3\2\2\2\u01fb\u01fc\5\u009dL\2\u01fc")
buf.write("@\3\2\2\2\u01fd\u01fe\5\u00c5`\2\u01feB\3\2\2\2\u01ff")
buf.write("\u0200\5\u00c7a\2\u0200D\3\2\2\2\u0201\u0202\5\u00a3O")
buf.write("\2\u0202F\3\2\2\2\u0203\u0204\5\u00a5P\2\u0204H\3\2\2")
buf.write("\2\u0205\u0206\5\u00a7Q\2\u0206J\3\2\2\2\u0207\u0208\5")
buf.write("\u00a9R\2\u0208L\3\2\2\2\u0209\u020a\5\u00afU\2\u020a")
buf.write("N\3\2\2\2\u020b\u020c\5\u00b1V\2\u020cP\3\2\2\2\u020d")
buf.write("\u020e\5\u00b3W\2\u020eR\3\2\2\2\u020f\u0210\5\u00b5X")
buf.write("\2\u0210T\3\2\2\2\u0211\u0212\5\u00b7Y\2\u0212V\3\2\2")
buf.write("\2\u0213\u0214\5\u00b9Z\2\u0214X\3\2\2\2\u0215\u0216\5")
buf.write("\u00bd\\\2\u0216Z\3\2\2\2\u0217\u0218\5\u00bb[\2\u0218")
buf.write("\\\3\2\2\2\u0219\u021a\5\u00c1^\2\u021a^\3\2\2\2\u021b")
buf.write("\u021c\5\u00c3_\2\u021c`\3\2\2\2\u021d\u021e\5\u00cbc")
buf.write("\2\u021eb\3\2\2\2\u021f\u0220\5\u00c9b\2\u0220d\3\2\2")
buf.write("\2\u0221\u0222\5\u00cdd\2\u0222f\3\2\2\2\u0223\u0224\5")
buf.write("\u00cfe\2\u0224h\3\2\2\2\u0225\u0226\5\u00d1f\2\u0226")
buf.write("j\3\2\2\2\u0227\u0228\5\u0139\u009a\2\u0228l\3\2\2\2\u0229")
buf.write("\u022b\5q\66\2\u022a\u0229\3\2\2\2\u022b\u022c\3\2\2\2")
buf.write("\u022c\u022a\3\2\2\2\u022c\u022d\3\2\2\2\u022d\u022e\3")
buf.write("\2\2\2\u022e\u022f\b\64\2\2\u022fn\3\2\2\2\u0230\u0231")
buf.write("\13\2\2\2\u0231\u0232\3\2\2\2\u0232\u0233\b\65\b\2\u0233")
buf.write("p\3\2\2\2\u0234\u0237\5s\67\2\u0235\u0237\5u8\2\u0236")
buf.write("\u0234\3\2\2\2\u0236\u0235\3\2\2\2\u0237r\3\2\2\2\u0238")
buf.write("\u0239\t\4\2\2\u0239t\3\2\2\2\u023a\u023b\t\5\2\2\u023b")
buf.write("v\3\2\2\2\u023c\u023d\7\61\2\2\u023d\u023e\7,\2\2\u023e")
buf.write("\u0242\3\2\2\2\u023f\u0241\13\2\2\2\u0240\u023f\3\2\2")
buf.write("\2\u0241\u0244\3\2\2\2\u0242\u0243\3\2\2\2\u0242\u0240")
buf.write("\3\2\2\2\u0243\u0248\3\2\2\2\u0244\u0242\3\2\2\2\u0245")
buf.write("\u0246\7,\2\2\u0246\u0249\7\61\2\2\u0247\u0249\7\2\2\3")
buf.write("\u0248\u0245\3\2\2\2\u0248\u0247\3\2\2\2\u0249x\3\2\2")
buf.write("\2\u024a\u024b\7\61\2\2\u024b\u024c\7,\2\2\u024c\u024d")
buf.write("\7,\2\2\u024d\u0251\3\2\2\2\u024e\u0250\13\2\2\2\u024f")
buf.write("\u024e\3\2\2\2\u0250\u0253\3\2\2\2\u0251\u0252\3\2\2\2")
buf.write("\u0251\u024f\3\2\2\2\u0252\u0257\3\2\2\2\u0253\u0251\3")
buf.write("\2\2\2\u0254\u0255\7,\2\2\u0255\u0258\7\61\2\2\u0256\u0258")
buf.write("\7\2\2\3\u0257\u0254\3\2\2\2\u0257\u0256\3\2\2\2\u0258")
buf.write("\u0264\3\2\2\2\u0259\u025a\7\61\2\2\u025a\u025b\7\61\2")
buf.write("\2\u025b\u025c\7B\2\2\u025c\u0260\3\2\2\2\u025d\u025f")
buf.write("\n\2\2\2\u025e\u025d\3\2\2\2\u025f\u0262\3\2\2\2\u0260")
buf.write("\u025e\3\2\2\2\u0260\u0261\3\2\2\2\u0261\u0264\3\2\2\2")
buf.write("\u0262\u0260\3\2\2\2\u0263\u024a\3\2\2\2\u0263\u0259\3")
buf.write("\2\2\2\u0264z\3\2\2\2\u0265\u0266\7\61\2\2\u0266\u0267")
buf.write("\7\61\2\2\u0267\u026b\3\2\2\2\u0268\u026a\n\2\2\2\u0269")
buf.write("\u0268\3\2\2\2\u026a\u026d\3\2\2\2\u026b\u0269\3\2\2\2")
buf.write("\u026b\u026c\3\2\2\2\u026c|\3\2\2\2\u026d\u026b\3\2\2")
buf.write("\2\u026e\u0273\5\u0099J\2\u026f\u0274\t\6\2\2\u0270\u0274")
buf.write("\5\u0081>\2\u0271\u0274\13\2\2\2\u0272\u0274\7\2\2\3\u0273")
buf.write("\u026f\3\2\2\2\u0273\u0270\3\2\2\2\u0273\u0271\3\2\2\2")
buf.write("\u0273\u0272\3\2\2\2\u0274~\3\2\2\2\u0275\u0276\5\u0099")
buf.write("J\2\u0276\u0277\13\2\2\2\u0277\u0080\3\2\2\2\u0278\u0283")
buf.write("\7w\2\2\u0279\u0281\5\u0085@\2\u027a\u027f\5\u0085@\2")
buf.write("\u027b\u027d\5\u0085@\2\u027c\u027e\5\u0085@\2\u027d\u027c")
buf.write("\3\2\2\2\u027d\u027e\3\2\2\2\u027e\u0280\3\2\2\2\u027f")
buf.write("\u027b\3\2\2\2\u027f\u0280\3\2\2\2\u0280\u0282\3\2\2\2")
buf.write("\u0281\u027a\3\2\2\2\u0281\u0282\3\2\2\2\u0282\u0284\3")
buf.write("\2\2\2\u0283\u0279\3\2\2\2\u0283\u0284\3\2\2\2\u0284\u0082")
buf.write("\3\2\2\2\u0285\u028e\7\62\2\2\u0286\u028a\t\7\2\2\u0287")
buf.write("\u0289\5\u0087A\2\u0288\u0287\3\2\2\2\u0289\u028c\3\2")
buf.write("\2\2\u028a\u0288\3\2\2\2\u028a\u028b\3\2\2\2\u028b\u028e")
buf.write("\3\2\2\2\u028c\u028a\3\2\2\2\u028d\u0285\3\2\2\2\u028d")
buf.write("\u0286\3\2\2\2\u028e\u0084\3\2\2\2\u028f\u0290\t\b\2\2")
buf.write("\u0290\u0086\3\2\2\2\u0291\u0292\t\t\2\2\u0292\u0088\3")
buf.write("\2\2\2\u0293\u0294\7v\2\2\u0294\u0295\7t\2\2\u0295\u0296")
buf.write("\7w\2\2\u0296\u029d\7g\2\2\u0297\u0298\7h\2\2\u0298\u0299")
buf.write("\7c\2\2\u0299\u029a\7n\2\2\u029a\u029b\7u\2\2\u029b\u029d")
buf.write("\7g\2\2\u029c\u0293\3\2\2\2\u029c\u0297\3\2\2\2\u029d")
buf.write("\u008a\3\2\2\2\u029e\u02a1\5\u009fM\2\u029f\u02a2\5}<")
buf.write("\2\u02a0\u02a2\n\n\2\2\u02a1\u029f\3\2\2\2\u02a1\u02a0")
buf.write("\3\2\2\2\u02a2\u02a3\3\2\2\2\u02a3\u02a4\5\u009fM\2\u02a4")
buf.write("\u008c\3\2\2\2\u02a5\u02aa\5\u009fM\2\u02a6\u02a9\5}<")
buf.write("\2\u02a7\u02a9\n\n\2\2\u02a8\u02a6\3\2\2\2\u02a8\u02a7")
buf.write("\3\2\2\2\u02a9\u02ac\3\2\2\2\u02aa\u02a8\3\2\2\2\u02aa")
buf.write("\u02ab\3\2\2\2\u02ab\u02ad\3\2\2\2\u02ac\u02aa\3\2\2\2")
buf.write("\u02ad\u02ae\5\u009fM\2\u02ae\u008e\3\2\2\2\u02af\u02b4")
buf.write("\5\u00a1N\2\u02b0\u02b3\5}<\2\u02b1\u02b3\n\13\2\2\u02b2")
buf.write("\u02b0\3\2\2\2\u02b2\u02b1\3\2\2\2\u02b3\u02b6\3\2\2\2")
buf.write("\u02b4\u02b2\3\2\2\2\u02b4\u02b5\3\2\2\2\u02b5\u02b7\3")
buf.write("\2\2\2\u02b6\u02b4\3\2\2\2\u02b7\u02b8\5\u00a1N\2\u02b8")
buf.write("\u0090\3\2\2\2\u02b9\u02be\5\u009fM\2\u02ba\u02bd\5}<")
buf.write("\2\u02bb\u02bd\n\n\2\2\u02bc\u02ba\3\2\2\2\u02bc\u02bb")
buf.write("\3\2\2\2\u02bd\u02c0\3\2\2\2\u02be\u02bc\3\2\2\2\u02be")
buf.write("\u02bf\3\2\2\2\u02bf\u0092\3\2\2\2\u02c0\u02be\3\2\2\2")
buf.write("\u02c1\u02c6\5\u0095H\2\u02c2\u02c6\4\62;\2\u02c3\u02c6")
buf.write("\5\u00bf]\2\u02c4\u02c6\t\f\2\2\u02c5\u02c1\3\2\2\2\u02c5")
buf.write("\u02c2\3\2\2\2\u02c5\u02c3\3\2\2\2\u02c5\u02c4\3\2\2\2")
buf.write("\u02c6\u0094\3\2\2\2\u02c7\u02c8\t\r\2\2\u02c8\u0096\3")
buf.write("\2\2\2\u02c9\u02ca\7k\2\2\u02ca\u02cb\7p\2\2\u02cb\u02cc")
buf.write("\7v\2\2\u02cc\u0098\3\2\2\2\u02cd\u02ce\7^\2\2\u02ce\u009a")
buf.write("\3\2\2\2\u02cf\u02d0\7<\2\2\u02d0\u009c\3\2\2\2\u02d1")
buf.write("\u02d2\7<\2\2\u02d2\u02d3\7<\2\2\u02d3\u009e\3\2\2\2\u02d4")
buf.write("\u02d5\7)\2\2\u02d5\u00a0\3\2\2\2\u02d6\u02d7\7$\2\2\u02d7")
buf.write("\u00a2\3\2\2\2\u02d8\u02d9\7*\2\2\u02d9\u00a4\3\2\2\2")
buf.write("\u02da\u02db\7+\2\2\u02db\u00a6\3\2\2\2\u02dc\u02dd\7")
buf.write("}\2\2\u02dd\u00a8\3\2\2\2\u02de\u02df\7\177\2\2\u02df")
buf.write("\u00aa\3\2\2\2\u02e0\u02e1\7]\2\2\u02e1\u00ac\3\2\2\2")
buf.write("\u02e2\u02e3\7_\2\2\u02e3\u00ae\3\2\2\2\u02e4\u02e5\7")
buf.write("/\2\2\u02e5\u02e6\7@\2\2\u02e6\u00b0\3\2\2\2\u02e7\u02e8")
buf.write("\7>\2\2\u02e8\u00b2\3\2\2\2\u02e9\u02ea\7@\2\2\u02ea\u00b4")
buf.write("\3\2\2\2\u02eb\u02ec\7?\2\2\u02ec\u00b6\3\2\2\2\u02ed")
buf.write("\u02ee\7A\2\2\u02ee\u00b8\3\2\2\2\u02ef\u02f0\7,\2\2\u02f0")
buf.write("\u00ba\3\2\2\2\u02f1\u02f2\7-\2\2\u02f2\u00bc\3\2\2\2")
buf.write("\u02f3\u02f4\7-\2\2\u02f4\u02f5\7?\2\2\u02f5\u00be\3\2")
buf.write("\2\2\u02f6\u02f7\7a\2\2\u02f7\u00c0\3\2\2\2\u02f8\u02f9")
buf.write("\7~\2\2\u02f9\u00c2\3\2\2\2\u02fa\u02fb\7&\2\2\u02fb\u00c4")
buf.write("\3\2\2\2\u02fc\u02fd\7.\2\2\u02fd\u00c6\3\2\2\2\u02fe")
buf.write("\u02ff\7=\2\2\u02ff\u00c8\3\2\2\2\u0300\u0301\7\60\2\2")
buf.write("\u0301\u00ca\3\2\2\2\u0302\u0303\7\60\2\2\u0303\u0304")
buf.write("\7\60\2\2\u0304\u00cc\3\2\2\2\u0305\u0306\7B\2\2\u0306")
buf.write("\u00ce\3\2\2\2\u0307\u0308\7%\2\2\u0308\u00d0\3\2\2\2")
buf.write("\u0309\u030a\7\u0080\2\2\u030a\u00d2\3\2\2\2\u030b\u030c")
buf.write("\5\u00abS\2\u030c\u030d\3\2\2\2\u030d\u030e\bg\t\2\u030e")
buf.write("\u030f\bg\n\2\u030f\u00d4\3\2\2\2\u0310\u0311\5\177=\2")
buf.write("\u0311\u0312\3\2\2\2\u0312\u0313\bh\t\2\u0313\u00d6\3")
buf.write("\2\2\2\u0314\u0315\5\u008fE\2\u0315\u0316\3\2\2\2\u0316")
buf.write("\u0317\bi\t\2\u0317\u00d8\3\2\2\2\u0318\u0319\5\u008d")
buf.write("D\2\u0319\u031a\3\2\2\2\u031a\u031b\bj\t\2\u031b\u00da")
buf.write("\3\2\2\2\u031c\u031d\5\u00adT\2\u031d\u031e\bk\13\2\u031e")
buf.write("\u00dc\3\2\2\2\u031f\u0320\7\2\2\3\u0320\u0321\3\2\2\2")
buf.write("\u0321\u0322\bl\f\2\u0322\u00de\3\2\2\2\u0323\u0324\13")
buf.write("\2\2\2\u0324\u00e0\3\2\2\2\u0325\u0326\5\u00a7Q\2\u0326")
buf.write("\u0327\3\2\2\2\u0327\u0328\bn\r\2\u0328\u0329\bn\4\2\u0329")
buf.write("\u00e2\3\2\2\2\u032a\u032b\5\177=\2\u032b\u032c\3\2\2")
buf.write("\2\u032c\u032d\bo\r\2\u032d\u00e4\3\2\2\2\u032e\u032f")
buf.write("\5\u008fE\2\u032f\u0330\3\2\2\2\u0330\u0331\bp\r\2\u0331")
buf.write("\u00e6\3\2\2\2\u0332\u0333\5\u008dD\2\u0333\u0334\3\2")
buf.write("\2\2\u0334\u0335\bq\r\2\u0335\u00e8\3\2\2\2\u0336\u0337")
buf.write("\5y:\2\u0337\u0338\3\2\2\2\u0338\u0339\br\r\2\u0339\u00ea")
buf.write("\3\2\2\2\u033a\u033b\5w9\2\u033b\u033c\3\2\2\2\u033c\u033d")
buf.write("\bs\r\2\u033d\u00ec\3\2\2\2\u033e\u033f\5{;\2\u033f\u0340")
buf.write("\3\2\2\2\u0340\u0341\bt\r\2\u0341\u00ee\3\2\2\2\u0342")
buf.write("\u0343\5\u00a9R\2\u0343\u0344\bu\16\2\u0344\u00f0\3\2")
buf.write("\2\2\u0345\u0346\7\2\2\3\u0346\u0347\3\2\2\2\u0347\u0348")
buf.write("\bv\f\2\u0348\u00f2\3\2\2\2\u0349\u034a\13\2\2\2\u034a")
buf.write("\u00f4\3\2\2\2\u034b\u034c\5y:\2\u034c\u034d\3\2\2\2\u034d")
buf.write("\u034e\bx\17\2\u034e\u034f\bx\2\2\u034f\u00f6\3\2\2\2")
buf.write("\u0350\u0351\5w9\2\u0351\u0352\3\2\2\2\u0352\u0353\by")
buf.write("\20\2\u0353\u0354\by\2\2\u0354\u00f8\3\2\2\2\u0355\u0356")
buf.write("\5{;\2\u0356\u0357\3\2\2\2\u0357\u0358\bz\21\2\u0358\u0359")
buf.write("\bz\2\2\u0359\u00fa\3\2\2\2\u035a\u035b\5\u00a7Q\2\u035b")
buf.write("\u035c\3\2\2\2\u035c\u035d\b{\22\2\u035d\u00fc\3\2\2\2")
buf.write("\u035e\u035f\5\u00a9R\2\u035f\u0360\3\2\2\2\u0360\u0361")
buf.write("\b|\23\2\u0361\u0362\b|\f\2\u0362\u00fe\3\2\2\2\u0363")
buf.write("\u0364\5\u0139\u009a\2\u0364\u0365\3\2\2\2\u0365\u0366")
buf.write("\b}\24\2\u0366\u0100\3\2\2\2\u0367\u0368\5\u00c9b\2\u0368")
buf.write("\u0369\3\2\2\2\u0369\u036a\b~\25\2\u036a\u0102\3\2\2\2")
buf.write("\u036b\u036c\5\u00b5X\2\u036c\u036d\3\2\2\2\u036d\u036e")
buf.write("\b\177\26\2\u036e\u0104\3\2\2\2\u036f\u0370\5\u008dD\2")
buf.write("\u0370\u0371\3\2\2\2\u0371\u0372\b\u0080\27\2\u0372\u0106")
buf.write("\3\2\2\2\u0373\u0374\5\u0083?\2\u0374\u0375\3\2\2\2\u0375")
buf.write("\u0376\b\u0081\30\2\u0376\u0108\3\2\2\2\u0377\u0378\5")
buf.write("\u00b9Z\2\u0378\u0379\3\2\2\2\u0379\u037a\b\u0082\31\2")
buf.write("\u037a\u010a\3\2\2\2\u037b\u037c\5\u00c7a\2\u037c\u037d")
buf.write("\3\2\2\2\u037d\u037e\b\u0083\32\2\u037e\u010c\3\2\2\2")
buf.write("\u037f\u0381\5q\66\2\u0380\u037f\3\2\2\2\u0381\u0382\3")
buf.write("\2\2\2\u0382\u0380\3\2\2\2\u0382\u0383\3\2\2\2\u0383\u0384")
buf.write("\3\2\2\2\u0384\u0385\b\u0084\33\2\u0385\u0386\b\u0084")
buf.write("\2\2\u0386\u010e\3\2\2\2\u0387\u0388\5y:\2\u0388\u0389")
buf.write("\3\2\2\2\u0389\u038a\b\u0085\17\2\u038a\u038b\b\u0085")
buf.write("\2\2\u038b\u0110\3\2\2\2\u038c\u038d\5w9\2\u038d\u038e")
buf.write("\3\2\2\2\u038e\u038f\b\u0086\20\2\u038f\u0390\b\u0086")
buf.write("\2\2\u0390\u0112\3\2\2\2\u0391\u0392\5{;\2\u0392\u0393")
buf.write("\3\2\2\2\u0393\u0394\b\u0087\21\2\u0394\u0395\b\u0087")
buf.write("\2\2\u0395\u0114\3\2\2\2\u0396\u0397\5\u00a7Q\2\u0397")
buf.write("\u0398\3\2\2\2\u0398\u0399\b\u0088\22\2\u0399\u0116\3")
buf.write("\2\2\2\u039a\u039b\5\u00a9R\2\u039b\u039c\3\2\2\2\u039c")
buf.write("\u039d\b\u0089\23\2\u039d\u039e\b\u0089\f\2\u039e\u0118")
buf.write("\3\2\2\2\u039f\u03a0\5\u0139\u009a\2\u03a0\u03a1\3\2\2")
buf.write("\2\u03a1\u03a2\b\u008a\24\2\u03a2\u011a\3\2\2\2\u03a3")
buf.write("\u03a4\5\u00c9b\2\u03a4\u03a5\3\2\2\2\u03a5\u03a6\b\u008b")
buf.write("\25\2\u03a6\u011c\3\2\2\2\u03a7\u03a8\5\u00c5`\2\u03a8")
buf.write("\u03a9\3\2\2\2\u03a9\u03aa\b\u008c\34\2\u03aa\u011e\3")
buf.write("\2\2\2\u03ab\u03ad\5q\66\2\u03ac\u03ab\3\2\2\2\u03ad\u03ae")
buf.write("\3\2\2\2\u03ae\u03ac\3\2\2\2\u03ae\u03af\3\2\2\2\u03af")
buf.write("\u03b0\3\2\2\2\u03b0\u03b1\b\u008d\33\2\u03b1\u03b2\b")
buf.write("\u008d\2\2\u03b2\u0120\3\2\2\2\u03b3\u03b4\5y:\2\u03b4")
buf.write("\u03b5\3\2\2\2\u03b5\u03b6\b\u008e\17\2\u03b6\u03b7\b")
buf.write("\u008e\2\2\u03b7\u0122\3\2\2\2\u03b8\u03b9\5w9\2\u03b9")
buf.write("\u03ba\3\2\2\2\u03ba\u03bb\b\u008f\20\2\u03bb\u03bc\b")
buf.write("\u008f\2\2\u03bc\u0124\3\2\2\2\u03bd\u03be\5{;\2\u03be")
buf.write("\u03bf\3\2\2\2\u03bf\u03c0\b\u0090\21\2\u03c0\u03c1\b")
buf.write("\u0090\2\2\u03c1\u0126\3\2\2\2\u03c2\u03c3\5\u00a7Q\2")
buf.write("\u03c3\u03c4\3\2\2\2\u03c4\u03c5\b\u0091\22\2\u03c5\u0128")
buf.write("\3\2\2\2\u03c6\u03c7\5\u00a9R\2\u03c7\u03c8\3\2\2\2\u03c8")
buf.write("\u03c9\b\u0092\23\2\u03c9\u03ca\b\u0092\f\2\u03ca\u012a")
buf.write("\3\2\2\2\u03cb\u03cc\5\u0139\u009a\2\u03cc\u03cd\3\2\2")
buf.write("\2\u03cd\u03ce\b\u0093\24\2\u03ce\u012c\3\2\2\2\u03cf")
buf.write("\u03d0\5\u00c9b\2\u03d0\u03d1\3\2\2\2\u03d1\u03d2\b\u0094")
buf.write("\25\2\u03d2\u012e\3\2\2\2\u03d3\u03d4\5\u00c5`\2\u03d4")
buf.write("\u03d5\3\2\2\2\u03d5\u03d6\b\u0095\34\2\u03d6\u0130\3")
buf.write("\2\2\2\u03d7\u03d9\5q\66\2\u03d8\u03d7\3\2\2\2\u03d9\u03da")
buf.write("\3\2\2\2\u03da\u03d8\3\2\2\2\u03da\u03db\3\2\2\2\u03db")
buf.write("\u03dc\3\2\2\2\u03dc\u03dd\b\u0096\33\2\u03dd\u03de\b")
buf.write("\u0096\2\2\u03de\u0132\3\2\2\2\u03df\u03e2\n\16\2\2\u03e0")
buf.write("\u03e2\5\177=\2\u03e1\u03df\3\2\2\2\u03e1\u03e0\3\2\2")
buf.write("\2\u03e2\u03e3\3\2\2\2\u03e3\u03e1\3\2\2\2\u03e3\u03e4")
buf.write("\3\2\2\2\u03e4\u03e5\3\2\2\2\u03e5\u03e6\b\u0097\35\2")
buf.write("\u03e6\u0134\3\2\2\2\u03e7\u03e8\5\u00adT\2\u03e8\u03e9")
buf.write("\3\2\2\2\u03e9\u03ea\b\u0098\f\2\u03ea\u0136\3\2\2\2\u03eb")
buf.write("\u03ec\7\2\2\3\u03ec\u03ed\3\2\2\2\u03ed\u03ee\b\u0099")
buf.write("\f\2\u03ee\u0138\3\2\2\2\u03ef\u03f3\5\u0095H\2\u03f0")
buf.write("\u03f2\5\u0093G\2\u03f1\u03f0\3\2\2\2\u03f2\u03f5\3\2")
buf.write("\2\2\u03f3\u03f1\3\2\2\2\u03f3\u03f4\3\2\2\2\u03f4\u013a")
buf.write("\3\2\2\2\u03f5\u03f3\3\2\2\2,\2\3\4\5\6\7\b\u0144\u0167")
buf.write("\u0178\u018b\u022c\u0236\u0242\u0248\u0251\u0257\u0260")
buf.write("\u0263\u026b\u0273\u027d\u027f\u0281\u0283\u028a\u028d")
buf.write("\u029c\u02a1\u02a8\u02aa\u02b2\u02b4\u02bc\u02be\u02c5")
buf.write("\u0382\u03ae\u03da\u03e1\u03e3\u03f3\36\2\4\2\3\t\2\7")
buf.write("\4\2\7\5\2\7\6\2\7\7\2\2\3\2\t<\2\7\3\2\3k\3\6\2\2\t?")
buf.write("\2\3u\4\t\6\2\t\b\2\t\t\2\t&\2\t\'\2\t\67\2\t\63\2\t+")
buf.write("\2\t\13\2\t\n\2\t-\2\t#\2\t8\2\t\"\2\5\2\2")
return buf.getvalue()
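# The generated lexer. Its base class, LexerAdaptor (bound above this section),
# supplies the handleBeginArgument/handleEndArgument/handleEndAction helpers
# invoked by the embedded actions further down.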
class ANTLRv4Lexer(LexerAdaptor):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
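    # Channel and lexical-mode indices (see channelNames/modeNames below),
    # followed by the token-type constants mirrored in ANTLRv4Lexer.tokens.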
OFF_CHANNEL = 2
Argument = 1
Action = 2
Options = 3
Tokens = 4
Channels = 5
LexerCharSet = 6
TOKEN_REF = 1
RULE_REF = 2
LEXER_CHAR_SET = 3
DOC_COMMENT = 4
HEADER = 5
BLOCK_COMMENT = 6
LINE_COMMENT = 7
INT = 8
STRING_LITERAL = 9
UNTERMINATED_STRING_LITERAL = 10
BEGIN_ARGUMENT = 11
BEGIN_ACTION = 12
OPTIONS = 13
TOKENS = 14
CHANNELS = 15
IMPORT = 16
FRAGMENT = 17
LEXER = 18
PARSER = 19
GRAMMAR = 20
PROTECTED = 21
PUBLIC = 22
PRIVATE = 23
RETURNS = 24
LOCALS = 25
THROWS = 26
CATCH = 27
FINALLY = 28
MODE = 29
COLON = 30
COLONCOLON = 31
COMMA = 32
SEMI = 33
LPAREN = 34
RPAREN = 35
LBRACE = 36
RBRACE = 37
RARROW = 38
LT = 39
GT = 40
ASSIGN = 41
QUESTION = 42
STAR = 43
PLUS_ASSIGN = 44
PLUS = 45
OR = 46
DOLLAR = 47
RANGE = 48
DOT = 49
AT = 50
POUND = 51
NOT = 52
ID = 53
WS = 54
ERRCHAR = 55
END_ARGUMENT = 56
UNTERMINATED_ARGUMENT = 57
ARGUMENT_CONTENT = 58
END_ACTION = 59
UNTERMINATED_ACTION = 60
ACTION_CONTENT = 61
UNTERMINATED_CHAR_SET = 62
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN", u"OFF_CHANNEL" ]
modeNames = [ "DEFAULT_MODE", "Argument", "Action", "Options", "Tokens",
"Channels", "LexerCharSet" ]
    literalNames = [ "<INVALID>",
"'import'", "'fragment'", "'lexer'", "'parser'", "'grammar'",
"'protected'", "'public'", "'private'", "'returns'", "'locals'",
"'throws'", "'catch'", "'finally'", "'mode'" ]
    symbolicNames = [ "<INVALID>",
"TOKEN_REF", "RULE_REF", "LEXER_CHAR_SET", "DOC_COMMENT", "HEADER",
"BLOCK_COMMENT", "LINE_COMMENT", "INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL",
"BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS", "CHANNELS",
"IMPORT", "FRAGMENT", "LEXER", "PARSER", "GRAMMAR", "PROTECTED",
"PUBLIC", "PRIVATE", "RETURNS", "LOCALS", "THROWS", "CATCH",
"FINALLY", "MODE", "COLON", "COLONCOLON", "COMMA", "SEMI", "LPAREN",
"RPAREN", "LBRACE", "RBRACE", "RARROW", "LT", "GT", "ASSIGN",
"QUESTION", "STAR", "PLUS_ASSIGN", "PLUS", "OR", "DOLLAR", "RANGE",
"DOT", "AT", "POUND", "NOT", "ID", "WS", "ERRCHAR", "END_ARGUMENT",
"UNTERMINATED_ARGUMENT", "ARGUMENT_CONTENT", "END_ACTION", "UNTERMINATED_ACTION",
"ACTION_CONTENT", "UNTERMINATED_CHAR_SET" ]
ruleNames = [ "DOC_COMMENT", "HEADER", "BLOCK_COMMENT", "LINE_COMMENT",
"INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL",
"BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS",
"CHANNELS", "IMPORT", "FRAGMENT", "LEXER", "PARSER", "GRAMMAR",
"PROTECTED", "PUBLIC", "PRIVATE", "RETURNS", "LOCALS",
"THROWS", "CATCH", "FINALLY", "MODE", "COLON", "COLONCOLON",
"COMMA", "SEMI", "LPAREN", "RPAREN", "LBRACE", "RBRACE",
"RARROW", "LT", "GT", "ASSIGN", "QUESTION", "STAR", "PLUS_ASSIGN",
"PLUS", "OR", "DOLLAR", "RANGE", "DOT", "AT", "POUND",
"NOT", "ID", "WS", "ERRCHAR", "Ws", "Hws", "Vws", "BlockComment",
"DocComment", "LineComment", "EscSeq", "EscAny", "UnicodeEsc",
"DecimalNumeral", "HexDigit", "DecDigit", "BoolLiteral",
"CharLiteral", "SQuoteLiteral", "DQuoteLiteral", "USQuoteLiteral",
"NameChar", "NameStartChar", "Int", "Esc", "Colon", "DColon",
"SQuote", "DQuote", "LParen", "RParen", "LBrace", "RBrace",
"LBrack", "RBrack", "RArrow", "Lt", "Gt", "Equal", "Question",
"Star", "Plus", "PlusAssign", "Underscore", "Pipe", "Dollar",
"Comma", "Semi", "Dot", "Range", "At", "Pound", "Tilde",
"NESTED_ARGUMENT", "ARGUMENT_ESCAPE", "ARGUMENT_STRING_LITERAL",
"ARGUMENT_CHAR_LITERAL", "END_ARGUMENT", "UNTERMINATED_ARGUMENT",
"ARGUMENT_CONTENT", "NESTED_ACTION", "ACTION_ESCAPE",
"ACTION_STRING_LITERAL", "ACTION_CHAR_LITERAL", "ACTION_DOC_COMMENT",
"ACTION_BLOCK_COMMENT", "ACTION_LINE_COMMENT", "END_ACTION",
"UNTERMINATED_ACTION", "ACTION_CONTENT", "OPT_DOC_COMMENT",
"OPT_BLOCK_COMMENT", "OPT_LINE_COMMENT", "OPT_LBRACE",
"OPT_RBRACE", "OPT_ID", "OPT_DOT", "OPT_ASSIGN", "OPT_STRING_LITERAL",
"OPT_INT", "OPT_STAR", "OPT_SEMI", "OPT_WS", "TOK_DOC_COMMENT",
"TOK_BLOCK_COMMENT", "TOK_LINE_COMMENT", "TOK_LBRACE",
"TOK_RBRACE", "TOK_ID", "TOK_DOT", "TOK_COMMA", "TOK_WS",
"CHN_DOC_COMMENT", "CHN_BLOCK_COMMENT", "CHN_LINE_COMMENT",
"CHN_LBRACE", "CHN_RBRACE", "CHN_ID", "CHN_DOT", "CHN_COMMA",
"CHN_WS", "LEXER_CHAR_SET_BODY", "LEXER_CHAR_SET", "UNTERMINATED_CHAR_SET",
"Id" ]
grammarFileName = "ANTLRv4Lexer.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
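    # Embedded grammar actions, dispatched by rule index from the ATN simulator;
    # each handler delegates to a helper defined on LexerAdaptor.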
def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
if self._actions is None:
actions = dict()
actions[7] = self.BEGIN_ARGUMENT_action
actions[105] = self.END_ARGUMENT_action
actions[115] = self.END_ACTION_action
self._actions = actions
action = self._actions.get(ruleIndex, None)
if action is not None:
action(localctx, actionIndex)
else:
            raise Exception("No registered action for: " + str(ruleIndex))
def BEGIN_ARGUMENT_action(self, localctx:RuleContext , actionIndex:int):
if actionIndex == 0:
self.handleBeginArgument()
def END_ARGUMENT_action(self, localctx:RuleContext , actionIndex:int):
if actionIndex == 1:
self.handleEndArgument()
def END_ACTION_action(self, localctx:RuleContext , actionIndex:int):
if actionIndex == 2:
self.handleEndAction()
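
# A minimal usage sketch for this lexer, assuming the antlr4-python3-runtime
# package is installed:
#
#     from antlr4 import InputStream, CommonTokenStream
#     lexer = ANTLRv4Lexer(InputStream("grammar G; r : 'x' ;"))
#     stream = CommonTokenStream(lexer)
#     stream.fill()  # stream.tokens now holds the lexed tokens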
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4Lexer.tokens 0000664 0000000 0000000 00000001612 14351074657 0025422 0 ustar 00root root 0000000 0000000 TOKEN_REF=1
RULE_REF=2
LEXER_CHAR_SET=3
DOC_COMMENT=4
HEADER=5
BLOCK_COMMENT=6
LINE_COMMENT=7
INT=8
STRING_LITERAL=9
UNTERMINATED_STRING_LITERAL=10
BEGIN_ARGUMENT=11
BEGIN_ACTION=12
OPTIONS=13
TOKENS=14
CHANNELS=15
IMPORT=16
FRAGMENT=17
LEXER=18
PARSER=19
GRAMMAR=20
PROTECTED=21
PUBLIC=22
PRIVATE=23
RETURNS=24
LOCALS=25
THROWS=26
CATCH=27
FINALLY=28
MODE=29
COLON=30
COLONCOLON=31
COMMA=32
SEMI=33
LPAREN=34
RPAREN=35
LBRACE=36
RBRACE=37
RARROW=38
LT=39
GT=40
ASSIGN=41
QUESTION=42
STAR=43
PLUS_ASSIGN=44
PLUS=45
OR=46
DOLLAR=47
RANGE=48
DOT=49
AT=50
POUND=51
NOT=52
ID=53
WS=54
ERRCHAR=55
END_ARGUMENT=56
UNTERMINATED_ARGUMENT=57
ARGUMENT_CONTENT=58
END_ACTION=59
UNTERMINATED_ACTION=60
ACTION_CONTENT=61
UNTERMINATED_CHAR_SET=62
'import'=16
'fragment'=17
'lexer'=18
'parser'=19
'grammar'=20
'protected'=21
'public'=22
'private'=23
'returns'=24
'locals'=25
'throws'=26
'catch'=27
'finally'=28
'mode'=29
sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4Parser.py 0000664 0000000 0000000 00000641610 14351074657 0024734 0 ustar 00root root 0000000 0000000 # encoding: utf-8
from antlr4 import *
from io import StringIO
from typing import TextIO  # typing.io is deprecated/removed in newer Pythons; TextIO lives in typing
import sys
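# serializedATN() returns the parser's augmented transition network (ATN) as a
# serialized string; it is deserialized once, in the class body below, and the
# resulting atn/decisionsToDFA objects are shared by every parser instance.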
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3@")
buf.write("\u027b\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\3\2\7\2~\n\2\f\2\16\2\u0081\13\2\3")
buf.write("\2\3\2\3\2\3\2\7\2\u0087\n\2\f\2\16\2\u008a\13\2\3\2\3")
buf.write("\2\7\2\u008e\n\2\f\2\16\2\u0091\13\2\3\2\3\2\3\3\3\3\3")
buf.write("\3\3\3\3\3\5\3\u009a\n\3\3\4\3\4\3\4\3\4\3\4\5\4\u00a1")
buf.write("\n\4\3\5\3\5\3\5\3\5\7\5\u00a7\n\5\f\5\16\5\u00aa\13\5")
buf.write("\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\7\7\u00b5\n\7\f\7")
buf.write("\16\7\u00b8\13\7\3\7\3\7\3\7\5\7\u00bd\n\7\3\b\3\b\3\b")
buf.write("\3\b\7\b\u00c3\n\b\f\b\16\b\u00c6\13\b\3\b\3\b\3\t\3\t")
buf.write("\3\n\3\n\5\n\u00ce\n\n\3\n\3\n\3\13\3\13\5\13\u00d4\n")
buf.write("\13\3\13\3\13\3\f\3\f\3\f\7\f\u00db\n\f\f\f\16\f\u00de")
buf.write("\13\f\3\f\5\f\u00e1\n\f\3\r\3\r\3\r\3\r\5\r\u00e7\n\r")
buf.write("\3\r\3\r\3\r\3\16\3\16\3\16\5\16\u00ef\n\16\3\17\3\17")
buf.write("\7\17\u00f3\n\17\f\17\16\17\u00f6\13\17\3\17\3\17\3\20")
buf.write("\3\20\7\20\u00fc\n\20\f\20\16\20\u00ff\13\20\3\20\3\20")
buf.write("\3\21\3\21\3\21\3\21\7\21\u0107\n\21\f\21\16\21\u010a")
buf.write("\13\21\3\22\7\22\u010d\n\22\f\22\16\22\u0110\13\22\3\23")
buf.write("\7\23\u0113\n\23\f\23\16\23\u0116\13\23\3\23\3\23\5\23")
buf.write("\u011a\n\23\3\24\7\24\u011d\n\24\f\24\16\24\u0120\13\24")
buf.write("\3\24\5\24\u0123\n\24\3\24\3\24\5\24\u0127\n\24\3\24\5")
buf.write("\24\u012a\n\24\3\24\5\24\u012d\n\24\3\24\5\24\u0130\n")
buf.write("\24\3\24\7\24\u0133\n\24\f\24\16\24\u0136\13\24\3\24\3")
buf.write("\24\3\24\3\24\3\24\3\25\7\25\u013e\n\25\f\25\16\25\u0141")
buf.write("\13\25\3\25\5\25\u0144\n\25\3\26\3\26\3\26\3\26\3\27\3")
buf.write("\27\3\27\3\30\3\30\5\30\u014f\n\30\3\31\3\31\3\31\3\32")
buf.write("\3\32\3\32\3\32\7\32\u0158\n\32\f\32\16\32\u015b\13\32")
buf.write("\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\6\35\u0165\n")
buf.write("\35\r\35\16\35\u0166\3\36\3\36\3\37\3\37\3 \3 \3 \7 \u0170")
buf.write("\n \f \16 \u0173\13 \3!\3!\3!\5!\u0178\n!\3\"\7\"\u017b")
buf.write("\n\"\f\"\16\"\u017e\13\"\3\"\5\"\u0181\n\"\3\"\3\"\3\"")
buf.write("\3\"\3\"\3#\3#\3$\3$\3$\7$\u018d\n$\f$\16$\u0190\13$\3")
buf.write("%\3%\5%\u0194\n%\3%\5%\u0197\n%\3&\6&\u019a\n&\r&\16&")
buf.write("\u019b\3\'\3\'\5\'\u01a0\n\'\3\'\3\'\5\'\u01a4\n\'\3\'")
buf.write("\3\'\5\'\u01a8\n\'\3\'\3\'\5\'\u01ac\n\'\5\'\u01ae\n\'")
buf.write("\3(\3(\3(\3(\5(\u01b4\n(\3)\3)\3)\3)\3*\3*\3*\3*\7*\u01be")
buf.write("\n*\f*\16*\u01c1\13*\3+\3+\3+\3+\3+\3+\5+\u01c9\n+\3,")
buf.write("\3,\5,\u01cd\n,\3-\3-\5-\u01d1\n-\3.\3.\3.\7.\u01d6\n")
buf.write(".\f.\16.\u01d9\13.\3/\5/\u01dc\n/\3/\6/\u01df\n/\r/\16")
buf.write("/\u01e0\3/\5/\u01e4\n/\3\60\3\60\5\60\u01e8\n\60\3\60")
buf.write("\3\60\5\60\u01ec\n\60\3\60\3\60\5\60\u01f0\n\60\3\60\3")
buf.write("\60\5\60\u01f4\n\60\3\60\5\60\u01f7\n\60\3\61\3\61\3\61")
buf.write("\3\61\5\61\u01fd\n\61\3\62\3\62\5\62\u0201\n\62\3\62\3")
buf.write("\62\5\62\u0205\n\62\3\62\3\62\5\62\u0209\n\62\5\62\u020b")
buf.write("\n\62\3\63\3\63\3\63\3\63\3\63\3\63\5\63\u0213\n\63\3")
buf.write("\63\5\63\u0216\n\63\3\64\3\64\3\64\3\64\3\64\5\64\u021d")
buf.write("\n\64\5\64\u021f\n\64\3\65\3\65\3\65\3\65\5\65\u0225\n")
buf.write("\65\3\66\3\66\3\66\3\66\7\66\u022b\n\66\f\66\16\66\u022e")
buf.write("\13\66\3\66\3\66\3\67\3\67\5\67\u0234\n\67\3\67\3\67\5")
buf.write("\67\u0238\n\67\3\67\3\67\5\67\u023c\n\67\38\38\58\u0240")
buf.write("\n8\38\78\u0243\n8\f8\168\u0246\138\38\58\u0249\n8\38")
buf.write("\38\38\39\39\59\u0250\n9\39\59\u0253\n9\3:\3:\3:\3:\3")
buf.write(";\3;\5;\u025b\n;\3;\3;\5;\u025f\n;\5;\u0261\n;\3<\3<\3")
buf.write("<\3<\7<\u0267\n<\f<\16<\u026a\13<\3<\3<\3=\3=\3=\3=\3")
buf.write("=\5=\u0273\n=\5=\u0275\n=\3>\3>\5>\u0279\n>\3>\2\2?\2")
buf.write("\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64")
buf.write("\668:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz\2\4\4\2\23\23\27")
buf.write("\31\4\2++..\2\u02a8\2\177\3\2\2\2\4\u0099\3\2\2\2\6\u00a0")
buf.write("\3\2\2\2\b\u00a2\3\2\2\2\n\u00ad\3\2\2\2\f\u00bc\3\2\2")
buf.write("\2\16\u00be\3\2\2\2\20\u00c9\3\2\2\2\22\u00cb\3\2\2\2")
buf.write("\24\u00d1\3\2\2\2\26\u00d7\3\2\2\2\30\u00e2\3\2\2\2\32")
buf.write("\u00ee\3\2\2\2\34\u00f0\3\2\2\2\36\u00f9\3\2\2\2 \u0102")
buf.write("\3\2\2\2\"\u010e\3\2\2\2$\u0114\3\2\2\2&\u011e\3\2\2\2")
buf.write("(\u013f\3\2\2\2*\u0145\3\2\2\2,\u0149\3\2\2\2.\u014e\3")
buf.write("\2\2\2\60\u0150\3\2\2\2\62\u0153\3\2\2\2\64\u015c\3\2")
buf.write("\2\2\66\u015f\3\2\2\28\u0164\3\2\2\2:\u0168\3\2\2\2<\u016a")
buf.write("\3\2\2\2>\u016c\3\2\2\2@\u0174\3\2\2\2B\u017c\3\2\2\2")
buf.write("D\u0187\3\2\2\2F\u0189\3\2\2\2H\u0196\3\2\2\2J\u0199\3")
buf.write("\2\2\2L\u01ad\3\2\2\2N\u01af\3\2\2\2P\u01b5\3\2\2\2R\u01b9")
buf.write("\3\2\2\2T\u01c8\3\2\2\2V\u01cc\3\2\2\2X\u01d0\3\2\2\2")
buf.write("Z\u01d2\3\2\2\2\\\u01e3\3\2\2\2^\u01f6\3\2\2\2`\u01f8")
buf.write("\3\2\2\2b\u020a\3\2\2\2d\u0215\3\2\2\2f\u021e\3\2\2\2")
buf.write("h\u0224\3\2\2\2j\u0226\3\2\2\2l\u023b\3\2\2\2n\u023d\3")
buf.write("\2\2\2p\u024d\3\2\2\2r\u0254\3\2\2\2t\u0260\3\2\2\2v\u0262")
buf.write("\3\2\2\2x\u0274\3\2\2\2z\u0278\3\2\2\2|~\7\6\2\2}|\3\2")
buf.write("\2\2~\u0081\3\2\2\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080")
buf.write("\u0082\3\2\2\2\u0081\177\3\2\2\2\u0082\u0083\5\4\3\2\u0083")
buf.write("\u0084\5z>\2\u0084\u0088\7#\2\2\u0085\u0087\5\6\4\2\u0086")
buf.write("\u0085\3\2\2\2\u0087\u008a\3\2\2\2\u0088\u0086\3\2\2\2")
buf.write("\u0088\u0089\3\2\2\2\u0089\u008b\3\2\2\2\u008a\u0088\3")
buf.write("\2\2\2\u008b\u008f\5\"\22\2\u008c\u008e\5 \21\2\u008d")
buf.write("\u008c\3\2\2\2\u008e\u0091\3\2\2\2\u008f\u008d\3\2\2\2")
buf.write("\u008f\u0090\3\2\2\2\u0090\u0092\3\2\2\2\u0091\u008f\3")
buf.write("\2\2\2\u0092\u0093\7\2\2\3\u0093\3\3\2\2\2\u0094\u0095")
buf.write("\7\24\2\2\u0095\u009a\7\26\2\2\u0096\u0097\7\25\2\2\u0097")
buf.write("\u009a\7\26\2\2\u0098\u009a\7\26\2\2\u0099\u0094\3\2\2")
buf.write("\2\u0099\u0096\3\2\2\2\u0099\u0098\3\2\2\2\u009a\5\3\2")
buf.write("\2\2\u009b\u00a1\5\b\5\2\u009c\u00a1\5\16\b\2\u009d\u00a1")
buf.write("\5\22\n\2\u009e\u00a1\5\24\13\2\u009f\u00a1\5\30\r\2\u00a0")
buf.write("\u009b\3\2\2\2\u00a0\u009c\3\2\2\2\u00a0\u009d\3\2\2\2")
buf.write("\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1\7\3\2\2")
buf.write("\2\u00a2\u00a8\7\17\2\2\u00a3\u00a4\5\n\6\2\u00a4\u00a5")
buf.write("\7#\2\2\u00a5\u00a7\3\2\2\2\u00a6\u00a3\3\2\2\2\u00a7")
buf.write("\u00aa\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a8\u00a9\3\2\2\2")
buf.write("\u00a9\u00ab\3\2\2\2\u00aa\u00a8\3\2\2\2\u00ab\u00ac\7")
buf.write("\'\2\2\u00ac\t\3\2\2\2\u00ad\u00ae\5z>\2\u00ae\u00af\7")
buf.write("+\2\2\u00af\u00b0\5\f\7\2\u00b0\13\3\2\2\2\u00b1\u00b6")
buf.write("\5z>\2\u00b2\u00b3\7\63\2\2\u00b3\u00b5\5z>\2\u00b4\u00b2")
buf.write("\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6")
buf.write("\u00b7\3\2\2\2\u00b7\u00bd\3\2\2\2\u00b8\u00b6\3\2\2\2")
buf.write("\u00b9\u00bd\7\13\2\2\u00ba\u00bd\5\34\17\2\u00bb\u00bd")
buf.write("\7\n\2\2\u00bc\u00b1\3\2\2\2\u00bc\u00b9\3\2\2\2\u00bc")
buf.write("\u00ba\3\2\2\2\u00bc\u00bb\3\2\2\2\u00bd\r\3\2\2\2\u00be")
buf.write("\u00bf\7\22\2\2\u00bf\u00c4\5\20\t\2\u00c0\u00c1\7\"\2")
buf.write("\2\u00c1\u00c3\5\20\t\2\u00c2\u00c0\3\2\2\2\u00c3\u00c6")
buf.write("\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5")
buf.write("\u00c7\3\2\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00c8\7#\2\2")
buf.write("\u00c8\17\3\2\2\2\u00c9\u00ca\5z>\2\u00ca\21\3\2\2\2\u00cb")
buf.write("\u00cd\7\20\2\2\u00cc\u00ce\5\26\f\2\u00cd\u00cc\3\2\2")
buf.write("\2\u00cd\u00ce\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d0")
buf.write("\7\'\2\2\u00d0\23\3\2\2\2\u00d1\u00d3\7\21\2\2\u00d2\u00d4")
buf.write("\5\26\f\2\u00d3\u00d2\3\2\2\2\u00d3\u00d4\3\2\2\2\u00d4")
buf.write("\u00d5\3\2\2\2\u00d5\u00d6\7\'\2\2\u00d6\25\3\2\2\2\u00d7")
buf.write("\u00dc\5z>\2\u00d8\u00d9\7\"\2\2\u00d9\u00db\5z>\2\u00da")
buf.write("\u00d8\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2")
buf.write("\u00dc\u00dd\3\2\2\2\u00dd\u00e0\3\2\2\2\u00de\u00dc\3")
buf.write("\2\2\2\u00df\u00e1\7\"\2\2\u00e0\u00df\3\2\2\2\u00e0\u00e1")
buf.write("\3\2\2\2\u00e1\27\3\2\2\2\u00e2\u00e6\7\64\2\2\u00e3\u00e4")
buf.write("\5\32\16\2\u00e4\u00e5\7!\2\2\u00e5\u00e7\3\2\2\2\u00e6")
buf.write("\u00e3\3\2\2\2\u00e6\u00e7\3\2\2\2\u00e7\u00e8\3\2\2\2")
buf.write("\u00e8\u00e9\5z>\2\u00e9\u00ea\5\34\17\2\u00ea\31\3\2")
buf.write("\2\2\u00eb\u00ef\5z>\2\u00ec\u00ef\7\24\2\2\u00ed\u00ef")
buf.write("\7\25\2\2\u00ee\u00eb\3\2\2\2\u00ee\u00ec\3\2\2\2\u00ee")
buf.write("\u00ed\3\2\2\2\u00ef\33\3\2\2\2\u00f0\u00f4\7\16\2\2\u00f1")
buf.write("\u00f3\7?\2\2\u00f2\u00f1\3\2\2\2\u00f3\u00f6\3\2\2\2")
buf.write("\u00f4\u00f2\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5\u00f7\3")
buf.write("\2\2\2\u00f6\u00f4\3\2\2\2\u00f7\u00f8\7=\2\2\u00f8\35")
buf.write("\3\2\2\2\u00f9\u00fd\7\r\2\2\u00fa\u00fc\7<\2\2\u00fb")
buf.write("\u00fa\3\2\2\2\u00fc\u00ff\3\2\2\2\u00fd\u00fb\3\2\2\2")
buf.write("\u00fd\u00fe\3\2\2\2\u00fe\u0100\3\2\2\2\u00ff\u00fd\3")
buf.write("\2\2\2\u0100\u0101\7:\2\2\u0101\37\3\2\2\2\u0102\u0103")
buf.write("\7\37\2\2\u0103\u0104\5z>\2\u0104\u0108\7#\2\2\u0105\u0107")
buf.write("\5B\"\2\u0106\u0105\3\2\2\2\u0107\u010a\3\2\2\2\u0108")
buf.write("\u0106\3\2\2\2\u0108\u0109\3\2\2\2\u0109!\3\2\2\2\u010a")
buf.write("\u0108\3\2\2\2\u010b\u010d\5$\23\2\u010c\u010b\3\2\2\2")
buf.write("\u010d\u0110\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3")
buf.write("\2\2\2\u010f#\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0113")
buf.write("\7\7\2\2\u0112\u0111\3\2\2\2\u0113\u0116\3\2\2\2\u0114")
buf.write("\u0112\3\2\2\2\u0114\u0115\3\2\2\2\u0115\u0119\3\2\2\2")
buf.write("\u0116\u0114\3\2\2\2\u0117\u011a\5&\24\2\u0118\u011a\5")
buf.write("B\"\2\u0119\u0117\3\2\2\2\u0119\u0118\3\2\2\2\u011a%\3")
buf.write("\2\2\2\u011b\u011d\7\6\2\2\u011c\u011b\3\2\2\2\u011d\u0120")
buf.write("\3\2\2\2\u011e\u011c\3\2\2\2\u011e\u011f\3\2\2\2\u011f")
buf.write("\u0122\3\2\2\2\u0120\u011e\3\2\2\2\u0121\u0123\58\35\2")
buf.write("\u0122\u0121\3\2\2\2\u0122\u0123\3\2\2\2\u0123\u0124\3")
buf.write("\2\2\2\u0124\u0126\7\4\2\2\u0125\u0127\5\36\20\2\u0126")
buf.write("\u0125\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u0129\3\2\2\2")
buf.write("\u0128\u012a\5\60\31\2\u0129\u0128\3\2\2\2\u0129\u012a")
buf.write("\3\2\2\2\u012a\u012c\3\2\2\2\u012b\u012d\5\62\32\2\u012c")
buf.write("\u012b\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012f\3\2\2\2")
buf.write("\u012e\u0130\5\64\33\2\u012f\u012e\3\2\2\2\u012f\u0130")
buf.write("\3\2\2\2\u0130\u0134\3\2\2\2\u0131\u0133\5.\30\2\u0132")
buf.write("\u0131\3\2\2\2\u0133\u0136\3\2\2\2\u0134\u0132\3\2\2\2")
buf.write("\u0134\u0135\3\2\2\2\u0135\u0137\3\2\2\2\u0136\u0134\3")
buf.write("\2\2\2\u0137\u0138\7 \2\2\u0138\u0139\5<\37\2\u0139\u013a")
buf.write("\7#\2\2\u013a\u013b\5(\25\2\u013b\'\3\2\2\2\u013c\u013e")
buf.write("\5*\26\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2\2\2\u013f")
buf.write("\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u0143\3\2\2\2")
buf.write("\u0141\u013f\3\2\2\2\u0142\u0144\5,\27\2\u0143\u0142\3")
buf.write("\2\2\2\u0143\u0144\3\2\2\2\u0144)\3\2\2\2\u0145\u0146")
buf.write("\7\35\2\2\u0146\u0147\5\36\20\2\u0147\u0148\5\34\17\2")
buf.write("\u0148+\3\2\2\2\u0149\u014a\7\36\2\2\u014a\u014b\5\34")
buf.write("\17\2\u014b-\3\2\2\2\u014c\u014f\5\b\5\2\u014d\u014f\5")
buf.write("\66\34\2\u014e\u014c\3\2\2\2\u014e\u014d\3\2\2\2\u014f")
buf.write("/\3\2\2\2\u0150\u0151\7\32\2\2\u0151\u0152\5\36\20\2\u0152")
buf.write("\61\3\2\2\2\u0153\u0154\7\34\2\2\u0154\u0159\5z>\2\u0155")
buf.write("\u0156\7\"\2\2\u0156\u0158\5z>\2\u0157\u0155\3\2\2\2\u0158")
buf.write("\u015b\3\2\2\2\u0159\u0157\3\2\2\2\u0159\u015a\3\2\2\2")
buf.write("\u015a\63\3\2\2\2\u015b\u0159\3\2\2\2\u015c\u015d\7\33")
buf.write("\2\2\u015d\u015e\5\36\20\2\u015e\65\3\2\2\2\u015f\u0160")
buf.write("\7\64\2\2\u0160\u0161\5z>\2\u0161\u0162\5\34\17\2\u0162")
buf.write("\67\3\2\2\2\u0163\u0165\5:\36\2\u0164\u0163\3\2\2\2\u0165")
buf.write("\u0166\3\2\2\2\u0166\u0164\3\2\2\2\u0166\u0167\3\2\2\2")
buf.write("\u01679\3\2\2\2\u0168\u0169\t\2\2\2\u0169;\3\2\2\2\u016a")
buf.write("\u016b\5> \2\u016b=\3\2\2\2\u016c\u0171\5@!\2\u016d\u016e")
buf.write("\7\60\2\2\u016e\u0170\5@!\2\u016f\u016d\3\2\2\2\u0170")
buf.write("\u0173\3\2\2\2\u0171\u016f\3\2\2\2\u0171\u0172\3\2\2\2")
buf.write("\u0172?\3\2\2\2\u0173\u0171\3\2\2\2\u0174\u0177\5\\/\2")
buf.write("\u0175\u0176\7\65\2\2\u0176\u0178\5z>\2\u0177\u0175\3")
buf.write("\2\2\2\u0177\u0178\3\2\2\2\u0178A\3\2\2\2\u0179\u017b")
buf.write("\7\6\2\2\u017a\u0179\3\2\2\2\u017b\u017e\3\2\2\2\u017c")
buf.write("\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d\u0180\3\2\2\2")
buf.write("\u017e\u017c\3\2\2\2\u017f\u0181\7\23\2\2\u0180\u017f")
buf.write("\3\2\2\2\u0180\u0181\3\2\2\2\u0181\u0182\3\2\2\2\u0182")
buf.write("\u0183\7\3\2\2\u0183\u0184\7 \2\2\u0184\u0185\5D#\2\u0185")
buf.write("\u0186\7#\2\2\u0186C\3\2\2\2\u0187\u0188\5F$\2\u0188E")
buf.write("\3\2\2\2\u0189\u018e\5H%\2\u018a\u018b\7\60\2\2\u018b")
buf.write("\u018d\5H%\2\u018c\u018a\3\2\2\2\u018d\u0190\3\2\2\2\u018e")
buf.write("\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018fG\3\2\2\2\u0190")
buf.write("\u018e\3\2\2\2\u0191\u0193\5J&\2\u0192\u0194\5R*\2\u0193")
buf.write("\u0192\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0197\3\2\2\2")
buf.write("\u0195\u0197\3\2\2\2\u0196\u0191\3\2\2\2\u0196\u0195\3")
buf.write("\2\2\2\u0197I\3\2\2\2\u0198\u019a\5L\'\2\u0199\u0198\3")
buf.write("\2\2\2\u019a\u019b\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c")
buf.write("\3\2\2\2\u019cK\3\2\2\2\u019d\u019f\5N(\2\u019e\u01a0")
buf.write("\5b\62\2\u019f\u019e\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0")
buf.write("\u01ae\3\2\2\2\u01a1\u01a3\5d\63\2\u01a2\u01a4\5b\62\2")
buf.write("\u01a3\u01a2\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01ae\3")
buf.write("\2\2\2\u01a5\u01a7\5P)\2\u01a6\u01a8\5b\62\2\u01a7\u01a6")
buf.write("\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01ae\3\2\2\2\u01a9")
buf.write("\u01ab\5\34\17\2\u01aa\u01ac\7,\2\2\u01ab\u01aa\3\2\2")
buf.write("\2\u01ab\u01ac\3\2\2\2\u01ac\u01ae\3\2\2\2\u01ad\u019d")
buf.write("\3\2\2\2\u01ad\u01a1\3\2\2\2\u01ad\u01a5\3\2\2\2\u01ad")
buf.write("\u01a9\3\2\2\2\u01aeM\3\2\2\2\u01af\u01b0\5z>\2\u01b0")
buf.write("\u01b3\t\3\2\2\u01b1\u01b4\5d\63\2\u01b2\u01b4\5P)\2\u01b3")
buf.write("\u01b1\3\2\2\2\u01b3\u01b2\3\2\2\2\u01b4O\3\2\2\2\u01b5")
buf.write("\u01b6\7$\2\2\u01b6\u01b7\5F$\2\u01b7\u01b8\7%\2\2\u01b8")
buf.write("Q\3\2\2\2\u01b9\u01ba\7(\2\2\u01ba\u01bf\5T+\2\u01bb\u01bc")
buf.write("\7\"\2\2\u01bc\u01be\5T+\2\u01bd\u01bb\3\2\2\2\u01be\u01c1")
buf.write("\3\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0")
buf.write("S\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c2\u01c3\5V,\2\u01c3")
buf.write("\u01c4\7$\2\2\u01c4\u01c5\5X-\2\u01c5\u01c6\7%\2\2\u01c6")
buf.write("\u01c9\3\2\2\2\u01c7\u01c9\5V,\2\u01c8\u01c2\3\2\2\2\u01c8")
buf.write("\u01c7\3\2\2\2\u01c9U\3\2\2\2\u01ca\u01cd\5z>\2\u01cb")
buf.write("\u01cd\7\37\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cb\3\2\2")
buf.write("\2\u01cdW\3\2\2\2\u01ce\u01d1\5z>\2\u01cf\u01d1\7\n\2")
buf.write("\2\u01d0\u01ce\3\2\2\2\u01d0\u01cf\3\2\2\2\u01d1Y\3\2")
buf.write("\2\2\u01d2\u01d7\5\\/\2\u01d3\u01d4\7\60\2\2\u01d4\u01d6")
buf.write("\5\\/\2\u01d5\u01d3\3\2\2\2\u01d6\u01d9\3\2\2\2\u01d7")
buf.write("\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8[\3\2\2\2\u01d9")
buf.write("\u01d7\3\2\2\2\u01da\u01dc\5v<\2\u01db\u01da\3\2\2\2\u01db")
buf.write("\u01dc\3\2\2\2\u01dc\u01de\3\2\2\2\u01dd\u01df\5^\60\2")
buf.write("\u01de\u01dd\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01de\3")
buf.write("\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\3\2\2\2\u01e2\u01e4")
buf.write("\3\2\2\2\u01e3\u01db\3\2\2\2\u01e3\u01e2\3\2\2\2\u01e4")
buf.write("]\3\2\2\2\u01e5\u01e7\5`\61\2\u01e6\u01e8\5b\62\2\u01e7")
buf.write("\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01f7\3\2\2\2")
buf.write("\u01e9\u01eb\5f\64\2\u01ea\u01ec\5b\62\2\u01eb\u01ea\3")
buf.write("\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01f7\3\2\2\2\u01ed\u01ef")
buf.write("\5n8\2\u01ee\u01f0\5b\62\2\u01ef\u01ee\3\2\2\2\u01ef\u01f0")
buf.write("\3\2\2\2\u01f0\u01f7\3\2\2\2\u01f1\u01f3\5\34\17\2\u01f2")
buf.write("\u01f4\7,\2\2\u01f3\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2")
buf.write("\u01f4\u01f7\3\2\2\2\u01f5\u01f7\7\6\2\2\u01f6\u01e5\3")
buf.write("\2\2\2\u01f6\u01e9\3\2\2\2\u01f6\u01ed\3\2\2\2\u01f6\u01f1")
buf.write("\3\2\2\2\u01f6\u01f5\3\2\2\2\u01f7_\3\2\2\2\u01f8\u01f9")
buf.write("\5z>\2\u01f9\u01fc\t\3\2\2\u01fa\u01fd\5f\64\2\u01fb\u01fd")
buf.write("\5n8\2\u01fc\u01fa\3\2\2\2\u01fc\u01fb\3\2\2\2\u01fda")
buf.write("\3\2\2\2\u01fe\u0200\7,\2\2\u01ff\u0201\7,\2\2\u0200\u01ff")
buf.write("\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u020b\3\2\2\2\u0202")
buf.write("\u0204\7-\2\2\u0203\u0205\7,\2\2\u0204\u0203\3\2\2\2\u0204")
buf.write("\u0205\3\2\2\2\u0205\u020b\3\2\2\2\u0206\u0208\7/\2\2")
buf.write("\u0207\u0209\7,\2\2\u0208\u0207\3\2\2\2\u0208\u0209\3")
buf.write("\2\2\2\u0209\u020b\3\2\2\2\u020a\u01fe\3\2\2\2\u020a\u0202")
buf.write("\3\2\2\2\u020a\u0206\3\2\2\2\u020bc\3\2\2\2\u020c\u0216")
buf.write("\5r:\2\u020d\u0216\5t;\2\u020e\u0216\5h\65\2\u020f\u0216")
buf.write("\7\5\2\2\u0210\u0212\7\63\2\2\u0211\u0213\5v<\2\u0212")
buf.write("\u0211\3\2\2\2\u0212\u0213\3\2\2\2\u0213\u0216\3\2\2\2")
buf.write("\u0214\u0216\7\6\2\2\u0215\u020c\3\2\2\2\u0215\u020d\3")
buf.write("\2\2\2\u0215\u020e\3\2\2\2\u0215\u020f\3\2\2\2\u0215\u0210")
buf.write("\3\2\2\2\u0215\u0214\3\2\2\2\u0216e\3\2\2\2\u0217\u021f")
buf.write("\5t;\2\u0218\u021f\5p9\2\u0219\u021f\5h\65\2\u021a\u021c")
buf.write("\7\63\2\2\u021b\u021d\5v<\2\u021c\u021b\3\2\2\2\u021c")
buf.write("\u021d\3\2\2\2\u021d\u021f\3\2\2\2\u021e\u0217\3\2\2\2")
buf.write("\u021e\u0218\3\2\2\2\u021e\u0219\3\2\2\2\u021e\u021a\3")
buf.write("\2\2\2\u021fg\3\2\2\2\u0220\u0221\7\66\2\2\u0221\u0225")
buf.write("\5l\67\2\u0222\u0223\7\66\2\2\u0223\u0225\5j\66\2\u0224")
buf.write("\u0220\3\2\2\2\u0224\u0222\3\2\2\2\u0225i\3\2\2\2\u0226")
buf.write("\u0227\7$\2\2\u0227\u022c\5l\67\2\u0228\u0229\7\60\2\2")
buf.write("\u0229\u022b\5l\67\2\u022a\u0228\3\2\2\2\u022b\u022e\3")
buf.write("\2\2\2\u022c\u022a\3\2\2\2\u022c\u022d\3\2\2\2\u022d\u022f")
buf.write("\3\2\2\2\u022e\u022c\3\2\2\2\u022f\u0230\7%\2\2\u0230")
buf.write("k\3\2\2\2\u0231\u0233\7\3\2\2\u0232\u0234\5v<\2\u0233")
buf.write("\u0232\3\2\2\2\u0233\u0234\3\2\2\2\u0234\u023c\3\2\2\2")
buf.write("\u0235\u0237\7\13\2\2\u0236\u0238\5v<\2\u0237\u0236\3")
buf.write("\2\2\2\u0237\u0238\3\2\2\2\u0238\u023c\3\2\2\2\u0239\u023c")
buf.write("\5r:\2\u023a\u023c\7\5\2\2\u023b\u0231\3\2\2\2\u023b\u0235")
buf.write("\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023a\3\2\2\2\u023c")
buf.write("m\3\2\2\2\u023d\u0248\7$\2\2\u023e\u0240\5\b\5\2\u023f")
buf.write("\u023e\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u0244\3\2\2\2")
buf.write("\u0241\u0243\5\66\34\2\u0242\u0241\3\2\2\2\u0243\u0246")
buf.write("\3\2\2\2\u0244\u0242\3\2\2\2\u0244\u0245\3\2\2\2\u0245")
buf.write("\u0247\3\2\2\2\u0246\u0244\3\2\2\2\u0247\u0249\7 \2\2")
buf.write("\u0248\u023f\3\2\2\2\u0248\u0249\3\2\2\2\u0249\u024a\3")
buf.write("\2\2\2\u024a\u024b\5Z.\2\u024b\u024c\7%\2\2\u024co\3\2")
buf.write("\2\2\u024d\u024f\7\4\2\2\u024e\u0250\5\36\20\2\u024f\u024e")
buf.write("\3\2\2\2\u024f\u0250\3\2\2\2\u0250\u0252\3\2\2\2\u0251")
buf.write("\u0253\5v<\2\u0252\u0251\3\2\2\2\u0252\u0253\3\2\2\2\u0253")
buf.write("q\3\2\2\2\u0254\u0255\7\13\2\2\u0255\u0256\7\62\2\2\u0256")
buf.write("\u0257\7\13\2\2\u0257s\3\2\2\2\u0258\u025a\7\3\2\2\u0259")
buf.write("\u025b\5v<\2\u025a\u0259\3\2\2\2\u025a\u025b\3\2\2\2\u025b")
buf.write("\u0261\3\2\2\2\u025c\u025e\7\13\2\2\u025d\u025f\5v<\2")
buf.write("\u025e\u025d\3\2\2\2\u025e\u025f\3\2\2\2\u025f\u0261\3")
buf.write("\2\2\2\u0260\u0258\3\2\2\2\u0260\u025c\3\2\2\2\u0261u")
buf.write("\3\2\2\2\u0262\u0263\7)\2\2\u0263\u0268\5x=\2\u0264\u0265")
buf.write("\7\"\2\2\u0265\u0267\5x=\2\u0266\u0264\3\2\2\2\u0267\u026a")
buf.write("\3\2\2\2\u0268\u0266\3\2\2\2\u0268\u0269\3\2\2\2\u0269")
buf.write("\u026b\3\2\2\2\u026a\u0268\3\2\2\2\u026b\u026c\7*\2\2")
buf.write("\u026cw\3\2\2\2\u026d\u0275\5z>\2\u026e\u026f\5z>\2\u026f")
buf.write("\u0272\7+\2\2\u0270\u0273\5z>\2\u0271\u0273\7\13\2\2\u0272")
buf.write("\u0270\3\2\2\2\u0272\u0271\3\2\2\2\u0273\u0275\3\2\2\2")
buf.write("\u0274\u026d\3\2\2\2\u0274\u026e\3\2\2\2\u0275y\3\2\2")
buf.write("\2\u0276\u0279\7\4\2\2\u0277\u0279\7\3\2\2\u0278\u0276")
buf.write("\3\2\2\2\u0278\u0277\3\2\2\2\u0279{\3\2\2\2X\177\u0088")
buf.write("\u008f\u0099\u00a0\u00a8\u00b6\u00bc\u00c4\u00cd\u00d3")
buf.write("\u00dc\u00e0\u00e6\u00ee\u00f4\u00fd\u0108\u010e\u0114")
buf.write("\u0119\u011e\u0122\u0126\u0129\u012c\u012f\u0134\u013f")
buf.write("\u0143\u014e\u0159\u0166\u0171\u0177\u017c\u0180\u018e")
buf.write("\u0193\u0196\u019b\u019f\u01a3\u01a7\u01ab\u01ad\u01b3")
buf.write("\u01bf\u01c8\u01cc\u01d0\u01d7\u01db\u01e0\u01e3\u01e7")
buf.write("\u01eb\u01ef\u01f3\u01f6\u01fc\u0200\u0204\u0208\u020a")
buf.write("\u0212\u0215\u021c\u021e\u0224\u022c\u0233\u0237\u023b")
buf.write("\u023f\u0244\u0248\u024f\u0252\u025a\u025e\u0260\u0268")
buf.write("\u0272\u0274\u0278")
return buf.getvalue()
class ANTLRv4Parser ( Parser ):
grammarFileName = "ANTLRv4Parser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
    literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"'import'", "'fragment'", "'lexer'", "'parser'", "'grammar'",
"'protected'", "'public'", "'private'", "'returns'",
"'locals'", "'throws'", "'catch'", "'finally'", "'mode'" ]
    symbolicNames = [ "<INVALID>", "TOKEN_REF", "RULE_REF", "LEXER_CHAR_SET",
"DOC_COMMENT", "HEADER", "BLOCK_COMMENT", "LINE_COMMENT",
"INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL",
"BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS",
"CHANNELS", "IMPORT", "FRAGMENT", "LEXER", "PARSER",
"GRAMMAR", "PROTECTED", "PUBLIC", "PRIVATE", "RETURNS",
"LOCALS", "THROWS", "CATCH", "FINALLY", "MODE", "COLON",
"COLONCOLON", "COMMA", "SEMI", "LPAREN", "RPAREN",
"LBRACE", "RBRACE", "RARROW", "LT", "GT", "ASSIGN",
"QUESTION", "STAR", "PLUS_ASSIGN", "PLUS", "OR", "DOLLAR",
"RANGE", "DOT", "AT", "POUND", "NOT", "ID", "WS",
"ERRCHAR", "END_ARGUMENT", "UNTERMINATED_ARGUMENT",
"ARGUMENT_CONTENT", "END_ACTION", "UNTERMINATED_ACTION",
"ACTION_CONTENT", "UNTERMINATED_CHAR_SET" ]
RULE_grammarSpec = 0
RULE_grammarType = 1
RULE_prequelConstruct = 2
RULE_optionsSpec = 3
RULE_option = 4
RULE_optionValue = 5
RULE_delegateGrammars = 6
RULE_delegateGrammar = 7
RULE_tokensSpec = 8
RULE_channelsSpec = 9
RULE_idList = 10
RULE_action = 11
RULE_actionScopeName = 12
RULE_actionBlock = 13
RULE_argActionBlock = 14
RULE_modeSpec = 15
RULE_rules = 16
RULE_ruleSpec = 17
RULE_parserRuleSpec = 18
RULE_exceptionGroup = 19
RULE_exceptionHandler = 20
RULE_finallyClause = 21
RULE_rulePrequel = 22
RULE_ruleReturns = 23
RULE_throwsSpec = 24
RULE_localsSpec = 25
RULE_ruleAction = 26
RULE_ruleModifiers = 27
RULE_ruleModifier = 28
RULE_ruleBlock = 29
RULE_ruleAltList = 30
RULE_labeledAlt = 31
RULE_lexerRuleSpec = 32
RULE_lexerRuleBlock = 33
RULE_lexerAltList = 34
RULE_lexerAlt = 35
RULE_lexerElements = 36
RULE_lexerElement = 37
RULE_labeledLexerElement = 38
RULE_lexerBlock = 39
RULE_lexerCommands = 40
RULE_lexerCommand = 41
RULE_lexerCommandName = 42
RULE_lexerCommandExpr = 43
RULE_altList = 44
RULE_alternative = 45
RULE_element = 46
RULE_labeledElement = 47
RULE_ebnfSuffix = 48
RULE_lexerAtom = 49
RULE_atom = 50
RULE_notSet = 51
RULE_blockSet = 52
RULE_setElement = 53
RULE_block = 54
RULE_ruleref = 55
RULE_characterRange = 56
RULE_terminal = 57
RULE_elementOptions = 58
RULE_elementOption = 59
RULE_identifier = 60
ruleNames = [ "grammarSpec", "grammarType", "prequelConstruct", "optionsSpec",
"option", "optionValue", "delegateGrammars", "delegateGrammar",
"tokensSpec", "channelsSpec", "idList", "action", "actionScopeName",
"actionBlock", "argActionBlock", "modeSpec", "rules",
"ruleSpec", "parserRuleSpec", "exceptionGroup", "exceptionHandler",
"finallyClause", "rulePrequel", "ruleReturns", "throwsSpec",
"localsSpec", "ruleAction", "ruleModifiers", "ruleModifier",
"ruleBlock", "ruleAltList", "labeledAlt", "lexerRuleSpec",
"lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements",
"lexerElement", "labeledLexerElement", "lexerBlock",
"lexerCommands", "lexerCommand", "lexerCommandName",
"lexerCommandExpr", "altList", "alternative", "element",
"labeledElement", "ebnfSuffix", "lexerAtom", "atom",
"notSet", "blockSet", "setElement", "block", "ruleref",
"characterRange", "terminal", "elementOptions", "elementOption",
"identifier" ]
EOF = Token.EOF
TOKEN_REF=1
RULE_REF=2
LEXER_CHAR_SET=3
DOC_COMMENT=4
HEADER=5
BLOCK_COMMENT=6
LINE_COMMENT=7
INT=8
STRING_LITERAL=9
UNTERMINATED_STRING_LITERAL=10
BEGIN_ARGUMENT=11
BEGIN_ACTION=12
OPTIONS=13
TOKENS=14
CHANNELS=15
IMPORT=16
FRAGMENT=17
LEXER=18
PARSER=19
GRAMMAR=20
PROTECTED=21
PUBLIC=22
PRIVATE=23
RETURNS=24
LOCALS=25
THROWS=26
CATCH=27
FINALLY=28
MODE=29
COLON=30
COLONCOLON=31
COMMA=32
SEMI=33
LPAREN=34
RPAREN=35
LBRACE=36
RBRACE=37
RARROW=38
LT=39
GT=40
ASSIGN=41
QUESTION=42
STAR=43
PLUS_ASSIGN=44
PLUS=45
OR=46
DOLLAR=47
RANGE=48
DOT=49
AT=50
POUND=51
NOT=52
ID=53
WS=54
ERRCHAR=55
END_ARGUMENT=56
UNTERMINATED_ARGUMENT=57
ARGUMENT_CONTENT=58
END_ACTION=59
UNTERMINATED_ACTION=60
ACTION_CONTENT=61
UNTERMINATED_CHAR_SET=62
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
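    # One *Context class per grammar rule follows; each exposes typed child
    # accessors plus enterRule/exitRule listener hooks and a visitor accept().
    #
    # A minimal end-to-end sketch, assuming the antlr4-python3-runtime package
    # and the ANTLRv4Lexer generated above:
    #
    #     from antlr4 import InputStream, CommonTokenStream
    #     lexer = ANTLRv4Lexer(InputStream("grammar G; r : 'x' ;"))
    #     parser = ANTLRv4Parser(CommonTokenStream(lexer))
    #     tree = parser.grammarSpec()  # grammarSpec is the entry rule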
class GrammarSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._DOC_COMMENT = None # Token
self.docs = list() # of Tokens
self.gtype = None # GrammarTypeContext
self.gname = None # IdentifierContext
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def rules(self):
return self.getTypedRuleContext(ANTLRv4Parser.RulesContext,0)
def EOF(self):
return self.getToken(ANTLRv4Parser.EOF, 0)
def grammarType(self):
return self.getTypedRuleContext(ANTLRv4Parser.GrammarTypeContext,0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def prequelConstruct(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.PrequelConstructContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.PrequelConstructContext,i)
def modeSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ModeSpecContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ModeSpecContext,i)
def DOC_COMMENT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
else:
return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_grammarSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGrammarSpec" ):
listener.enterGrammarSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGrammarSpec" ):
listener.exitGrammarSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGrammarSpec" ):
return visitor.visitGrammarSpec(self)
else:
return visitor.visitChildren(self)
def grammarSpec(self):
localctx = ANTLRv4Parser.GrammarSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_grammarSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 125
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.DOC_COMMENT:
self.state = 122
localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
localctx.docs.append(localctx._DOC_COMMENT)
self.state = 127
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 128
localctx.gtype = self.grammarType()
self.state = 129
localctx.gname = self.identifier()
self.state = 130
self.match(ANTLRv4Parser.SEMI)
self.state = 134
self._errHandler.sync(self)
_la = self._input.LA(1)
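            # Set-membership test: for token types 0..63, check against a 64-bit
            # mask of the prequel-construct start tokens (OPTIONS, TOKENS,
            # CHANNELS, IMPORT, AT).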
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.TOKENS) | (1 << ANTLRv4Parser.CHANNELS) | (1 << ANTLRv4Parser.IMPORT) | (1 << ANTLRv4Parser.AT))) != 0):
self.state = 131
self.prequelConstruct()
self.state = 136
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 137
self.rules()
self.state = 141
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.MODE:
self.state = 138
self.modeSpec()
self.state = 143
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 144
self.match(ANTLRv4Parser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GrammarTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER(self):
return self.getToken(ANTLRv4Parser.LEXER, 0)
def GRAMMAR(self):
return self.getToken(ANTLRv4Parser.GRAMMAR, 0)
def PARSER(self):
return self.getToken(ANTLRv4Parser.PARSER, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_grammarType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGrammarType" ):
listener.enterGrammarType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGrammarType" ):
listener.exitGrammarType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGrammarType" ):
return visitor.visitGrammarType(self)
else:
return visitor.visitChildren(self)
def grammarType(self):
localctx = ANTLRv4Parser.GrammarTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_grammarType)
try:
self.enterOuterAlt(localctx, 1)
self.state = 151
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.LEXER]:
self.state = 146
self.match(ANTLRv4Parser.LEXER)
self.state = 147
self.match(ANTLRv4Parser.GRAMMAR)
pass
elif token in [ANTLRv4Parser.PARSER]:
self.state = 148
self.match(ANTLRv4Parser.PARSER)
self.state = 149
self.match(ANTLRv4Parser.GRAMMAR)
pass
elif token in [ANTLRv4Parser.GRAMMAR]:
self.state = 150
self.match(ANTLRv4Parser.GRAMMAR)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrequelConstructContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def optionsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)
def delegateGrammars(self):
return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarsContext,0)
def tokensSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.TokensSpecContext,0)
def channelsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.ChannelsSpecContext,0)
def action(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_prequelConstruct
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrequelConstruct" ):
listener.enterPrequelConstruct(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrequelConstruct" ):
listener.exitPrequelConstruct(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPrequelConstruct" ):
return visitor.visitPrequelConstruct(self)
else:
return visitor.visitChildren(self)
def prequelConstruct(self):
localctx = ANTLRv4Parser.PrequelConstructContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_prequelConstruct)
try:
self.state = 158
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.OPTIONS]:
self.enterOuterAlt(localctx, 1)
self.state = 153
self.optionsSpec()
pass
elif token in [ANTLRv4Parser.IMPORT]:
self.enterOuterAlt(localctx, 2)
self.state = 154
self.delegateGrammars()
pass
elif token in [ANTLRv4Parser.TOKENS]:
self.enterOuterAlt(localctx, 3)
self.state = 155
self.tokensSpec()
pass
elif token in [ANTLRv4Parser.CHANNELS]:
self.enterOuterAlt(localctx, 4)
self.state = 156
self.channelsSpec()
pass
elif token in [ANTLRv4Parser.AT]:
self.enterOuterAlt(localctx, 5)
self.state = 157
self.action()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPTIONS(self):
return self.getToken(ANTLRv4Parser.OPTIONS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def option(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.OptionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.OptionContext,i)
def SEMI(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.SEMI)
else:
return self.getToken(ANTLRv4Parser.SEMI, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_optionsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOptionsSpec" ):
listener.enterOptionsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOptionsSpec" ):
listener.exitOptionsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOptionsSpec" ):
return visitor.visitOptionsSpec(self)
else:
return visitor.visitChildren(self)
def optionsSpec(self):
localctx = ANTLRv4Parser.OptionsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_optionsSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 160
self.match(ANTLRv4Parser.OPTIONS)
self.state = 166
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 161
self.option()
self.state = 162
self.match(ANTLRv4Parser.SEMI)
self.state = 168
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 169
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.name = None # IdentifierContext
self.value = None # OptionValueContext
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def optionValue(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionValueContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_option
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOption" ):
listener.enterOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOption" ):
listener.exitOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOption" ):
return visitor.visitOption(self)
else:
return visitor.visitChildren(self)
def option(self):
localctx = ANTLRv4Parser.OptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_option)
try:
self.enterOuterAlt(localctx, 1)
self.state = 171
localctx.name = self.identifier()
self.state = 172
self.match(ANTLRv4Parser.ASSIGN)
self.state = 173
localctx.value = self.optionValue()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionValueContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_optionValue
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class StringOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringOption" ):
listener.enterStringOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringOption" ):
listener.exitStringOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStringOption" ):
return visitor.visitStringOption(self)
else:
return visitor.visitChildren(self)
class IntOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def INT(self):
return self.getToken(ANTLRv4Parser.INT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntOption" ):
listener.enterIntOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntOption" ):
listener.exitIntOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntOption" ):
return visitor.visitIntOption(self)
else:
return visitor.visitChildren(self)
class ActionOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self.value = None # ActionBlockContext
self.copyFrom(ctx)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterActionOption" ):
listener.enterActionOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitActionOption" ):
listener.exitActionOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitActionOption" ):
return visitor.visitActionOption(self)
else:
return visitor.visitChildren(self)
class PathOptionContext(OptionValueContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
super().__init__(parser)
self._identifier = None # IdentifierContext
self.value = list() # of IdentifierContexts
self.copyFrom(ctx)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def DOT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.DOT)
else:
return self.getToken(ANTLRv4Parser.DOT, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPathOption" ):
listener.enterPathOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPathOption" ):
listener.exitPathOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPathOption" ):
return visitor.visitPathOption(self)
else:
return visitor.visitChildren(self)
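# Parses: optionValue : identifier (DOT identifier)*  -> PathOption
#                     | STRING_LITERAL                -> StringOption
#                     | actionBlock                   -> ActionOption
#                     | INT                           -> IntOption
# (alternative labels taken from the context subclasses above)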
def optionValue(self):
localctx = ANTLRv4Parser.OptionValueContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_optionValue)
self._la = 0 # Token type
try:
self.state = 186
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.PathOptionContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 175
localctx._identifier = self.identifier()
localctx.value.append(localctx._identifier)
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.DOT:
self.state = 176
self.match(ANTLRv4Parser.DOT)
self.state = 177
localctx._identifier = self.identifier()
localctx.value.append(localctx._identifier)
self.state = 182
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.StringOptionContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 183
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
pass
elif token in [ANTLRv4Parser.BEGIN_ACTION]:
localctx = ANTLRv4Parser.ActionOptionContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 184
localctx.value = self.actionBlock()
pass
elif token in [ANTLRv4Parser.INT]:
localctx = ANTLRv4Parser.IntOptionContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 185
localctx.value = self.match(ANTLRv4Parser.INT)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DelegateGrammarsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IMPORT(self):
return self.getToken(ANTLRv4Parser.IMPORT, 0)
def delegateGrammar(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.DelegateGrammarContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarContext,i)
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_delegateGrammars
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDelegateGrammars" ):
listener.enterDelegateGrammars(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDelegateGrammars" ):
listener.exitDelegateGrammars(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDelegateGrammars" ):
return visitor.visitDelegateGrammars(self)
else:
return visitor.visitChildren(self)
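# Parses: delegateGrammars : IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI ;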
def delegateGrammars(self):
localctx = ANTLRv4Parser.DelegateGrammarsContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_delegateGrammars)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 188
self.match(ANTLRv4Parser.IMPORT)
self.state = 189
self.delegateGrammar()
self.state = 194
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 190
self.match(ANTLRv4Parser.COMMA)
self.state = 191
self.delegateGrammar()
self.state = 196
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 197
self.match(ANTLRv4Parser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DelegateGrammarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.value = None # IdentifierContext
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_delegateGrammar
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDelegateGrammar" ):
listener.enterDelegateGrammar(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDelegateGrammar" ):
listener.exitDelegateGrammar(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDelegateGrammar" ):
return visitor.visitDelegateGrammar(self)
else:
return visitor.visitChildren(self)
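# Parses: delegateGrammar : value=identifier ;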
def delegateGrammar(self):
localctx = ANTLRv4Parser.DelegateGrammarContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_delegateGrammar)
try:
self.enterOuterAlt(localctx, 1)
self.state = 199
localctx.value = self.identifier()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TokensSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.defs = None # IdListContext
def TOKENS(self):
return self.getToken(ANTLRv4Parser.TOKENS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def idList(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_tokensSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTokensSpec" ):
listener.enterTokensSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTokensSpec" ):
listener.exitTokensSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTokensSpec" ):
return visitor.visitTokensSpec(self)
else:
return visitor.visitChildren(self)
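# Parses: tokensSpec : TOKENS defs=idList? RBRACE ;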
def tokensSpec(self):
localctx = ANTLRv4Parser.TokensSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_tokensSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 201
self.match(ANTLRv4Parser.TOKENS)
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 202
localctx.defs = self.idList()
self.state = 205
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ChannelsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CHANNELS(self):
return self.getToken(ANTLRv4Parser.CHANNELS, 0)
def RBRACE(self):
return self.getToken(ANTLRv4Parser.RBRACE, 0)
def idList(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_channelsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterChannelsSpec" ):
listener.enterChannelsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitChannelsSpec" ):
listener.exitChannelsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitChannelsSpec" ):
return visitor.visitChannelsSpec(self)
else:
return visitor.visitChildren(self)
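# Parses: channelsSpec : CHANNELS idList? RBRACE ;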
def channelsSpec(self):
localctx = ANTLRv4Parser.ChannelsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_channelsSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 207
self.match(ANTLRv4Parser.CHANNELS)
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
self.state = 208
self.idList()
self.state = 211
self.match(ANTLRv4Parser.RBRACE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._identifier = None # IdentifierContext
self.defs = list() # of IdentifierContexts
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_idList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdList" ):
listener.enterIdList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdList" ):
listener.exitIdList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdList" ):
return visitor.visitIdList(self)
else:
return visitor.visitChildren(self)
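# Parses: idList : defs+=identifier (COMMA defs+=identifier)* COMMA? ;
# The (COMMA identifier)* loop is adaptively predicted, which leaves a
# trailing comma to be consumed by the optional COMMA at the end.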
def idList(self):
localctx = ANTLRv4Parser.IdListContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_idList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 213
localctx._identifier = self.identifier()
localctx.defs.append(localctx._identifier)
self.state = 218
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,11,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 214
self.match(ANTLRv4Parser.COMMA)
self.state = 215
localctx._identifier = self.identifier()
localctx.defs.append(localctx._identifier)
self.state = 220
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,11,self._ctx)
self.state = 222
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.COMMA:
self.state = 221
self.match(ANTLRv4Parser.COMMA)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ActionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AT(self):
return self.getToken(ANTLRv4Parser.AT, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def actionScopeName(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionScopeNameContext,0)
def COLONCOLON(self):
return self.getToken(ANTLRv4Parser.COLONCOLON, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_action
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAction" ):
listener.enterAction(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAction" ):
listener.exitAction(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAction" ):
return visitor.visitAction(self)
else:
return visitor.visitChildren(self)
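# Parses: action : AT (actionScopeName COLONCOLON)? identifier actionBlock ;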
def action(self):
localctx = ANTLRv4Parser.ActionContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_action)
try:
self.enterOuterAlt(localctx, 1)
self.state = 224
self.match(ANTLRv4Parser.AT)
self.state = 228
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,13,self._ctx)
if la_ == 1:
self.state = 225
self.actionScopeName()
self.state = 226
self.match(ANTLRv4Parser.COLONCOLON)
self.state = 230
self.identifier()
self.state = 231
self.actionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ActionScopeNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def LEXER(self):
return self.getToken(ANTLRv4Parser.LEXER, 0)
def PARSER(self):
return self.getToken(ANTLRv4Parser.PARSER, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_actionScopeName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterActionScopeName" ):
listener.enterActionScopeName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitActionScopeName" ):
listener.exitActionScopeName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitActionScopeName" ):
return visitor.visitActionScopeName(self)
else:
return visitor.visitChildren(self)
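# Parses: actionScopeName : identifier | LEXER | PARSER ;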
def actionScopeName(self):
localctx = ANTLRv4Parser.ActionScopeNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_actionScopeName)
try:
self.state = 236
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 233
self.identifier()
pass
elif token in [ANTLRv4Parser.LEXER]:
self.enterOuterAlt(localctx, 2)
self.state = 234
self.match(ANTLRv4Parser.LEXER)
pass
elif token in [ANTLRv4Parser.PARSER]:
self.enterOuterAlt(localctx, 3)
self.state = 235
self.match(ANTLRv4Parser.PARSER)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ActionBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def BEGIN_ACTION(self):
return self.getToken(ANTLRv4Parser.BEGIN_ACTION, 0)
def END_ACTION(self):
return self.getToken(ANTLRv4Parser.END_ACTION, 0)
def ACTION_CONTENT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.ACTION_CONTENT)
else:
return self.getToken(ANTLRv4Parser.ACTION_CONTENT, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_actionBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterActionBlock" ):
listener.enterActionBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitActionBlock" ):
listener.exitActionBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitActionBlock" ):
return visitor.visitActionBlock(self)
else:
return visitor.visitChildren(self)
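# Parses: actionBlock : BEGIN_ACTION ACTION_CONTENT* END_ACTION ;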
def actionBlock(self):
localctx = ANTLRv4Parser.ActionBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_actionBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 238
self.match(ANTLRv4Parser.BEGIN_ACTION)
self.state = 242
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.ACTION_CONTENT:
self.state = 239
self.match(ANTLRv4Parser.ACTION_CONTENT)
self.state = 244
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 245
self.match(ANTLRv4Parser.END_ACTION)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArgActionBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def BEGIN_ARGUMENT(self):
return self.getToken(ANTLRv4Parser.BEGIN_ARGUMENT, 0)
def END_ARGUMENT(self):
return self.getToken(ANTLRv4Parser.END_ARGUMENT, 0)
def ARGUMENT_CONTENT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.ARGUMENT_CONTENT)
else:
return self.getToken(ANTLRv4Parser.ARGUMENT_CONTENT, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_argActionBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArgActionBlock" ):
listener.enterArgActionBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArgActionBlock" ):
listener.exitArgActionBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArgActionBlock" ):
return visitor.visitArgActionBlock(self)
else:
return visitor.visitChildren(self)
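# Parses: argActionBlock : BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT ;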
def argActionBlock(self):
localctx = ANTLRv4Parser.ArgActionBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_argActionBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 247
self.match(ANTLRv4Parser.BEGIN_ARGUMENT)
self.state = 251
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.ARGUMENT_CONTENT:
self.state = 248
self.match(ANTLRv4Parser.ARGUMENT_CONTENT)
self.state = 253
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 254
self.match(ANTLRv4Parser.END_ARGUMENT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ModeSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def MODE(self):
return self.getToken(ANTLRv4Parser.MODE, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def lexerRuleSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.LexerRuleSpecContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_modeSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterModeSpec" ):
listener.enterModeSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitModeSpec" ):
listener.exitModeSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitModeSpec" ):
return visitor.visitModeSpec(self)
else:
return visitor.visitChildren(self)
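# Parses: modeSpec : MODE identifier SEMI lexerRuleSpec* ;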
def modeSpec(self):
localctx = ANTLRv4Parser.ModeSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_modeSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 256
self.match(ANTLRv4Parser.MODE)
self.state = 257
self.identifier()
self.state = 258
self.match(ANTLRv4Parser.SEMI)
self.state = 262
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.FRAGMENT))) != 0):
self.state = 259
self.lexerRuleSpec()
self.state = 264
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RulesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ruleSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RuleSpecContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RuleSpecContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_rules
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRules" ):
listener.enterRules(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRules" ):
listener.exitRules(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRules" ):
return visitor.visitRules(self)
else:
return visitor.visitChildren(self)
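# Parses: rules : ruleSpec* ;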
def rules(self):
localctx = ANTLRv4Parser.RulesContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_rules)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 268
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.HEADER) | (1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
self.state = 265
self.ruleSpec()
self.state = 270
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._HEADER = None # Token
self.headers = list() # of Tokens
def parserRuleSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.ParserRuleSpecContext,0)
def lexerRuleSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,0)
def HEADER(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.HEADER)
else:
return self.getToken(ANTLRv4Parser.HEADER, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleSpec" ):
listener.enterRuleSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleSpec" ):
listener.exitRuleSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleSpec" ):
return visitor.visitRuleSpec(self)
else:
return visitor.visitChildren(self)
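# Parses: ruleSpec : headers+=HEADER* (parserRuleSpec | lexerRuleSpec) ;
# (HEADER looks like an extension over the stock ANTLRv4 grammar; presumably
# it lets sphinx-a4doc attach section headers to individual rules)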
def ruleSpec(self):
localctx = ANTLRv4Parser.RuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_ruleSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 274
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.HEADER:
self.state = 271
localctx._HEADER = self.match(ANTLRv4Parser.HEADER)
localctx.headers.append(localctx._HEADER)
self.state = 276
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 279
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
self.state = 277
self.parserRuleSpec()
pass
elif la_ == 2:
self.state = 278
self.lexerRuleSpec()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParserRuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._DOC_COMMENT = None # Token
self.docs = list() # of Tokens
self.name = None # Token
def COLON(self):
return self.getToken(ANTLRv4Parser.COLON, 0)
def ruleBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.RuleBlockContext,0)
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def exceptionGroup(self):
return self.getTypedRuleContext(ANTLRv4Parser.ExceptionGroupContext,0)
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def ruleModifiers(self):
return self.getTypedRuleContext(ANTLRv4Parser.RuleModifiersContext,0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def ruleReturns(self):
return self.getTypedRuleContext(ANTLRv4Parser.RuleReturnsContext,0)
def throwsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.ThrowsSpecContext,0)
def localsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.LocalsSpecContext,0)
def rulePrequel(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RulePrequelContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RulePrequelContext,i)
def DOC_COMMENT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
else:
return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_parserRuleSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserRuleSpec" ):
listener.enterParserRuleSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserRuleSpec" ):
listener.exitParserRuleSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserRuleSpec" ):
return visitor.visitParserRuleSpec(self)
else:
return visitor.visitChildren(self)
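# Parses: parserRuleSpec : docs+=DOC_COMMENT* ruleModifiers? name=RULE_REF
#                          argActionBlock? ruleReturns? throwsSpec? localsSpec?
#                          rulePrequel* COLON ruleBlock SEMI exceptionGroup ;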
def parserRuleSpec(self):
localctx = ANTLRv4Parser.ParserRuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_parserRuleSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 284
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.DOC_COMMENT:
self.state = 281
localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
localctx.docs.append(localctx._DOC_COMMENT)
self.state = 286
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 288
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
self.state = 287
self.ruleModifiers()
self.state = 290
localctx.name = self.match(ANTLRv4Parser.RULE_REF)
self.state = 292
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
self.state = 291
self.argActionBlock()
self.state = 295
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.RETURNS:
self.state = 294
self.ruleReturns()
self.state = 298
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.THROWS:
self.state = 297
self.throwsSpec()
self.state = 301
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LOCALS:
self.state = 300
self.localsSpec()
self.state = 306
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OPTIONS or _la==ANTLRv4Parser.AT:
self.state = 303
self.rulePrequel()
self.state = 308
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 309
self.match(ANTLRv4Parser.COLON)
self.state = 310
self.ruleBlock()
self.state = 311
self.match(ANTLRv4Parser.SEMI)
self.state = 312
self.exceptionGroup()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExceptionGroupContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def exceptionHandler(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ExceptionHandlerContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ExceptionHandlerContext,i)
def finallyClause(self):
return self.getTypedRuleContext(ANTLRv4Parser.FinallyClauseContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_exceptionGroup
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExceptionGroup" ):
listener.enterExceptionGroup(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExceptionGroup" ):
listener.exitExceptionGroup(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExceptionGroup" ):
return visitor.visitExceptionGroup(self)
else:
return visitor.visitChildren(self)
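# Parses: exceptionGroup : exceptionHandler* finallyClause? ;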
def exceptionGroup(self):
localctx = ANTLRv4Parser.ExceptionGroupContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_exceptionGroup)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 317
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.CATCH:
self.state = 314
self.exceptionHandler()
self.state = 319
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 321
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.FINALLY:
self.state = 320
self.finallyClause()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExceptionHandlerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CATCH(self):
return self.getToken(ANTLRv4Parser.CATCH, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_exceptionHandler
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExceptionHandler" ):
listener.enterExceptionHandler(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExceptionHandler" ):
listener.exitExceptionHandler(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExceptionHandler" ):
return visitor.visitExceptionHandler(self)
else:
return visitor.visitChildren(self)
def exceptionHandler(self):
localctx = ANTLRv4Parser.ExceptionHandlerContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_exceptionHandler)
try:
self.enterOuterAlt(localctx, 1)
self.state = 323
self.match(ANTLRv4Parser.CATCH)
self.state = 324
self.argActionBlock()
self.state = 325
self.actionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FinallyClauseContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def FINALLY(self):
return self.getToken(ANTLRv4Parser.FINALLY, 0)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_finallyClause
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFinallyClause" ):
listener.enterFinallyClause(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFinallyClause" ):
listener.exitFinallyClause(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFinallyClause" ):
return visitor.visitFinallyClause(self)
else:
return visitor.visitChildren(self)
def finallyClause(self):
localctx = ANTLRv4Parser.FinallyClauseContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_finallyClause)
try:
self.enterOuterAlt(localctx, 1)
self.state = 327
self.match(ANTLRv4Parser.FINALLY)
self.state = 328
self.actionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RulePrequelContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def optionsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)
def ruleAction(self):
return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_rulePrequel
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRulePrequel" ):
listener.enterRulePrequel(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRulePrequel" ):
listener.exitRulePrequel(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRulePrequel" ):
return visitor.visitRulePrequel(self)
else:
return visitor.visitChildren(self)
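# Parses: rulePrequel : optionsSpec | ruleAction ;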
def rulePrequel(self):
localctx = ANTLRv4Parser.RulePrequelContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_rulePrequel)
try:
self.state = 332
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.OPTIONS]:
self.enterOuterAlt(localctx, 1)
self.state = 330
self.optionsSpec()
pass
elif token in [ANTLRv4Parser.AT]:
self.enterOuterAlt(localctx, 2)
self.state = 331
self.ruleAction()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleReturnsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def RETURNS(self):
return self.getToken(ANTLRv4Parser.RETURNS, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleReturns
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleReturns" ):
listener.enterRuleReturns(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleReturns" ):
listener.exitRuleReturns(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleReturns" ):
return visitor.visitRuleReturns(self)
else:
return visitor.visitChildren(self)
def ruleReturns(self):
localctx = ANTLRv4Parser.RuleReturnsContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_ruleReturns)
try:
self.enterOuterAlt(localctx, 1)
self.state = 334
self.match(ANTLRv4Parser.RETURNS)
self.state = 335
self.argActionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ThrowsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def THROWS(self):
return self.getToken(ANTLRv4Parser.THROWS, 0)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_throwsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterThrowsSpec" ):
listener.enterThrowsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitThrowsSpec" ):
listener.exitThrowsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitThrowsSpec" ):
return visitor.visitThrowsSpec(self)
else:
return visitor.visitChildren(self)
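# Parses: throwsSpec : THROWS identifier (COMMA identifier)* ;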
def throwsSpec(self):
localctx = ANTLRv4Parser.ThrowsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_throwsSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 337
self.match(ANTLRv4Parser.THROWS)
self.state = 338
self.identifier()
self.state = 343
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 339
self.match(ANTLRv4Parser.COMMA)
self.state = 340
self.identifier()
self.state = 345
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LocalsSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LOCALS(self):
return self.getToken(ANTLRv4Parser.LOCALS, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_localsSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLocalsSpec" ):
listener.enterLocalsSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLocalsSpec" ):
listener.exitLocalsSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLocalsSpec" ):
return visitor.visitLocalsSpec(self)
else:
return visitor.visitChildren(self)
def localsSpec(self):
localctx = ANTLRv4Parser.LocalsSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_localsSpec)
try:
self.enterOuterAlt(localctx, 1)
self.state = 346
self.match(ANTLRv4Parser.LOCALS)
self.state = 347
self.argActionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleActionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AT(self):
return self.getToken(ANTLRv4Parser.AT, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleAction
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleAction" ):
listener.enterRuleAction(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleAction" ):
listener.exitRuleAction(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleAction" ):
return visitor.visitRuleAction(self)
else:
return visitor.visitChildren(self)
def ruleAction(self):
localctx = ANTLRv4Parser.RuleActionContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_ruleAction)
try:
self.enterOuterAlt(localctx, 1)
self.state = 349
self.match(ANTLRv4Parser.AT)
self.state = 350
self.identifier()
self.state = 351
self.actionBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleModifiersContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ruleModifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RuleModifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RuleModifierContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleModifiers
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleModifiers" ):
listener.enterRuleModifiers(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleModifiers" ):
listener.exitRuleModifiers(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleModifiers" ):
return visitor.visitRuleModifiers(self)
else:
return visitor.visitChildren(self)
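# Parses: ruleModifiers : ruleModifier+ ;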
def ruleModifiers(self):
localctx = ANTLRv4Parser.RuleModifiersContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_ruleModifiers)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 354
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 353
self.ruleModifier()
self.state = 356
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleModifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def PUBLIC(self):
return self.getToken(ANTLRv4Parser.PUBLIC, 0)
def PRIVATE(self):
return self.getToken(ANTLRv4Parser.PRIVATE, 0)
def PROTECTED(self):
return self.getToken(ANTLRv4Parser.PROTECTED, 0)
def FRAGMENT(self):
return self.getToken(ANTLRv4Parser.FRAGMENT, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleModifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleModifier" ):
listener.enterRuleModifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleModifier" ):
listener.exitRuleModifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleModifier" ):
return visitor.visitRuleModifier(self)
else:
return visitor.visitChildren(self)
def ruleModifier(self):
localctx = ANTLRv4Parser.RuleModifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_ruleModifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 358
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ruleAltList(self):
return self.getTypedRuleContext(ANTLRv4Parser.RuleAltListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleBlock" ):
listener.enterRuleBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleBlock" ):
listener.exitRuleBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleBlock" ):
return visitor.visitRuleBlock(self)
else:
return visitor.visitChildren(self)
def ruleBlock(self):
localctx = ANTLRv4Parser.RuleBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_ruleBlock)
try:
self.enterOuterAlt(localctx, 1)
self.state = 360
self.ruleAltList()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RuleAltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._labeledAlt = None # LabeledAltContext
self.alts = list() # of LabeledAltContexts
def labeledAlt(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.LabeledAltContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.LabeledAltContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleAltList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleAltList" ):
listener.enterRuleAltList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleAltList" ):
listener.exitRuleAltList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleAltList" ):
return visitor.visitRuleAltList(self)
else:
return visitor.visitChildren(self)
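# Parses: ruleAltList : alts+=labeledAlt (OR alts+=labeledAlt)* ;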
def ruleAltList(self):
localctx = ANTLRv4Parser.RuleAltListContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_ruleAltList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 362
localctx._labeledAlt = self.labeledAlt()
localctx.alts.append(localctx._labeledAlt)
self.state = 367
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 363
self.match(ANTLRv4Parser.OR)
self.state = 364
localctx._labeledAlt = self.labeledAlt()
localctx.alts.append(localctx._labeledAlt)
self.state = 369
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LabeledAltContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def alternative(self):
return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,0)
def POUND(self):
return self.getToken(ANTLRv4Parser.POUND, 0)
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_labeledAlt
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledAlt" ):
listener.enterLabeledAlt(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledAlt" ):
listener.exitLabeledAlt(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledAlt" ):
return visitor.visitLabeledAlt(self)
else:
return visitor.visitChildren(self)
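# Parses: labeledAlt : alternative (POUND identifier)? ;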
def labeledAlt(self):
localctx = ANTLRv4Parser.LabeledAltContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_labeledAlt)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 370
self.alternative()
self.state = 373
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.POUND:
self.state = 371
self.match(ANTLRv4Parser.POUND)
self.state = 372
self.identifier()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._DOC_COMMENT = None # Token
self.docs = list() # of Tokens
self.frag = None # Token
self.name = None # Token
def COLON(self):
return self.getToken(ANTLRv4Parser.COLON, 0)
def lexerRuleBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleBlockContext,0)
def SEMI(self):
return self.getToken(ANTLRv4Parser.SEMI, 0)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def DOC_COMMENT(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
else:
return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)
def FRAGMENT(self):
return self.getToken(ANTLRv4Parser.FRAGMENT, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerRuleSpec
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerRuleSpec" ):
listener.enterLexerRuleSpec(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerRuleSpec" ):
listener.exitLexerRuleSpec(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleSpec" ):
return visitor.visitLexerRuleSpec(self)
else:
return visitor.visitChildren(self)
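# Parses: lexerRuleSpec : docs+=DOC_COMMENT* frag=FRAGMENT? name=TOKEN_REF
#                         COLON lexerRuleBlock SEMI ;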
def lexerRuleSpec(self):
localctx = ANTLRv4Parser.LexerRuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_lexerRuleSpec)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 378
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.DOC_COMMENT:
self.state = 375
localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
localctx.docs.append(localctx._DOC_COMMENT)
self.state = 380
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 382
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.FRAGMENT:
self.state = 381
localctx.frag = self.match(ANTLRv4Parser.FRAGMENT)
self.state = 384
localctx.name = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 385
self.match(ANTLRv4Parser.COLON)
self.state = 386
self.lexerRuleBlock()
self.state = 387
self.match(ANTLRv4Parser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAltList(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerRuleBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerRuleBlock" ):
listener.enterLexerRuleBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerRuleBlock" ):
listener.exitLexerRuleBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleBlock" ):
return visitor.visitLexerRuleBlock(self)
else:
return visitor.visitChildren(self)
def lexerRuleBlock(self):
localctx = ANTLRv4Parser.LexerRuleBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_lexerRuleBlock)
try:
self.enterOuterAlt(localctx, 1)
self.state = 389
self.lexerAltList()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._lexerAlt = None # LexerAltContext
self.alts = list() # of LexerAltContexts
def lexerAlt(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.LexerAltContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.LexerAltContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerAltList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAltList" ):
listener.enterLexerAltList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAltList" ):
listener.exitLexerAltList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAltList" ):
return visitor.visitLexerAltList(self)
else:
return visitor.visitChildren(self)
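# Parses: lexerAltList : alts+=lexerAlt (OR alts+=lexerAlt)* ;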
def lexerAltList(self):
localctx = ANTLRv4Parser.LexerAltListContext(self, self._ctx, self.state)
self.enterRule(localctx, 68, self.RULE_lexerAltList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 391
localctx._lexerAlt = self.lexerAlt()
localctx.alts.append(localctx._lexerAlt)
self.state = 396
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 392
self.match(ANTLRv4Parser.OR)
self.state = 393
localctx._lexerAlt = self.lexerAlt()
localctx.alts.append(localctx._lexerAlt)
self.state = 398
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerElements(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerElementsContext,0)
def lexerCommands(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandsContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerAlt
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAlt" ):
listener.enterLexerAlt(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAlt" ):
listener.exitLexerAlt(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAlt" ):
return visitor.visitLexerAlt(self)
else:
return visitor.visitChildren(self)
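# Parses: lexerAlt : lexerElements lexerCommands? | /* epsilon */ ;
# (the second alternative matches nothing; it is taken when the lookahead
# is SEMI, RPAREN or OR, i.e. the alternative is empty)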
def lexerAlt(self):
localctx = ANTLRv4Parser.LexerAltContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_lexerAlt)
self._la = 0 # Token type
try:
self.state = 404
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
self.enterOuterAlt(localctx, 1)
self.state = 399
self.lexerElements()
self.state = 401
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.RARROW:
self.state = 400
self.lexerCommands()
pass
elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR]:
self.enterOuterAlt(localctx, 2)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._lexerElement = None # LexerElementContext
self.elements = list() # of LexerElementContexts
def lexerElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.LexerElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.LexerElementContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerElements
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElements" ):
listener.enterLexerElements(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElements" ):
listener.exitLexerElements(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElements" ):
return visitor.visitLexerElements(self)
else:
return visitor.visitChildren(self)
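    # Parses the `lexerElements` rule:
    #     lexerElements : lexerElement+ ;
    # Parsed elements are collected into `localctx.elements`.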
def lexerElements(self):
localctx = ANTLRv4Parser.LexerElementsContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_lexerElements)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 407
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 406
localctx._lexerElement = self.lexerElement()
localctx.elements.append(localctx._lexerElement)
self.state = 409
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.LEXER_CHAR_SET) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LexerElementLabeledContext(LexerElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
super().__init__(parser)
self.value = None # LabeledLexerElementContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def labeledLexerElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.LabeledLexerElementContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElementLabeled" ):
listener.enterLexerElementLabeled(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElementLabeled" ):
listener.exitLexerElementLabeled(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElementLabeled" ):
return visitor.visitLexerElementLabeled(self)
else:
return visitor.visitChildren(self)
class LexerElementBlockContext(LexerElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
super().__init__(parser)
self.value = None # LexerBlockContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def lexerBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElementBlock" ):
listener.enterLexerElementBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElementBlock" ):
listener.exitLexerElementBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElementBlock" ):
return visitor.visitLexerElementBlock(self)
else:
return visitor.visitChildren(self)
class LexerElementActionContext(LexerElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
super().__init__(parser)
self.copyFrom(ctx)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def QUESTION(self):
return self.getToken(ANTLRv4Parser.QUESTION, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElementAction" ):
listener.enterLexerElementAction(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElementAction" ):
listener.exitLexerElementAction(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElementAction" ):
return visitor.visitLexerElementAction(self)
else:
return visitor.visitChildren(self)
class LexerElementAtomContext(LexerElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
super().__init__(parser)
self.value = None # LexerAtomContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def lexerAtom(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElementAtom" ):
listener.enterLexerElementAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElementAtom" ):
listener.exitLexerElementAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElementAtom" ):
return visitor.visitLexerElementAtom(self)
else:
return visitor.visitChildren(self)
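    # Parses the `lexerElement` rule; the alternative is chosen by adaptive
    # prediction and `localctx` is replaced with the matching labeled context:
    #     lexerElement : labeledLexerElement ebnfSuffix?  # LexerElementLabeled
    #                  | lexerAtom ebnfSuffix?            # LexerElementAtom
    #                  | lexerBlock ebnfSuffix?           # LexerElementBlock
    #                  | actionBlock '?'?                 # LexerElementAction
    #                  ;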
def lexerElement(self):
localctx = ANTLRv4Parser.LexerElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 74, self.RULE_lexerElement)
self._la = 0 # Token type
try:
self.state = 427
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,45,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.LexerElementLabeledContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 411
localctx.value = self.labeledLexerElement()
self.state = 413
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 412
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.LexerElementAtomContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 415
localctx.value = self.lexerAtom()
self.state = 417
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 416
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.LexerElementBlockContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 419
localctx.value = self.lexerBlock()
self.state = 421
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 420
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.LexerElementActionContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 423
self.actionBlock()
self.state = 425
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.QUESTION:
self.state = 424
self.match(ANTLRv4Parser.QUESTION)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LabeledLexerElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def PLUS_ASSIGN(self):
return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)
def lexerAtom(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)
def lexerBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_labeledLexerElement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledLexerElement" ):
listener.enterLabeledLexerElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledLexerElement" ):
listener.exitLabeledLexerElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledLexerElement" ):
return visitor.visitLabeledLexerElement(self)
else:
return visitor.visitChildren(self)
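    # Parses the `labeledLexerElement` rule:
    #     labeledLexerElement : identifier ('=' | '+=') (lexerAtom | lexerBlock) ;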
def labeledLexerElement(self):
localctx = ANTLRv4Parser.LabeledLexerElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_labeledLexerElement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 429
self.identifier()
self.state = 430
_la = self._input.LA(1)
if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 433
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
self.state = 431
self.lexerAtom()
pass
elif token in [ANTLRv4Parser.LPAREN]:
self.state = 432
self.lexerBlock()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def lexerAltList(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerBlock" ):
listener.enterLexerBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerBlock" ):
listener.exitLexerBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerBlock" ):
return visitor.visitLexerBlock(self)
else:
return visitor.visitChildren(self)
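    # Parses the `lexerBlock` rule:
    #     lexerBlock : '(' lexerAltList ')' ;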
def lexerBlock(self):
localctx = ANTLRv4Parser.LexerBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 78, self.RULE_lexerBlock)
try:
self.enterOuterAlt(localctx, 1)
self.state = 435
self.match(ANTLRv4Parser.LPAREN)
self.state = 436
self.lexerAltList()
self.state = 437
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerCommandsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def RARROW(self):
return self.getToken(ANTLRv4Parser.RARROW, 0)
def lexerCommand(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.LexerCommandContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommands
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommands" ):
listener.enterLexerCommands(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommands" ):
listener.exitLexerCommands(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommands" ):
return visitor.visitLexerCommands(self)
else:
return visitor.visitChildren(self)
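    # Parses the `lexerCommands` rule:
    #     lexerCommands : '->' lexerCommand (',' lexerCommand)* ;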
def lexerCommands(self):
localctx = ANTLRv4Parser.LexerCommandsContext(self, self._ctx, self.state)
self.enterRule(localctx, 80, self.RULE_lexerCommands)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 439
self.match(ANTLRv4Parser.RARROW)
self.state = 440
self.lexerCommand()
self.state = 445
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 441
self.match(ANTLRv4Parser.COMMA)
self.state = 442
self.lexerCommand()
self.state = 447
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerCommandContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerCommandName(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandNameContext,0)
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def lexerCommandExpr(self):
return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandExprContext,0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommand
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommand" ):
listener.enterLexerCommand(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommand" ):
listener.exitLexerCommand(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommand" ):
return visitor.visitLexerCommand(self)
else:
return visitor.visitChildren(self)
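    # Parses the `lexerCommand` rule:
    #     lexerCommand : lexerCommandName '(' lexerCommandExpr ')'
    #                  | lexerCommandName
    #                  ;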
def lexerCommand(self):
localctx = ANTLRv4Parser.LexerCommandContext(self, self._ctx, self.state)
self.enterRule(localctx, 82, self.RULE_lexerCommand)
try:
self.state = 454
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 448
self.lexerCommandName()
self.state = 449
self.match(ANTLRv4Parser.LPAREN)
self.state = 450
self.lexerCommandExpr()
self.state = 451
self.match(ANTLRv4Parser.RPAREN)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 453
self.lexerCommandName()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerCommandNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def MODE(self):
return self.getToken(ANTLRv4Parser.MODE, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommandName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommandName" ):
listener.enterLexerCommandName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommandName" ):
listener.exitLexerCommandName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommandName" ):
return visitor.visitLexerCommandName(self)
else:
return visitor.visitChildren(self)
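    # Parses the `lexerCommandName` rule:
    #     lexerCommandName : identifier | 'mode' ;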
def lexerCommandName(self):
localctx = ANTLRv4Parser.LexerCommandNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 84, self.RULE_lexerCommandName)
try:
self.state = 458
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 456
self.identifier()
pass
elif token in [ANTLRv4Parser.MODE]:
self.enterOuterAlt(localctx, 2)
self.state = 457
self.match(ANTLRv4Parser.MODE)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerCommandExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def INT(self):
return self.getToken(ANTLRv4Parser.INT, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommandExpr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommandExpr" ):
listener.enterLexerCommandExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommandExpr" ):
listener.exitLexerCommandExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommandExpr" ):
return visitor.visitLexerCommandExpr(self)
else:
return visitor.visitChildren(self)
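    # Parses the `lexerCommandExpr` rule:
    #     lexerCommandExpr : identifier | INT ;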
def lexerCommandExpr(self):
localctx = ANTLRv4Parser.LexerCommandExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 86, self.RULE_lexerCommandExpr)
try:
self.state = 462
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 460
self.identifier()
pass
elif token in [ANTLRv4Parser.INT]:
self.enterOuterAlt(localctx, 2)
self.state = 461
self.match(ANTLRv4Parser.INT)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._alternative = None # AlternativeContext
self.alts = list() # of AlternativeContexts
def alternative(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.AlternativeContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_altList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAltList" ):
listener.enterAltList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAltList" ):
listener.exitAltList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAltList" ):
return visitor.visitAltList(self)
else:
return visitor.visitChildren(self)
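    # Parses the `altList` rule:
    #     altList : alternative ('|' alternative)* ;
    # Each parsed alternative is collected into `localctx.alts`.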
def altList(self):
localctx = ANTLRv4Parser.AltListContext(self, self._ctx, self.state)
self.enterRule(localctx, 88, self.RULE_altList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 464
localctx._alternative = self.alternative()
localctx.alts.append(localctx._alternative)
self.state = 469
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 465
self.match(ANTLRv4Parser.OR)
self.state = 466
localctx._alternative = self.alternative()
localctx.alts.append(localctx._alternative)
self.state = 471
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AlternativeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._element = None # ElementContext
self.elements = list() # of ElementContexts
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def element(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ElementContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_alternative
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAlternative" ):
listener.enterAlternative(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAlternative" ):
listener.exitAlternative(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAlternative" ):
return visitor.visitAlternative(self)
else:
return visitor.visitChildren(self)
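    # Parses the `alternative` rule:
    #     alternative : elementOptions? element+ | /* empty */ ;
    # The empty alternative is taken when the lookahead is ';', ')', '|' or '#'.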
def alternative(self):
localctx = ANTLRv4Parser.AlternativeContext(self, self._ctx, self.state)
self.enterRule(localctx, 90, self.RULE_alternative)
self._la = 0 # Token type
try:
self.state = 481
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.LT, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
self.enterOuterAlt(localctx, 1)
self.state = 473
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 472
self.elementOptions()
self.state = 476
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 475
localctx._element = self.element()
localctx.elements.append(localctx._element)
self.state = 478
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
break
pass
elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.POUND]:
self.enterOuterAlt(localctx, 2)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_element
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ParserElementLabeledContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # LabeledElementContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def labeledElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.LabeledElementContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementLabeled" ):
listener.enterParserElementLabeled(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementLabeled" ):
listener.exitParserElementLabeled(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementLabeled" ):
return visitor.visitParserElementLabeled(self)
else:
return visitor.visitChildren(self)
class ParserElementBlockContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # BlockContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def block(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementBlock" ):
listener.enterParserElementBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementBlock" ):
listener.exitParserElementBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementBlock" ):
return visitor.visitParserElementBlock(self)
else:
return visitor.visitChildren(self)
class ParserElementAtomContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # AtomContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def atom(self):
return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementAtom" ):
listener.enterParserElementAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementAtom" ):
listener.exitParserElementAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementAtom" ):
return visitor.visitParserElementAtom(self)
else:
return visitor.visitChildren(self)
class ParserInlineDocContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def DOC_COMMENT(self):
return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserInlineDoc" ):
listener.enterParserInlineDoc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserInlineDoc" ):
listener.exitParserInlineDoc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserInlineDoc" ):
return visitor.visitParserInlineDoc(self)
else:
return visitor.visitChildren(self)
class ParserElementActionContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.copyFrom(ctx)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def QUESTION(self):
return self.getToken(ANTLRv4Parser.QUESTION, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementAction" ):
listener.enterParserElementAction(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementAction" ):
listener.exitParserElementAction(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementAction" ):
return visitor.visitParserElementAction(self)
else:
return visitor.visitChildren(self)
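    # Parses the `element` rule; the alternative is chosen by adaptive
    # prediction and `localctx` is replaced with the matching labeled context:
    #     element : labeledElement ebnfSuffix?  # ParserElementLabeled
    #             | atom ebnfSuffix?            # ParserElementAtom
    #             | block ebnfSuffix?           # ParserElementBlock
    #             | actionBlock '?'?            # ParserElementAction
    #             | DOC_COMMENT                 # ParserInlineDoc
    #             ;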
def element(self):
localctx = ANTLRv4Parser.ElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 92, self.RULE_element)
self._la = 0 # Token type
try:
self.state = 500
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,59,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.ParserElementLabeledContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 483
localctx.value = self.labeledElement()
self.state = 485
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 484
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.ParserElementAtomContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 487
localctx.value = self.atom()
self.state = 489
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 488
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.ParserElementBlockContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 491
localctx.value = self.block()
self.state = 493
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
self.state = 492
localctx.suffix = self.ebnfSuffix()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.ParserElementActionContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 495
self.actionBlock()
self.state = 497
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.QUESTION:
self.state = 496
self.match(ANTLRv4Parser.QUESTION)
pass
elif la_ == 5:
localctx = ANTLRv4Parser.ParserInlineDocContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 499
localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LabeledElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def PLUS_ASSIGN(self):
return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)
def atom(self):
return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)
def block(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_labeledElement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledElement" ):
listener.enterLabeledElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledElement" ):
listener.exitLabeledElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledElement" ):
return visitor.visitLabeledElement(self)
else:
return visitor.visitChildren(self)
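    # Parses the `labeledElement` rule:
    #     labeledElement : identifier ('=' | '+=') (atom | block) ;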
def labeledElement(self):
localctx = ANTLRv4Parser.LabeledElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 94, self.RULE_labeledElement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 502
self.identifier()
self.state = 503
_la = self._input.LA(1)
if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 506
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
self.state = 504
self.atom()
pass
elif token in [ANTLRv4Parser.LPAREN]:
self.state = 505
self.block()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EbnfSuffixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def QUESTION(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.QUESTION)
else:
return self.getToken(ANTLRv4Parser.QUESTION, i)
def STAR(self):
return self.getToken(ANTLRv4Parser.STAR, 0)
def PLUS(self):
return self.getToken(ANTLRv4Parser.PLUS, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ebnfSuffix
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEbnfSuffix" ):
listener.enterEbnfSuffix(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEbnfSuffix" ):
listener.exitEbnfSuffix(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEbnfSuffix" ):
return visitor.visitEbnfSuffix(self)
else:
return visitor.visitChildren(self)
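    # Parses the `ebnfSuffix` rule:
    #     ebnfSuffix : '?' '?'? | '*' '?'? | '+' '?'? ;
    # The optional trailing '?' is ANTLR's non-greedy marker.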
def ebnfSuffix(self):
localctx = ANTLRv4Parser.EbnfSuffixContext(self, self._ctx, self.state)
self.enterRule(localctx, 96, self.RULE_ebnfSuffix)
self._la = 0 # Token type
try:
self.state = 520
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.QUESTION]:
self.enterOuterAlt(localctx, 1)
self.state = 508
self.match(ANTLRv4Parser.QUESTION)
self.state = 510
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.QUESTION:
self.state = 509
self.match(ANTLRv4Parser.QUESTION)
pass
elif token in [ANTLRv4Parser.STAR]:
self.enterOuterAlt(localctx, 2)
self.state = 512
self.match(ANTLRv4Parser.STAR)
self.state = 514
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.QUESTION:
self.state = 513
self.match(ANTLRv4Parser.QUESTION)
pass
elif token in [ANTLRv4Parser.PLUS]:
self.enterOuterAlt(localctx, 3)
self.state = 516
self.match(ANTLRv4Parser.PLUS)
self.state = 518
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.QUESTION:
self.state = 517
self.match(ANTLRv4Parser.QUESTION)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerAtom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LexerAtomNotContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def notSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomNot" ):
listener.enterLexerAtomNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomNot" ):
listener.exitLexerAtomNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomNot" ):
return visitor.visitLexerAtomNot(self)
else:
return visitor.visitChildren(self)
class LexerAtomRangeContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def characterRange(self):
return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomRange" ):
listener.enterLexerAtomRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomRange" ):
listener.exitLexerAtomRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomRange" ):
return visitor.visitLexerAtomRange(self)
else:
return visitor.visitChildren(self)
class LexerAtomCharSetContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomCharSet" ):
listener.enterLexerAtomCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomCharSet" ):
listener.exitLexerAtomCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomCharSet" ):
return visitor.visitLexerAtomCharSet(self)
else:
return visitor.visitChildren(self)
class LexerAtomWildcardContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomWildcard" ):
listener.enterLexerAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomWildcard" ):
listener.exitLexerAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomWildcard" ):
return visitor.visitLexerAtomWildcard(self)
else:
return visitor.visitChildren(self)
class LexerAtomTerminalContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomTerminal" ):
listener.enterLexerAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomTerminal" ):
listener.exitLexerAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomTerminal" ):
return visitor.visitLexerAtomTerminal(self)
else:
return visitor.visitChildren(self)
class LexerAtomDocContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def DOC_COMMENT(self):
return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomDoc" ):
listener.enterLexerAtomDoc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomDoc" ):
listener.exitLexerAtomDoc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomDoc" ):
return visitor.visitLexerAtomDoc(self)
else:
return visitor.visitChildren(self)
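    # Parses the `lexerAtom` rule; the alternative is chosen by adaptive
    # prediction and `localctx` is replaced with the matching labeled context:
    #     lexerAtom : characterRange        # LexerAtomRange
    #               | terminal              # LexerAtomTerminal
    #               | notSet                # LexerAtomNot
    #               | LEXER_CHAR_SET        # LexerAtomCharSet
    #               | '.' elementOptions?   # LexerAtomWildcard
    #               | DOC_COMMENT           # LexerAtomDoc
    #               ;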
def lexerAtom(self):
localctx = ANTLRv4Parser.LexerAtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 98, self.RULE_lexerAtom)
self._la = 0 # Token type
try:
self.state = 531
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,66,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.LexerAtomRangeContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 522
self.characterRange()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.LexerAtomTerminalContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 523
self.terminal()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.LexerAtomNotContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 524
self.notSet()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.LexerAtomCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 525
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
elif la_ == 5:
localctx = ANTLRv4Parser.LexerAtomWildcardContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 526
self.match(ANTLRv4Parser.DOT)
self.state = 528
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 527
self.elementOptions()
pass
elif la_ == 6:
localctx = ANTLRv4Parser.LexerAtomDocContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 530
localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_atom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class AtomTerminalContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomTerminal" ):
listener.enterAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomTerminal" ):
listener.exitAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomTerminal" ):
return visitor.visitAtomTerminal(self)
else:
return visitor.visitChildren(self)
class AtomWildcardContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomWildcard" ):
listener.enterAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomWildcard" ):
listener.exitAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomWildcard" ):
return visitor.visitAtomWildcard(self)
else:
return visitor.visitChildren(self)
class AtomRuleRefContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def ruleref(self):
return self.getTypedRuleContext(ANTLRv4Parser.RulerefContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomRuleRef" ):
listener.enterAtomRuleRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomRuleRef" ):
listener.exitAtomRuleRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomRuleRef" ):
return visitor.visitAtomRuleRef(self)
else:
return visitor.visitChildren(self)
class AtomNotContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def notSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomNot" ):
listener.enterAtomNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomNot" ):
listener.exitAtomNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomNot" ):
return visitor.visitAtomNot(self)
else:
return visitor.visitChildren(self)
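    # Parses the `atom` rule, dispatching on the lookahead token:
    #     atom : terminal             # AtomTerminal
    #          | ruleref              # AtomRuleRef
    #          | notSet               # AtomNot
    #          | '.' elementOptions?  # AtomWildcard
    #          ;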
def atom(self):
localctx = ANTLRv4Parser.AtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_atom)
self._la = 0 # Token type
try:
self.state = 540
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.AtomTerminalContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 533
self.terminal()
pass
elif token in [ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.AtomRuleRefContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 534
self.ruleref()
pass
elif token in [ANTLRv4Parser.NOT]:
localctx = ANTLRv4Parser.AtomNotContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 535
self.notSet()
pass
elif token in [ANTLRv4Parser.DOT]:
localctx = ANTLRv4Parser.AtomWildcardContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 536
self.match(ANTLRv4Parser.DOT)
self.state = 538
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 537
self.elementOptions()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NotSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_notSet
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NotBlockContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # BlockSetContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def blockSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotBlock" ):
listener.enterNotBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotBlock" ):
listener.exitNotBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotBlock" ):
return visitor.visitNotBlock(self)
else:
return visitor.visitChildren(self)
class NotElementContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # SetElementContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def setElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotElement" ):
listener.enterNotElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotElement" ):
listener.exitNotElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotElement" ):
return visitor.visitNotElement(self)
else:
return visitor.visitChildren(self)
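    # Parses the `notSet` rule:
    #     notSet : '~' setElement  # NotElement
    #            | '~' blockSet    # NotBlock
    #            ;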
def notSet(self):
localctx = ANTLRv4Parser.NotSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_notSet)
try:
self.state = 546
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.NotElementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 542
self.match(ANTLRv4Parser.NOT)
self.state = 543
localctx.value = self.setElement()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.NotBlockContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 544
self.match(ANTLRv4Parser.NOT)
self.state = 545
localctx.value = self.blockSet()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._setElement = None # SetElementContext
self.elements = list() # of SetElementContexts
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def setElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.SetElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_blockSet
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockSet" ):
listener.enterBlockSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockSet" ):
listener.exitBlockSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockSet" ):
return visitor.visitBlockSet(self)
else:
return visitor.visitChildren(self)
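    # Parses the `blockSet` rule:
    #     blockSet : '(' setElement ('|' setElement)* ')' ;
    # Parsed elements are collected into `localctx.elements`.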
def blockSet(self):
localctx = ANTLRv4Parser.BlockSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_blockSet)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 548
self.match(ANTLRv4Parser.LPAREN)
self.state = 549
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 554
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 550
self.match(ANTLRv4Parser.OR)
self.state = 551
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 556
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 557
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SetElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_setElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SetElementRefContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRef" ):
listener.enterSetElementRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRef" ):
listener.exitSetElementRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRef" ):
return visitor.visitSetElementRef(self)
else:
return visitor.visitChildren(self)
class SetElementRangeContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.copyFrom(ctx)
def characterRange(self):
return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRange" ):
listener.enterSetElementRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRange" ):
listener.exitSetElementRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRange" ):
return visitor.visitSetElementRange(self)
else:
return visitor.visitChildren(self)
class SetElementLitContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementLit" ):
listener.enterSetElementLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementLit" ):
listener.exitSetElementLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementLit" ):
return visitor.visitSetElementLit(self)
else:
return visitor.visitChildren(self)
class SetElementCharSetContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementCharSet" ):
listener.enterSetElementCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementCharSet" ):
listener.exitSetElementCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementCharSet" ):
return visitor.visitSetElementCharSet(self)
else:
return visitor.visitChildren(self)
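    # Parses the `setElement` rule:
    #     setElement : TOKEN_REF elementOptions?       # SetElementRef
    #                | STRING_LITERAL elementOptions?  # SetElementLit
    #                | characterRange                  # SetElementRange
    #                | LEXER_CHAR_SET                  # SetElementCharSet
    #                ;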
def setElement(self):
localctx = ANTLRv4Parser.SetElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_setElement)
self._la = 0 # Token type
try:
self.state = 569
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,73,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.SetElementRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 559
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 561
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 560
self.elementOptions()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.SetElementLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 563
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 565
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 564
self.elementOptions()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.SetElementRangeContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 567
self.characterRange()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.SetElementCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 568
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def altList(self):
return self.getTypedRuleContext(ANTLRv4Parser.AltListContext,0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def COLON(self):
return self.getToken(ANTLRv4Parser.COLON, 0)
def optionsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)
def ruleAction(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RuleActionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock" ):
return visitor.visitBlock(self)
else:
return visitor.visitChildren(self)
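    # Parses the `block` rule:
    #     block : '(' (optionsSpec? ruleAction* ':')? altList ')' ;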
def block(self):
localctx = ANTLRv4Parser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 571
self.match(ANTLRv4Parser.LPAREN)
self.state = 582
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.COLON) | (1 << ANTLRv4Parser.AT))) != 0):
self.state = 573
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.OPTIONS:
self.state = 572
self.optionsSpec()
self.state = 578
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.AT:
self.state = 575
self.ruleAction()
self.state = 580
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 581
self.match(ANTLRv4Parser.COLON)
self.state = 584
self.altList()
self.state = 585
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RulerefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.value = None # Token
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleref
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleref" ):
listener.enterRuleref(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleref" ):
listener.exitRuleref(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleref" ):
return visitor.visitRuleref(self)
else:
return visitor.visitChildren(self)
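    # Parses the `ruleref` rule:
    #     ruleref : RULE_REF argActionBlock? elementOptions? ;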
def ruleref(self):
localctx = ANTLRv4Parser.RulerefContext(self, self._ctx, self.state)
self.enterRule(localctx, 110, self.RULE_ruleref)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 587
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
self.state = 589
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
self.state = 588
self.argActionBlock()
self.state = 592
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 591
self.elementOptions()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CharacterRangeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.start = None # Token
self.end = None # Token
def RANGE(self):
return self.getToken(ANTLRv4Parser.RANGE, 0)
def STRING_LITERAL(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.STRING_LITERAL)
else:
return self.getToken(ANTLRv4Parser.STRING_LITERAL, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_characterRange
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCharacterRange" ):
listener.enterCharacterRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCharacterRange" ):
listener.exitCharacterRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCharacterRange" ):
return visitor.visitCharacterRange(self)
else:
return visitor.visitChildren(self)
def characterRange(self):
localctx = ANTLRv4Parser.CharacterRangeContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_characterRange)
try:
self.enterOuterAlt(localctx, 1)
self.state = 594
localctx.start = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 595
self.match(ANTLRv4Parser.RANGE)
self.state = 596
localctx.end = self.match(ANTLRv4Parser.STRING_LITERAL)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TerminalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_terminal
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class TerminalRefContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually an ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalRef" ):
listener.enterTerminalRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalRef" ):
listener.exitTerminalRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalRef" ):
return visitor.visitTerminalRef(self)
else:
return visitor.visitChildren(self)
class TerminalLitContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually an ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalLit" ):
listener.enterTerminalLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalLit" ):
listener.exitTerminalLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalLit" ):
return visitor.visitTerminalLit(self)
else:
return visitor.visitChildren(self)
def terminal(self):
localctx = ANTLRv4Parser.TerminalContext(self, self._ctx, self.state)
self.enterRule(localctx, 114, self.RULE_terminal)
self._la = 0 # Token type
try:
self.state = 606
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TerminalRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 598
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 600
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 599
self.elementOptions()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.TerminalLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 602
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 604
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 603
self.elementOptions()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LT(self):
return self.getToken(ANTLRv4Parser.LT, 0)
def elementOption(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ElementOptionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionContext,i)
def GT(self):
return self.getToken(ANTLRv4Parser.GT, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOptions
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOptions" ):
listener.enterElementOptions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOptions" ):
listener.exitElementOptions(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOptions" ):
return visitor.visitElementOptions(self)
else:
return visitor.visitChildren(self)
def elementOptions(self):
localctx = ANTLRv4Parser.ElementOptionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_elementOptions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 608
self.match(ANTLRv4Parser.LT)
self.state = 609
self.elementOption()
self.state = 614
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 610
self.match(ANTLRv4Parser.COMMA)
self.state = 611
self.elementOption()
self.state = 616
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 617
self.match(ANTLRv4Parser.GT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOption
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOption" ):
listener.enterElementOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOption" ):
listener.exitElementOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOption" ):
return visitor.visitElementOption(self)
else:
return visitor.visitChildren(self)
def elementOption(self):
localctx = ANTLRv4Parser.ElementOptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 118, self.RULE_elementOption)
try:
self.state = 626
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 619
self.identifier()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 620
self.identifier()
self.state = 621
self.match(ANTLRv4Parser.ASSIGN)
self.state = 624
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.state = 622
self.identifier()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
self.state = 623
self.match(ANTLRv4Parser.STRING_LITERAL)
pass
else:
raise NoViableAltException(self)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_identifier
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RuleRefIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually an ANTLRv4Parser.IdentifierContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleRefIdentifier" ):
listener.enterRuleRefIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleRefIdentifier" ):
listener.exitRuleRefIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleRefIdentifier" ):
return visitor.visitRuleRefIdentifier(self)
else:
return visitor.visitChildren(self)
class TokenRefIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually an ANTLRv4Parser.IdentifierContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTokenRefIdentifier" ):
listener.enterTokenRefIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTokenRefIdentifier" ):
listener.exitTokenRefIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTokenRefIdentifier" ):
return visitor.visitTokenRefIdentifier(self)
else:
return visitor.visitChildren(self)
def identifier(self):
localctx = ANTLRv4Parser.IdentifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_identifier)
try:
self.state = 630
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.RuleRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 628
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
pass
elif token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TokenRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 629
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
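# --- Editorial sketch (not part of the generated file). ---
# Driving the generated parser follows the standard antlr4 runtime pipeline.
# The companion ANTLRv4Lexer module and the grammarSpec() entry rule are
# assumptions inferred from the surrounding generated sources.
def _example_parse_grammar(text):
    from antlr4 import InputStream, CommonTokenStream
    from .ANTLRv4Lexer import ANTLRv4Lexer  # assumed sibling module
    lexer = ANTLRv4Lexer(InputStream(text))
    parser = ANTLRv4Parser(CommonTokenStream(lexer))
    return parser.grammarSpec()  # root context of the parse tree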
# ---- sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4Parser.tokens ----
TOKEN_REF=1
RULE_REF=2
LEXER_CHAR_SET=3
DOC_COMMENT=4
HEADER=5
BLOCK_COMMENT=6
LINE_COMMENT=7
INT=8
STRING_LITERAL=9
UNTERMINATED_STRING_LITERAL=10
BEGIN_ARGUMENT=11
BEGIN_ACTION=12
OPTIONS=13
TOKENS=14
CHANNELS=15
IMPORT=16
FRAGMENT=17
LEXER=18
PARSER=19
GRAMMAR=20
PROTECTED=21
PUBLIC=22
PRIVATE=23
RETURNS=24
LOCALS=25
THROWS=26
CATCH=27
FINALLY=28
MODE=29
COLON=30
COLONCOLON=31
COMMA=32
SEMI=33
LPAREN=34
RPAREN=35
LBRACE=36
RBRACE=37
RARROW=38
LT=39
GT=40
ASSIGN=41
QUESTION=42
STAR=43
PLUS_ASSIGN=44
PLUS=45
OR=46
DOLLAR=47
RANGE=48
DOT=49
AT=50
POUND=51
NOT=52
ID=53
WS=54
ERRCHAR=55
END_ARGUMENT=56
UNTERMINATED_ARGUMENT=57
ARGUMENT_CONTENT=58
END_ACTION=59
UNTERMINATED_ACTION=60
ACTION_CONTENT=61
UNTERMINATED_CHAR_SET=62
'import'=16
'fragment'=17
'lexer'=18
'parser'=19
'grammar'=20
'protected'=21
'public'=22
'private'=23
'returns'=24
'locals'=25
'throws'=26
'catch'=27
'finally'=28
'mode'=29
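# (Editorial note, not part of the archive.) The .tokens file above maps each
# symbolic token name and keyword literal to the integer token type shared by
# the generated lexer and parser, e.g. ANTLRv4Parser.LPAREN == 34. A minimal
# sketch of loading such a map, assuming the standard NAME=INT line format:
#
#     def load_token_types(path):
#         types = {}
#         with open(path) as f:
#             for line in f:
#                 name, _, num = line.rstrip("\n").rpartition("=")
#                 if name:
#                     types[name] = int(num)
#         return types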
# ---- sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4ParserListener.py ----
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ANTLRv4Parser import ANTLRv4Parser
else:
from ANTLRv4Parser import ANTLRv4Parser
# This class defines a complete listener for a parse tree produced by ANTLRv4Parser.
class ANTLRv4ParserListener(ParseTreeListener):
# Enter a parse tree produced by ANTLRv4Parser#grammarSpec.
def enterGrammarSpec(self, ctx:ANTLRv4Parser.GrammarSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#grammarSpec.
def exitGrammarSpec(self, ctx:ANTLRv4Parser.GrammarSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#grammarType.
def enterGrammarType(self, ctx:ANTLRv4Parser.GrammarTypeContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#grammarType.
def exitGrammarType(self, ctx:ANTLRv4Parser.GrammarTypeContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#prequelConstruct.
def enterPrequelConstruct(self, ctx:ANTLRv4Parser.PrequelConstructContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#prequelConstruct.
def exitPrequelConstruct(self, ctx:ANTLRv4Parser.PrequelConstructContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#optionsSpec.
def enterOptionsSpec(self, ctx:ANTLRv4Parser.OptionsSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#optionsSpec.
def exitOptionsSpec(self, ctx:ANTLRv4Parser.OptionsSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#option.
def enterOption(self, ctx:ANTLRv4Parser.OptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#option.
def exitOption(self, ctx:ANTLRv4Parser.OptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#pathOption.
def enterPathOption(self, ctx:ANTLRv4Parser.PathOptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#pathOption.
def exitPathOption(self, ctx:ANTLRv4Parser.PathOptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#stringOption.
def enterStringOption(self, ctx:ANTLRv4Parser.StringOptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#stringOption.
def exitStringOption(self, ctx:ANTLRv4Parser.StringOptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#actionOption.
def enterActionOption(self, ctx:ANTLRv4Parser.ActionOptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#actionOption.
def exitActionOption(self, ctx:ANTLRv4Parser.ActionOptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#intOption.
def enterIntOption(self, ctx:ANTLRv4Parser.IntOptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#intOption.
def exitIntOption(self, ctx:ANTLRv4Parser.IntOptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#delegateGrammars.
def enterDelegateGrammars(self, ctx:ANTLRv4Parser.DelegateGrammarsContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#delegateGrammars.
def exitDelegateGrammars(self, ctx:ANTLRv4Parser.DelegateGrammarsContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#delegateGrammar.
def enterDelegateGrammar(self, ctx:ANTLRv4Parser.DelegateGrammarContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#delegateGrammar.
def exitDelegateGrammar(self, ctx:ANTLRv4Parser.DelegateGrammarContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#tokensSpec.
def enterTokensSpec(self, ctx:ANTLRv4Parser.TokensSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#tokensSpec.
def exitTokensSpec(self, ctx:ANTLRv4Parser.TokensSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#channelsSpec.
def enterChannelsSpec(self, ctx:ANTLRv4Parser.ChannelsSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#channelsSpec.
def exitChannelsSpec(self, ctx:ANTLRv4Parser.ChannelsSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#idList.
def enterIdList(self, ctx:ANTLRv4Parser.IdListContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#idList.
def exitIdList(self, ctx:ANTLRv4Parser.IdListContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#action.
def enterAction(self, ctx:ANTLRv4Parser.ActionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#action.
def exitAction(self, ctx:ANTLRv4Parser.ActionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#actionScopeName.
def enterActionScopeName(self, ctx:ANTLRv4Parser.ActionScopeNameContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#actionScopeName.
def exitActionScopeName(self, ctx:ANTLRv4Parser.ActionScopeNameContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#actionBlock.
def enterActionBlock(self, ctx:ANTLRv4Parser.ActionBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#actionBlock.
def exitActionBlock(self, ctx:ANTLRv4Parser.ActionBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#argActionBlock.
def enterArgActionBlock(self, ctx:ANTLRv4Parser.ArgActionBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#argActionBlock.
def exitArgActionBlock(self, ctx:ANTLRv4Parser.ArgActionBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#modeSpec.
def enterModeSpec(self, ctx:ANTLRv4Parser.ModeSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#modeSpec.
def exitModeSpec(self, ctx:ANTLRv4Parser.ModeSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#rules.
def enterRules(self, ctx:ANTLRv4Parser.RulesContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#rules.
def exitRules(self, ctx:ANTLRv4Parser.RulesContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleSpec.
def enterRuleSpec(self, ctx:ANTLRv4Parser.RuleSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleSpec.
def exitRuleSpec(self, ctx:ANTLRv4Parser.RuleSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserRuleSpec.
def enterParserRuleSpec(self, ctx:ANTLRv4Parser.ParserRuleSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserRuleSpec.
def exitParserRuleSpec(self, ctx:ANTLRv4Parser.ParserRuleSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#exceptionGroup.
def enterExceptionGroup(self, ctx:ANTLRv4Parser.ExceptionGroupContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#exceptionGroup.
def exitExceptionGroup(self, ctx:ANTLRv4Parser.ExceptionGroupContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#exceptionHandler.
def enterExceptionHandler(self, ctx:ANTLRv4Parser.ExceptionHandlerContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#exceptionHandler.
def exitExceptionHandler(self, ctx:ANTLRv4Parser.ExceptionHandlerContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#finallyClause.
def enterFinallyClause(self, ctx:ANTLRv4Parser.FinallyClauseContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#finallyClause.
def exitFinallyClause(self, ctx:ANTLRv4Parser.FinallyClauseContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#rulePrequel.
def enterRulePrequel(self, ctx:ANTLRv4Parser.RulePrequelContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#rulePrequel.
def exitRulePrequel(self, ctx:ANTLRv4Parser.RulePrequelContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleReturns.
def enterRuleReturns(self, ctx:ANTLRv4Parser.RuleReturnsContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleReturns.
def exitRuleReturns(self, ctx:ANTLRv4Parser.RuleReturnsContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#throwsSpec.
def enterThrowsSpec(self, ctx:ANTLRv4Parser.ThrowsSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#throwsSpec.
def exitThrowsSpec(self, ctx:ANTLRv4Parser.ThrowsSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#localsSpec.
def enterLocalsSpec(self, ctx:ANTLRv4Parser.LocalsSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#localsSpec.
def exitLocalsSpec(self, ctx:ANTLRv4Parser.LocalsSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleAction.
def enterRuleAction(self, ctx:ANTLRv4Parser.RuleActionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleAction.
def exitRuleAction(self, ctx:ANTLRv4Parser.RuleActionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleModifiers.
def enterRuleModifiers(self, ctx:ANTLRv4Parser.RuleModifiersContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleModifiers.
def exitRuleModifiers(self, ctx:ANTLRv4Parser.RuleModifiersContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleModifier.
def enterRuleModifier(self, ctx:ANTLRv4Parser.RuleModifierContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleModifier.
def exitRuleModifier(self, ctx:ANTLRv4Parser.RuleModifierContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleBlock.
def enterRuleBlock(self, ctx:ANTLRv4Parser.RuleBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleBlock.
def exitRuleBlock(self, ctx:ANTLRv4Parser.RuleBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleAltList.
def enterRuleAltList(self, ctx:ANTLRv4Parser.RuleAltListContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleAltList.
def exitRuleAltList(self, ctx:ANTLRv4Parser.RuleAltListContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledAlt.
def enterLabeledAlt(self, ctx:ANTLRv4Parser.LabeledAltContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledAlt.
def exitLabeledAlt(self, ctx:ANTLRv4Parser.LabeledAltContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerRuleSpec.
def enterLexerRuleSpec(self, ctx:ANTLRv4Parser.LexerRuleSpecContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerRuleSpec.
def exitLexerRuleSpec(self, ctx:ANTLRv4Parser.LexerRuleSpecContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerRuleBlock.
def enterLexerRuleBlock(self, ctx:ANTLRv4Parser.LexerRuleBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerRuleBlock.
def exitLexerRuleBlock(self, ctx:ANTLRv4Parser.LexerRuleBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAltList.
def enterLexerAltList(self, ctx:ANTLRv4Parser.LexerAltListContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAltList.
def exitLexerAltList(self, ctx:ANTLRv4Parser.LexerAltListContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAlt.
def enterLexerAlt(self, ctx:ANTLRv4Parser.LexerAltContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAlt.
def exitLexerAlt(self, ctx:ANTLRv4Parser.LexerAltContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElements.
def enterLexerElements(self, ctx:ANTLRv4Parser.LexerElementsContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElements.
def exitLexerElements(self, ctx:ANTLRv4Parser.LexerElementsContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElementLabeled.
def enterLexerElementLabeled(self, ctx:ANTLRv4Parser.LexerElementLabeledContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElementLabeled.
def exitLexerElementLabeled(self, ctx:ANTLRv4Parser.LexerElementLabeledContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElementAtom.
def enterLexerElementAtom(self, ctx:ANTLRv4Parser.LexerElementAtomContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElementAtom.
def exitLexerElementAtom(self, ctx:ANTLRv4Parser.LexerElementAtomContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElementBlock.
def enterLexerElementBlock(self, ctx:ANTLRv4Parser.LexerElementBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElementBlock.
def exitLexerElementBlock(self, ctx:ANTLRv4Parser.LexerElementBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElementAction.
def enterLexerElementAction(self, ctx:ANTLRv4Parser.LexerElementActionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElementAction.
def exitLexerElementAction(self, ctx:ANTLRv4Parser.LexerElementActionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledLexerElement.
def enterLabeledLexerElement(self, ctx:ANTLRv4Parser.LabeledLexerElementContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledLexerElement.
def exitLabeledLexerElement(self, ctx:ANTLRv4Parser.LabeledLexerElementContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerBlock.
def enterLexerBlock(self, ctx:ANTLRv4Parser.LexerBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerBlock.
def exitLexerBlock(self, ctx:ANTLRv4Parser.LexerBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommands.
def enterLexerCommands(self, ctx:ANTLRv4Parser.LexerCommandsContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommands.
def exitLexerCommands(self, ctx:ANTLRv4Parser.LexerCommandsContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommand.
def enterLexerCommand(self, ctx:ANTLRv4Parser.LexerCommandContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommand.
def exitLexerCommand(self, ctx:ANTLRv4Parser.LexerCommandContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommandName.
def enterLexerCommandName(self, ctx:ANTLRv4Parser.LexerCommandNameContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommandName.
def exitLexerCommandName(self, ctx:ANTLRv4Parser.LexerCommandNameContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommandExpr.
def enterLexerCommandExpr(self, ctx:ANTLRv4Parser.LexerCommandExprContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommandExpr.
def exitLexerCommandExpr(self, ctx:ANTLRv4Parser.LexerCommandExprContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#altList.
def enterAltList(self, ctx:ANTLRv4Parser.AltListContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#altList.
def exitAltList(self, ctx:ANTLRv4Parser.AltListContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#alternative.
def enterAlternative(self, ctx:ANTLRv4Parser.AlternativeContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#alternative.
def exitAlternative(self, ctx:ANTLRv4Parser.AlternativeContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserElementLabeled.
def enterParserElementLabeled(self, ctx:ANTLRv4Parser.ParserElementLabeledContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserElementLabeled.
def exitParserElementLabeled(self, ctx:ANTLRv4Parser.ParserElementLabeledContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserElementAtom.
def enterParserElementAtom(self, ctx:ANTLRv4Parser.ParserElementAtomContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserElementAtom.
def exitParserElementAtom(self, ctx:ANTLRv4Parser.ParserElementAtomContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserElementBlock.
def enterParserElementBlock(self, ctx:ANTLRv4Parser.ParserElementBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserElementBlock.
def exitParserElementBlock(self, ctx:ANTLRv4Parser.ParserElementBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserElementAction.
def enterParserElementAction(self, ctx:ANTLRv4Parser.ParserElementActionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserElementAction.
def exitParserElementAction(self, ctx:ANTLRv4Parser.ParserElementActionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserInlineDoc.
def enterParserInlineDoc(self, ctx:ANTLRv4Parser.ParserInlineDocContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserInlineDoc.
def exitParserInlineDoc(self, ctx:ANTLRv4Parser.ParserInlineDocContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledElement.
def enterLabeledElement(self, ctx:ANTLRv4Parser.LabeledElementContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledElement.
def exitLabeledElement(self, ctx:ANTLRv4Parser.LabeledElementContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ebnfSuffix.
def enterEbnfSuffix(self, ctx:ANTLRv4Parser.EbnfSuffixContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ebnfSuffix.
def exitEbnfSuffix(self, ctx:ANTLRv4Parser.EbnfSuffixContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomRange.
def enterLexerAtomRange(self, ctx:ANTLRv4Parser.LexerAtomRangeContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomRange.
def exitLexerAtomRange(self, ctx:ANTLRv4Parser.LexerAtomRangeContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomTerminal.
def enterLexerAtomTerminal(self, ctx:ANTLRv4Parser.LexerAtomTerminalContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomTerminal.
def exitLexerAtomTerminal(self, ctx:ANTLRv4Parser.LexerAtomTerminalContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomNot.
def enterLexerAtomNot(self, ctx:ANTLRv4Parser.LexerAtomNotContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomNot.
def exitLexerAtomNot(self, ctx:ANTLRv4Parser.LexerAtomNotContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomCharSet.
def enterLexerAtomCharSet(self, ctx:ANTLRv4Parser.LexerAtomCharSetContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomCharSet.
def exitLexerAtomCharSet(self, ctx:ANTLRv4Parser.LexerAtomCharSetContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomWildcard.
def enterLexerAtomWildcard(self, ctx:ANTLRv4Parser.LexerAtomWildcardContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomWildcard.
def exitLexerAtomWildcard(self, ctx:ANTLRv4Parser.LexerAtomWildcardContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtomDoc.
def enterLexerAtomDoc(self, ctx:ANTLRv4Parser.LexerAtomDocContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtomDoc.
def exitLexerAtomDoc(self, ctx:ANTLRv4Parser.LexerAtomDocContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#atomTerminal.
def enterAtomTerminal(self, ctx:ANTLRv4Parser.AtomTerminalContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#atomTerminal.
def exitAtomTerminal(self, ctx:ANTLRv4Parser.AtomTerminalContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#atomRuleRef.
def enterAtomRuleRef(self, ctx:ANTLRv4Parser.AtomRuleRefContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#atomRuleRef.
def exitAtomRuleRef(self, ctx:ANTLRv4Parser.AtomRuleRefContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#atomNot.
def enterAtomNot(self, ctx:ANTLRv4Parser.AtomNotContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#atomNot.
def exitAtomNot(self, ctx:ANTLRv4Parser.AtomNotContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#atomWildcard.
def enterAtomWildcard(self, ctx:ANTLRv4Parser.AtomWildcardContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#atomWildcard.
def exitAtomWildcard(self, ctx:ANTLRv4Parser.AtomWildcardContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#notElement.
def enterNotElement(self, ctx:ANTLRv4Parser.NotElementContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#notElement.
def exitNotElement(self, ctx:ANTLRv4Parser.NotElementContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#notBlock.
def enterNotBlock(self, ctx:ANTLRv4Parser.NotBlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#notBlock.
def exitNotBlock(self, ctx:ANTLRv4Parser.NotBlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#blockSet.
def enterBlockSet(self, ctx:ANTLRv4Parser.BlockSetContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#blockSet.
def exitBlockSet(self, ctx:ANTLRv4Parser.BlockSetContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#setElementRef.
def enterSetElementRef(self, ctx:ANTLRv4Parser.SetElementRefContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#setElementRef.
def exitSetElementRef(self, ctx:ANTLRv4Parser.SetElementRefContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#setElementLit.
def enterSetElementLit(self, ctx:ANTLRv4Parser.SetElementLitContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#setElementLit.
def exitSetElementLit(self, ctx:ANTLRv4Parser.SetElementLitContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#setElementRange.
def enterSetElementRange(self, ctx:ANTLRv4Parser.SetElementRangeContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#setElementRange.
def exitSetElementRange(self, ctx:ANTLRv4Parser.SetElementRangeContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#setElementCharSet.
def enterSetElementCharSet(self, ctx:ANTLRv4Parser.SetElementCharSetContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#setElementCharSet.
def exitSetElementCharSet(self, ctx:ANTLRv4Parser.SetElementCharSetContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#block.
def enterBlock(self, ctx:ANTLRv4Parser.BlockContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#block.
def exitBlock(self, ctx:ANTLRv4Parser.BlockContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleref.
def enterRuleref(self, ctx:ANTLRv4Parser.RulerefContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleref.
def exitRuleref(self, ctx:ANTLRv4Parser.RulerefContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#characterRange.
def enterCharacterRange(self, ctx:ANTLRv4Parser.CharacterRangeContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#characterRange.
def exitCharacterRange(self, ctx:ANTLRv4Parser.CharacterRangeContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#terminalRef.
def enterTerminalRef(self, ctx:ANTLRv4Parser.TerminalRefContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#terminalRef.
def exitTerminalRef(self, ctx:ANTLRv4Parser.TerminalRefContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#terminalLit.
def enterTerminalLit(self, ctx:ANTLRv4Parser.TerminalLitContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#terminalLit.
def exitTerminalLit(self, ctx:ANTLRv4Parser.TerminalLitContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#elementOptions.
def enterElementOptions(self, ctx:ANTLRv4Parser.ElementOptionsContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#elementOptions.
def exitElementOptions(self, ctx:ANTLRv4Parser.ElementOptionsContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#elementOption.
def enterElementOption(self, ctx:ANTLRv4Parser.ElementOptionContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#elementOption.
def exitElementOption(self, ctx:ANTLRv4Parser.ElementOptionContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleRefIdentifier.
def enterRuleRefIdentifier(self, ctx:ANTLRv4Parser.RuleRefIdentifierContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleRefIdentifier.
def exitRuleRefIdentifier(self, ctx:ANTLRv4Parser.RuleRefIdentifierContext):
pass
# Enter a parse tree produced by ANTLRv4Parser#tokenRefIdentifier.
def enterTokenRefIdentifier(self, ctx:ANTLRv4Parser.TokenRefIdentifierContext):
pass
# Exit a parse tree produced by ANTLRv4Parser#tokenRefIdentifier.
def exitTokenRefIdentifier(self, ctx:ANTLRv4Parser.TokenRefIdentifierContext):
pass
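# --- Editorial sketch (not part of the generated file). ---
# A concrete listener overrides only the callbacks it needs; everything else
# inherits the no-op bodies above. For example, collecting the names of all
# parser rules (the RULE_REF() accessor on ParserRuleSpecContext is assumed,
# as in the stock ANTLRv4 grammar):
class _ExampleRuleNameCollector(ANTLRv4ParserListener):
    def __init__(self):
        self.names = []
    def enterParserRuleSpec(self, ctx:ANTLRv4Parser.ParserRuleSpecContext):
        self.names.append(ctx.RULE_REF().getText())
# Usage with the runtime's depth-first walker:
#     ParseTreeWalker.DEFAULT.walk(_ExampleRuleNameCollector(), tree)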
# ---- sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/ANTLRv4ParserVisitor.py ----
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ANTLRv4Parser import ANTLRv4Parser
else:
from ANTLRv4Parser import ANTLRv4Parser
# This class defines a complete generic visitor for a parse tree produced by ANTLRv4Parser.
class ANTLRv4ParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by ANTLRv4Parser#grammarSpec.
def visitGrammarSpec(self, ctx:ANTLRv4Parser.GrammarSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#grammarType.
def visitGrammarType(self, ctx:ANTLRv4Parser.GrammarTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#prequelConstruct.
def visitPrequelConstruct(self, ctx:ANTLRv4Parser.PrequelConstructContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#optionsSpec.
def visitOptionsSpec(self, ctx:ANTLRv4Parser.OptionsSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#option.
def visitOption(self, ctx:ANTLRv4Parser.OptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#pathOption.
def visitPathOption(self, ctx:ANTLRv4Parser.PathOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#stringOption.
def visitStringOption(self, ctx:ANTLRv4Parser.StringOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#actionOption.
def visitActionOption(self, ctx:ANTLRv4Parser.ActionOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#intOption.
def visitIntOption(self, ctx:ANTLRv4Parser.IntOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#delegateGrammars.
def visitDelegateGrammars(self, ctx:ANTLRv4Parser.DelegateGrammarsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#delegateGrammar.
def visitDelegateGrammar(self, ctx:ANTLRv4Parser.DelegateGrammarContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#tokensSpec.
def visitTokensSpec(self, ctx:ANTLRv4Parser.TokensSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#channelsSpec.
def visitChannelsSpec(self, ctx:ANTLRv4Parser.ChannelsSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#idList.
def visitIdList(self, ctx:ANTLRv4Parser.IdListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#action.
def visitAction(self, ctx:ANTLRv4Parser.ActionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#actionScopeName.
def visitActionScopeName(self, ctx:ANTLRv4Parser.ActionScopeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#actionBlock.
def visitActionBlock(self, ctx:ANTLRv4Parser.ActionBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#argActionBlock.
def visitArgActionBlock(self, ctx:ANTLRv4Parser.ArgActionBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#modeSpec.
def visitModeSpec(self, ctx:ANTLRv4Parser.ModeSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#rules.
def visitRules(self, ctx:ANTLRv4Parser.RulesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleSpec.
def visitRuleSpec(self, ctx:ANTLRv4Parser.RuleSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserRuleSpec.
def visitParserRuleSpec(self, ctx:ANTLRv4Parser.ParserRuleSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#exceptionGroup.
def visitExceptionGroup(self, ctx:ANTLRv4Parser.ExceptionGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#exceptionHandler.
def visitExceptionHandler(self, ctx:ANTLRv4Parser.ExceptionHandlerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#finallyClause.
def visitFinallyClause(self, ctx:ANTLRv4Parser.FinallyClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#rulePrequel.
def visitRulePrequel(self, ctx:ANTLRv4Parser.RulePrequelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleReturns.
def visitRuleReturns(self, ctx:ANTLRv4Parser.RuleReturnsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#throwsSpec.
def visitThrowsSpec(self, ctx:ANTLRv4Parser.ThrowsSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#localsSpec.
def visitLocalsSpec(self, ctx:ANTLRv4Parser.LocalsSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleAction.
def visitRuleAction(self, ctx:ANTLRv4Parser.RuleActionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleModifiers.
def visitRuleModifiers(self, ctx:ANTLRv4Parser.RuleModifiersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleModifier.
def visitRuleModifier(self, ctx:ANTLRv4Parser.RuleModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleBlock.
def visitRuleBlock(self, ctx:ANTLRv4Parser.RuleBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleAltList.
def visitRuleAltList(self, ctx:ANTLRv4Parser.RuleAltListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#labeledAlt.
def visitLabeledAlt(self, ctx:ANTLRv4Parser.LabeledAltContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerRuleSpec.
def visitLexerRuleSpec(self, ctx:ANTLRv4Parser.LexerRuleSpecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerRuleBlock.
def visitLexerRuleBlock(self, ctx:ANTLRv4Parser.LexerRuleBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAltList.
def visitLexerAltList(self, ctx:ANTLRv4Parser.LexerAltListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAlt.
def visitLexerAlt(self, ctx:ANTLRv4Parser.LexerAltContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerElements.
def visitLexerElements(self, ctx:ANTLRv4Parser.LexerElementsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerElementLabeled.
def visitLexerElementLabeled(self, ctx:ANTLRv4Parser.LexerElementLabeledContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerElementAtom.
def visitLexerElementAtom(self, ctx:ANTLRv4Parser.LexerElementAtomContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerElementBlock.
def visitLexerElementBlock(self, ctx:ANTLRv4Parser.LexerElementBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerElementAction.
def visitLexerElementAction(self, ctx:ANTLRv4Parser.LexerElementActionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#labeledLexerElement.
def visitLabeledLexerElement(self, ctx:ANTLRv4Parser.LabeledLexerElementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerBlock.
def visitLexerBlock(self, ctx:ANTLRv4Parser.LexerBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerCommands.
def visitLexerCommands(self, ctx:ANTLRv4Parser.LexerCommandsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerCommand.
def visitLexerCommand(self, ctx:ANTLRv4Parser.LexerCommandContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerCommandName.
def visitLexerCommandName(self, ctx:ANTLRv4Parser.LexerCommandNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerCommandExpr.
def visitLexerCommandExpr(self, ctx:ANTLRv4Parser.LexerCommandExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#altList.
def visitAltList(self, ctx:ANTLRv4Parser.AltListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#alternative.
def visitAlternative(self, ctx:ANTLRv4Parser.AlternativeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserElementLabeled.
def visitParserElementLabeled(self, ctx:ANTLRv4Parser.ParserElementLabeledContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserElementAtom.
def visitParserElementAtom(self, ctx:ANTLRv4Parser.ParserElementAtomContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserElementBlock.
def visitParserElementBlock(self, ctx:ANTLRv4Parser.ParserElementBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserElementAction.
def visitParserElementAction(self, ctx:ANTLRv4Parser.ParserElementActionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#parserInlineDoc.
def visitParserInlineDoc(self, ctx:ANTLRv4Parser.ParserInlineDocContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#labeledElement.
def visitLabeledElement(self, ctx:ANTLRv4Parser.LabeledElementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ebnfSuffix.
def visitEbnfSuffix(self, ctx:ANTLRv4Parser.EbnfSuffixContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomRange.
def visitLexerAtomRange(self, ctx:ANTLRv4Parser.LexerAtomRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomTerminal.
def visitLexerAtomTerminal(self, ctx:ANTLRv4Parser.LexerAtomTerminalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomNot.
def visitLexerAtomNot(self, ctx:ANTLRv4Parser.LexerAtomNotContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomCharSet.
def visitLexerAtomCharSet(self, ctx:ANTLRv4Parser.LexerAtomCharSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomWildcard.
def visitLexerAtomWildcard(self, ctx:ANTLRv4Parser.LexerAtomWildcardContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#lexerAtomDoc.
def visitLexerAtomDoc(self, ctx:ANTLRv4Parser.LexerAtomDocContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#atomTerminal.
def visitAtomTerminal(self, ctx:ANTLRv4Parser.AtomTerminalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#atomRuleRef.
def visitAtomRuleRef(self, ctx:ANTLRv4Parser.AtomRuleRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#atomNot.
def visitAtomNot(self, ctx:ANTLRv4Parser.AtomNotContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#atomWildcard.
def visitAtomWildcard(self, ctx:ANTLRv4Parser.AtomWildcardContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#notElement.
def visitNotElement(self, ctx:ANTLRv4Parser.NotElementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#notBlock.
def visitNotBlock(self, ctx:ANTLRv4Parser.NotBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#blockSet.
def visitBlockSet(self, ctx:ANTLRv4Parser.BlockSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#setElementRef.
def visitSetElementRef(self, ctx:ANTLRv4Parser.SetElementRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#setElementLit.
def visitSetElementLit(self, ctx:ANTLRv4Parser.SetElementLitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#setElementRange.
def visitSetElementRange(self, ctx:ANTLRv4Parser.SetElementRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#setElementCharSet.
def visitSetElementCharSet(self, ctx:ANTLRv4Parser.SetElementCharSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#block.
def visitBlock(self, ctx:ANTLRv4Parser.BlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleref.
def visitRuleref(self, ctx:ANTLRv4Parser.RulerefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#characterRange.
def visitCharacterRange(self, ctx:ANTLRv4Parser.CharacterRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#terminalRef.
def visitTerminalRef(self, ctx:ANTLRv4Parser.TerminalRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#terminalLit.
def visitTerminalLit(self, ctx:ANTLRv4Parser.TerminalLitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#elementOptions.
def visitElementOptions(self, ctx:ANTLRv4Parser.ElementOptionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#elementOption.
def visitElementOption(self, ctx:ANTLRv4Parser.ElementOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#ruleRefIdentifier.
def visitRuleRefIdentifier(self, ctx:ANTLRv4Parser.RuleRefIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ANTLRv4Parser#tokenRefIdentifier.
def visitTokenRefIdentifier(self, ctx:ANTLRv4Parser.TokenRefIdentifierContext):
return self.visitChildren(ctx)
del ANTLRv4Parser
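# --- Editorial sketch (not part of the generated file). ---
# Unlike the listener, a visitor controls traversal itself, and every visitXxx
# method returns a value. The runtime defaults (defaultResult() and
# aggregateResult()) keep only the last child's result, so a folding visitor
# overrides both; for example, counting terminal references in a grammar:
class _ExampleTerminalRefCounter(ANTLRv4ParserVisitor):
    def defaultResult(self):
        return 0
    def aggregateResult(self, aggregate, nextResult):
        return aggregate + nextResult
    def visitTerminalRef(self, ctx):
        return 1 + self.visitChildren(ctx)
# Usage:
#     count = _ExampleTerminalRefCounter().visit(tree)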
# ---- sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/gen/syntax/__init__.py ---- (empty file)
# ---- sphinx-a4doc-1.6.0/sphinx_a4doc/syntax/lexer_adaptor.py ----
from antlr4 import *
class LexerAdaptor(Lexer):
"""
    Track whether we are inside of a rule and whether that rule is a lexer (token) rule.
    _currentRuleType == Token.INVALID_TYPE means that we are outside of a rule. At the
    first reference to a rule name while _currentRuleType is invalid, we can assume that
    we are starting a parser rule. Similarly, seeing a token reference when not already
    in a rule means we are starting a token rule. The terminating ';' of a rule flips
    this back to the invalid type.
    This is not perfect logic, but it works. For example, "grammar T;" means that we
    start and stop a lexical rule for the "T;". Dangerous, but it works.
    The whole point of this state information is to distinguish between [..arg actions..]
    and [charsets]: char sets can only occur in lexer rules, where arg actions cannot.
"""
_currentRuleType = Token.INVALID_TYPE
def __init__(self, inp, output):
Lexer.__init__(self, inp, output)
def getCurrentRuleType(self):
return self._currentRuleType
def setCurrentRuleType(self, ruleType):
self._currentRuleType = ruleType
def handleBeginArgument(self):
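        # A '[' was just seen: in a lexer rule it opens a character set, which
        # is folded into the current token via more(); in a parser rule it
        # opens an argument action block handled by the Argument lexer mode.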
if self.inLexerRule():
self.pushMode(self.LexerCharSet)
self.more()
else:
self.pushMode(self.Argument)
def handleEndArgument(self):
self.popMode()
if len(self._modeStack) > 0:
self._type = self.ARGUMENT_CONTENT
def handleEndAction(self):
self.popMode()
if len(self._modeStack) > 0:
self._type = self.ACTION_CONTENT
def emit(self):
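        # Re-type the catch-all ID token: identifiers starting with an
        # uppercase letter are token references, the rest are rule references.
        # The first reference seen outside a rule also marks the start of a
        # rule definition; the terminating SEMI marks its end.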
if self._type == self.ID:
firstChar = self._input.getText(self._tokenStartCharIndex, self._tokenStartCharIndex)
if firstChar[0].isupper():
self._type = self.TOKEN_REF
else:
self._type = self.RULE_REF
if self._currentRuleType == Token.INVALID_TYPE: # if outside of rule def
self._currentRuleType = self._type # set to inside lexer or parser rule
elif self._type == self.SEMI: # exit rule def
self._currentRuleType = Token.INVALID_TYPE
return Lexer.emit(self)
def inLexerRule(self):
return self._currentRuleType == self.TOKEN_REF
def inParserRule(self): # not used, but added for clarity
return self._currentRuleType == self.RULE_REF
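# --- Editorial note (not part of the sources). ---
# LexerAdaptor is meant to be installed as the generated lexer's base class,
# conventionally via the lexer grammar's options (an assumption based on
# standard ANTLR practice):
#
#     lexer grammar ANTLRv4Lexer;
#     options { superClass = LexerAdaptor; }
#
# The generated lexer then inherits handleBeginArgument()/handleEndArgument()
# for '[' ... ']' handling and the ID re-typing performed in emit().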