python-ptk-1.3.1/LICENSE.txt
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
python-ptk-1.3.1/Makefile
PYTHON?=python3
all:
@echo Targets:
@echo all: This help
@echo prepare: Refresh generated files
@echo unittests: All unit tests
@echo coverage: Test coverage
@echo lint: Pylint
@echo nuke: Delete all unversioned files
@echo documentation: Documentation
@echo tarball: Source distribution
prepare:
$(PYTHON) ./prepare.py
unittests:
$(PYTHON) tests/test_all.py
coverage:
$(PYTHON) -m coverage run --branch --omit "tests/*,/usr/*" tests/test_all.py
$(PYTHON) -m coverage html
lint:
-$(PYTHON) -m pylint ptk > lint.html
nuke:
hg purge --all
documentation:
cd doc; $(MAKE) html
rm -rf html
mv doc/build/html .
tarball: documentation
$(PYTHON) setup.py sdist --formats=bztar
release: documentation
python ./setup.py sdist upload -r pypi
python-ptk-1.3.1/PKG-INFO
Metadata-Version: 1.1
Name: ptk
Version: 1.3.1
Summary: LR(1) parsing framework for Python with support for asynchronous input
Home-page: https://bitbucket.org/fraca7/ptk
Author: Jérôme Laheurte
Author-email: jerome@jeromelaheurte.net
License: UNKNOWN
Download-URL: https://pypi.python.org/packages/source/p/ptk/ptk-1.3.1.tar.gz
Description: UNKNOWN
Keywords: parser,parsing,compiler,lr,slr
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development :: Compilers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
python-ptk-1.3.1/README.rst.in
Parser Toolkit
==============
PTK - (c) Jérôme Laheurte 2015
.. contents:: **Table of contents**
What is PTK?
-------------
PTK is an LR(1) parser "generator" for Python. It is not actually a
"generator" in the sense that it does not output source code; it uses
Python's dynamic nature to build everything it needs at runtime
instead. It also supports asynchronous parsing; see the API
documentation for details.
This code is licensed under the `GNU LGPL version 3 or, at your
option, any later version
`_.
Why another one of those?
--------------------------
There are a number of parser generators for Python out there. Most of
them only support LL(1) or PEG. The other ones are either
- Unmaintained
- Straight translations from Yacc/Bison, and thus use an ugly syntax
- All of the above
The main goals of PTK are
- Clean, compact, Python-friendly syntax
- Support for asynchronous input streams: why would you need the
whole input string to start working when the underlying system is
actually an automaton?
- Play nice in 'special' cases, like when the underlying
'filesystem' is a PyZipFile archive.
- Don't use hacks like module-level introspection to compensate for
an ugly design (I'm looking at you PLY). Those tend to produce
subtle and headache-inducing bugs when running from compiled code.
Supported platforms
-------------------
All unit tests pass on the following platform/Python version combinations:
+-----+-------+-----+---------+
| | Linux | OSX | Windows |
+=====+=======+=====+=========+
| 2.7 | X | X | X |
+-----+-------+-----+---------+
| 3.2 | X | | X |
+-----+-------+-----+---------+
| 3.3 | | | X |
+-----+-------+-----+---------+
| 3.4 | | | X |
+-----+-------+-----+---------+
| 3.5 | | X | |
+-----+-------+-----+---------+
See the
`Buildbot `_ for
details.
Although Python 2.6 is not officially supported, it does work with a
few minor code modifications, namely replacing
functools.total_ordering with the `backport to 2.6 `_.
Installation
------------
Using pip::
$ pip install -U ptk
From source::
$ wget https://pypi.python.org/packages/source/p/ptk/ptk-%(version)s.tar.gz
$ tar xzf ptk-%(version)s.tar.gz; cd ptk-%(version)s
$ sudo python ./setup.py install
Sample usage
------------
Four-operations integer calculator:
.. code-block:: python
from ptk.parser import LRParser, production, leftAssoc
from ptk.lexer import ReLexer, token
import operator, six
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class Parser(LRParser, ReLexer):
@token('[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
@production('E -> number<n>')
def litteral(self, n):
return n
@production('E -> "-" E<val>', priority='*')
def minus(self, val):
return -val
@production('E -> "(" E<val> ")"')
def paren(self, val):
return val
@production('E -> E<left> "+"<op> E<right>')
@production('E -> E<left> "-"<op> E<right>')
@production('E -> E<left> "*"<op> E<right>')
@production('E -> E<left> "/"<op> E<right>')
def binaryop(self, left, op, right):
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right)
parser = Parser()
while True:
expr = six.moves.input('> ')
print(parser.parse(expr))
Code samples
------------
The *samples* subdirectory in the source tree contains the
aforementioned calculator and a script that generates a skeleton
Python file from a Yacc or Bison grammar file.
API documentation
-----------------
The full documentation is hosted `here `_.
Changelog
---------
Version 1.3.1:
- Fix version number in README.rst
Version 1.3.0:
- Added deferred_lexer and deferred_parser (asynchronous parsing using
Twisted Deferred objects)
- Asynchronous classes cannot be imported from 'regular' modules
anymore; import them explicitly from 'ptk.async_lexer' and 'ptk.async_parser'.
Version 1.2.0:
- Production methods cannot have the same name any more. This was
idiotic to begin with. Inheritance thus works as expected.
- Add AsyncLexer and AsyncLRParser for asynchronous parsing.
Version 1.1.0:
- Added repeat operators ('*', '+', '?') in production syntax.
- Support for more yacc/bison declarations in yacc2py sample (most are
actually ignored)
python-ptk-1.3.1/TODO.txt
* LALR ? Not sure it's worth it.
* ProgressiveLexer: subset construction in byte mode
* Serialization, maybe
python-ptk-1.3.1/doc/Makefile
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " coverage to run coverage check of the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ptk.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ptk.qhc"
applehelp:
$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
@echo
@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
@echo "N.B. You won't be able to view it unless you put it in" \
"~/Library/Documentation/Help or install it in your application" \
"bundle."
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/ptk"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ptk"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
coverage:
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
@echo "Testing of coverage in the sources finished, look at the " \
"results in $(BUILDDIR)/coverage/python.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
python-ptk-1.3.1/doc/make.bat
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
echo. coverage to run coverage check of the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
:sphinx_ok
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\ptk.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\ptk.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "coverage" (
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
if errorlevel 1 exit /b 1
echo.
echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end
python-ptk-1.3.1/doc/source/conf.py
# -*- coding: utf-8 -*-
import sys
import os
import shlex
import six
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from ptk.meta import version, PackageInfo
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = PackageInfo.project_name
copyright = six.u('2015, %s') % PackageInfo.author_name
author = PackageInfo.author_name
release = version
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = six.u('{name}doc').format(name=author)
python-ptk-1.3.1/doc/source/index.rst
.. ptk documentation master file, created by
sphinx-quickstart on Sun Dec 20 14:14:37 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to ptk's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
lexer
parser
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
python-ptk-1.3.1/doc/source/lexer.rst
Lexical analysis
================
.. py:decorator:: token(rx, types=None)
Decorator for token definitions in classes derived from :py:class:`LexerBase`.
:param str rx: A regular expression defining the possible token values
:param types: A list of token types that this method can recognize. If omitted, the token type is assumed to be the method's name.
:type types: List of strings
Basic usage:
.. code-block:: python
class MyLexer(ReLexer):
@token(r'[a-zA-Z_][a-zA-Z0-9_]*')
def identifier(self, tok):
pass
This will define an *identifier* token type, whose value is the
recognized string. The *tok* parameter holds two attributes,
*type* and *value*. You can modify the value in place:
.. code-block:: python
class MyLexer(ReLexer):
@token(r'[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
In some cases it may be necessary to change the token's type as
well; for instance to disambiguate between identifiers that
are builtins and other ones. In order for the lexer to know which
token types can be generated, you should pass a list of strings as the
*types* parameter:
.. code-block:: python
class MyLexer(ReLexer):
@token(r'[a-zA-Z_][a-zA-Z0-9_]*', types=['builtin', 'identifier'])
def identifier_or_builtin(self, tok):
tok.type = 'builtin' if tok.value in ['len', 'id'] else 'identifier'
In this case the default value of the *type* attribute is *None*
and you **must** set it. Leaving the token type as None (or setting it
to None) causes the token to be ignored.
.. note::
The type of token values depends on the type of the strings
used to define the regular expressions. Unicode expressions
will hold Unicode values, and bytes expressions will hold
bytes values.
.. note::
Disambiguation is done the regular way: if several regular
expressions match the input, the longest match is chosen. If
the matches are of equal length, the first (in source code
order) declaration wins.
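For instance, in the following sketch (the token and method names are
purely illustrative), the input "iffy" yields a single *identifier*
token because it is the longest match, whereas the input "if" is
matched by both expressions with the same length and therefore yields
*keyword*, the first declared one:

.. code-block:: python

   class MyLexer(ReLexer):
       @token(r'if')
       def keyword(self, tok):
           pass

       @token(r'[a-z]+')
       def identifier(self, tok):
           pass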
.. automodule:: ptk.lexer
:members:
:member-order: bysource
.. autoclass:: ptk.async_lexer.AsyncLexer
.. py:data:: EOF
This is a singleton used to indicate end of stream. It may be used
as a token, a token type and a token value. In the first case it is
its own type and value.
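For illustration, here is a rough sketch of feeding a
:py:class:`ProgressiveLexer` by hand and checking for EOF in the
*newToken* callback (the lexer class and the feeding loop are made up
for this example; refer to the class documentation above for the
authoritative signatures):

.. code-block:: python

   class MyLexer(ProgressiveLexer):
       def __init__(self):
           super(MyLexer, self).__init__()
           self.seen = []  # tokens collected so far

       @token(r'[1-9][0-9]*')
       def number(self, tok):
           tok.value = int(tok.value)

       def newToken(self, tok):
           if tok is EOF:
               return  # end of stream
           self.seen.append(tok)

   lexer = MyLexer()
   for char in '42 12':
       lexer.feed(char)
   lexer.feed(EOF)  # flushes the last pending token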
python-ptk-1.3.1/doc/source/parser.rst
Syntactic analysis
==================
.. py:decorator:: production(prod, priority=None)
Use this decorator to declare a grammar production:
.. code-block:: python
class MyParser(LRParser, ReLexer):
@production('E -> E "+" E')
def sum(self):
pass
See the :ref:`production-syntax` section.
The *priority* argument may be specified to declare that the production has the same priority as an existing token type. Typical use for unary minus:
.. code-block:: python
class MyParser(LRParser, ReLexer):
# omitting productions for binary +, -, * and /
@production('E -> "-" E', priority='*')
def minus(self):
pass
You can also use a token type that has not been declared to the lexer as long as you have declared an explicit priority for it, using one of the associativity decorators:
.. code-block:: python
@leftAssoc('+', '-')
@leftAssoc('*', '/')
@nonAssoc('UNARYMINUS') # Non associative, higher priority than anything else
class MyParser(LRParser, ReLexer):
@production('E -> "-" E', priority='UNARYMINUS')
def minus(self):
pass
.. automodule:: ptk.parser
:members:
:member-order: bysource
.. autoclass:: ptk.async_parser.AsyncLRParser
.. _production-syntax:
Production syntax
-----------------
Basics
^^^^^^
Productions passed to the :py:func:`production` decorator are written in a variant of BNF; for example:
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('E -> E plus E')
def binaryop_sum(self):
pass
@production('E -> E minus E')
def binaryop_minus(self):
pass
Here non-terminal symbols are uppercase and terminals (token types) are lowercase, but this is only a convention.
When you don't need separate semantic actions you can group several productions by using either the '|' symbol:
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('E -> E plus E | E minus E')
def binaryop(self):
pass
Or decorating the same method several times:
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('E -> E plus E')
@production('E -> E minus E')
def binaryop(self):
pass
Semantic values
^^^^^^^^^^^^^^^
The semantic value associated with a production is the return value of the decorated method. Values for items on the right side of the production are not passed to the method by default; you have to use a specific syntax to associate an item with a name, which is then used as the name of a keyword argument passed to the method. The name is specified between angle brackets after the item, for instance:
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('E -> E<left> plus E<right>')
def sum(self, left, right):
return left + right
You can thus use alternatives and default argument values to slightly change the action's behavior depending on the actual matched production:
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('SYMNAME -> identifier<value> | identifier<value> left_bracket identifier<name> right_bracket')
def symname(self, value, name=None):
if name is None:
    pass  # First form, name not specified
else:
    pass  # Second form
Literal tokens
^^^^^^^^^^^^^^^
A literal token name may appear in a production, between double quotes. This allows you to skip declaring "simple" tokens at the lexer level.
.. code-block:: python
class Parser(LRParser, ReLexer):
@production('E -> E "+" E')
def sum(self):
pass
.. note::
Those tokens are considered "declared" after the ones explicitly declared through the :py:func:`token` decorator. This may be important because of the disambiguation rules; see the notes for the :py:func:`token` decorator.
Literal tokens may be named as well.
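For example (a minimal sketch; the grammar and method names are
illustrative):

.. code-block:: python

   class Parser(LRParser, ReLexer):
       @production('E -> E<left> "+"<op> E<right>')
       def sum(self, left, op, right):
           return left + right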
Repeat operators
^^^^^^^^^^^^^^^^
A nonterminal in the right side of a production may be immediately
followed by a repeat operator among "*", "+" and "?", which have the
same meaning as in regular expressions. Note that this is only
syntactic sugar; under the hood additional productions are
generated.
.. code-block:: none
A -> B?
is equivalent to
.. code-block:: none
A ->
A -> B
The semantic value is None if the empty production was applied, or the
semantic value of B if the 'A -> B' production was applied.
.. code-block:: none
A -> B*
is equivalent to
.. code-block:: none
A ->
A -> L_B
L_B -> B
L_B -> L_B B
The semantic value is a list of semantic values for B. '+' works the
same way except for the empty production, so the list cannot be empty.
Additionally, the '*' and '+' operators may include a separator
specification, which is a symbol name or literal token between parentheses:
.. code-block:: none
A -> B+("|")
is equivalent to
.. code-block:: none
A -> L_B
L_B -> B
L_B -> L_B "|" B
The semantic value is still a list of B values; there is no way to get
the values for the separators.
Wrapping it up
--------------
Fully functional parser for a four-operations integer calculator:
.. code-block:: python
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class Parser(LRParser, ReLexer):
@token('[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
@production('E -> number<n>')
def litteral(self, n):
return n
@production('E -> "-" E<val>', priority='*')
def minus(self, val):
return -val
@production('E -> "(" E<val> ")"')
def paren(self, val):
return val
@production('E -> E<left> "+"<op> E<right>')
@production('E -> E<left> "-"<op> E<right>')
@production('E -> E<left> "*"<op> E<right>')
@production('E -> E<left> "/"<op> E<right>')
def binaryop(self, left, op, right):
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right)
Parsing lists of integers separated by commas:
.. code-block:: python
class Parser(LRParser, ReLexer):
@token('[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
@production('LIST -> number*(",")<values>')
def integer_list(self, values):
print('Values are: %s' % values)
Conflict resolution rules
=========================
Conflict resolution rules are the same as those used by Yacc/Bison. A shift/reduce conflict is resolved by choosing to shift. A reduce/reduce conflict is resolved by choosing the reduction associated with the first declared production. :py:func:`leftAssoc`, :py:func:`rightAssoc`, :py:func:`nonAssoc` and the *priority* argument to :py:func:`production` allow you to explicitly disambiguate.
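To make the default behavior concrete, here is a small sketch (this is
not one of the library's samples; the grammar is illustrative):

.. code-block:: python

   # Without an associativity declaration, the shift/reduce conflict in the
   # 'E -> E "-" E' production is resolved by shifting, so "1-2-3" is
   # evaluated as 1 - (2 - 3) = 2. Adding @leftAssoc('-') on the class makes
   # it group as (1 - 2) - 3 = -4 instead.
   class Parser(LRParser, ReLexer):
       @token('[0-9]+')
       def number(self, tok):
           tok.value = int(tok.value)

       @production('E -> number<n>')
       def litteral(self, n):
           return n

       @production('E -> E<left> "-" E<right>')
       def sub(self, left, right):
           return left - right

   print(Parser().parse('1-2-3'))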
Asynchronous lexer/parser
=========================
The :py:class:`AsyncLexer` and :py:class:`AsyncLRParser` classes allow
you to parse an input stream asynchronously. Since this uses the new
asynchronous method syntax introduced in Python 3.5, it's only
available with this version of Python.
The basic idea is that the production methods are asynchronous. Feed
the input stream one byte/char at a time by awaiting on
:py:func:`AsyncLexer.asyncFeed`. When a token has been recognized
unambiguously, this will in turn await on
:py:func:`AsyncLRParser.asyncNewToken`. Semantic actions may then be
awaited on as a result.
The samples directory contains the following example of an
asynchronous parser:
.. code-block:: python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Four operations calculator, asynchronous. Due to various buffering
problems you probably won't see the point unless you force
stdin to be non-interactive, e.g.
$ echo '3*4+6' | python3 ./async_calc.py
"""
import six, operator, os, asyncio, sys, codecs
from ptk.lexer import token, AsyncLexer, EOF
from ptk.parser import production, leftAssoc, AsyncLRParser, ParseError
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class Parser(AsyncLRParser, AsyncLexer):
async def asyncNewSentence(self, result):
six.print_('== Result:', result)
# Lexer
def ignore(self, char):
return char in [' ', '\t']
@token(r'[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
# Parser
@production('E -> "-" E<value>', priority='*')
async def minus(self, value):
six.print_('== Neg: - %d' % value)
return -value
@production('E -> "(" E<value> ")"')
async def paren(self, value):
return value
@production('E -> number<number>')
async def litteral(self, number):
return number
@production('E -> E<left> "+"<op> E<right>')
@production('E -> E<left> "-"<op> E<right>')
@production('E -> E<left> "*"<op> E<right>')
@production('E -> E<left> "/"<op> E<right>')
async def binaryop(self, left, op, right):
six.print_('Binary operation: %s %s %s' % (left, op, right))
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right)
async def main():
reader = asyncio.StreamReader()
await asyncio.get_event_loop().connect_read_pipe(lambda: asyncio.StreamReaderProtocol(reader), sys.stdin)
decoder = codecs.getincrementaldecoder('utf_8')()
parser = Parser()
while True:
byte = await reader.read(1)
if not byte:
break
char = decoder.decode(byte)
if char:
if char == '\n':
char = EOF
else:
six.print_('Input char: "%s"' % repr(char))
await parser.asyncFeed(char)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
Asynchronous lexer/parser using Deferreds
=========================================
The :py:class:`DeferredLexer` and :py:class:`DeferredLRParser` work the same
as :py:class:`AsyncLexer` and :py:class:`AsyncLRParser`, but use
Twisted's Deferred objects instead of Python 3.5-like asynchronous
methods. The special methods are called :py:func:`DeferredLexer.deferNewToken` and
:py:func:`DeferredLRParser.deferNewSentence` and must return
Deferred instances. Semantic actions can return either Deferred
instances or regular values. See the defer_calc.py sample for details.
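As a minimal sketch of the Deferred-based flavour (the grammar below is
illustrative and error handling is omitted; defer_calc.py in the
samples directory is the complete, authoritative example):

.. code-block:: python

   import six
   from twisted.internet.defer import succeed

   from ptk.lexer import token, EOF
   from ptk.parser import production
   from ptk.deferred_lexer import DeferredLexer
   from ptk.deferred_parser import DeferredLRParser

   class Parser(DeferredLRParser, DeferredLexer):
       @token(r'[1-9][0-9]*')
       def number(self, tok):
           tok.value = int(tok.value)

       @production('E -> number<n>')
       def litteral(self, n):
           # Semantic actions may return a plain value or a Deferred.
           return succeed(n)

       def deferNewSentence(self, result):
           six.print_('== Result:', result)
           return succeed(None)

   parser = Parser()
   d = parser.deferFeed('4')
   d.addCallback(lambda _: parser.deferFeed('2'))
   d.addCallback(lambda _: parser.deferFeed(EOF))  # EOF triggers deferNewSentence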
python-ptk-1.3.1/prepare.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from ptk.meta import PackageInfo
def generateReadme():
with open('README.rst.in', 'rb') as src:
contents = src.read().decode('UTF-8')
contents = contents % PackageInfo.__dict__
with open('README.rst', 'wb') as dst:
dst.write(contents.encode('UTF-8'))
def prepare():
generateReadme()
if __name__ == '__main__':
prepare()
python-ptk-1.3.1/ptk/__init__.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
from .meta import version, version_info
python-ptk-1.3.1/ptk/async_lexer.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
# XXXTODO: when pylint supports async, remove this...
# pylint: skip-file
from ptk.lexer import ProgressiveLexer, token, EOF, LexerError
class AsyncLexer(ProgressiveLexer):
"""
This class works like :py:class:`ProgressiveLexer` but can be fed
the input asynchronously via :py:func:`asyncFeed`. It works with
:py:class:`AsyncLRParser`.
"""
async def asyncFeed(self, char, charPos=None):
"""
Asynchronous version of :py:func:`ProgressiveLexer.feed`. This
awaits on the :py:func:`asyncNewToken` method instead of
calling 'newToken' synchronously.
"""
self._input.append((char, charPos))
while self._input:
char, charPos = self._input.pop(0)
for tok in self._feed(char, charPos):
await self.asyncNewToken(tok)
async def asyncNewToken(self, tok):
"""
Asynchronous version of :py:func:`LexerBase.newToken`.
"""
raise NotImplementedError
python-ptk-1.3.1/ptk/async_parser.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
# XXXTODO: when pylint supports async, remove this...
# pylint: skip-file
from ptk.parser import production, LRParser, ProductionParser, leftAssoc, rightAssoc, nonAssoc, ParseError, _Accept, _Reduce, _Shift
class _AsyncShift(_Shift):
async def asyncDoAction(self, grammar, stack, tok):
return self.doAction(grammar, stack, tok)
class _AsyncReduce(_Reduce):
async def asyncDoAction(self, grammar, stack, tok):
callback, kwargs = self._getCallback(stack)
prodVal = await callback(grammar, **kwargs)
self._applied(grammar, stack, prodVal)
return False
class AsyncProductionParser(ProductionParser):
def _wrapCallbackNone(self, name, prod):
previous = prod.callback
async def callback(*args, **kwargs):
kwargs[name] = None
return await previous(*args, **kwargs)
prod.callback = callback
def _wrapCallbackEmpty(self, name, prod):
previous = prod.callback
async def cbEmpty(*args, **kwargs):
if name is not None:
kwargs[name] = []
return await previous(*args, **kwargs)
prod.callback = cbEmpty
def _wrapCallbackOne(self):
async def cbOne(_, item):
return [item]
return cbOne
def _wrapCallbackNext(self):
async def cbNext(_, items, item):
items.append(item)
return items
return cbNext
def asyncCallbackByName(name):
async def _wrapper(instance, *args, **kwargs):
return await getattr(instance, name)(*args, **kwargs)
return _wrapper
class AsyncLRParser(LRParser):
"""
This class works like :py:class:`LRParser` but supports
asynchronous methods (new in Python 3.5). You must use
:py:class:`AsyncLexer` in conjunction with it:
.. code-block:: python
class Parser(AsyncLRParser, AsyncLexer):
# ...
And only use :py:func:`AsyncLexer.asyncFeed` to feed it the input
stream. Semantic actions must be asynchronous methods as
well. When the start symbol is reduced, the
:py:func:`asyncNewSentence` method is awaited.
"""
async def asyncNewToken(self, tok):
try:
for action, stack in self._processToken(tok):
if await action.asyncDoAction(self, stack, tok):
break
except _Accept as exc:
self._restartParser()
await self.asyncNewSentence(exc.result)
async def asyncNewSentence(self, result):
"""
Awaited when the start symbol is reached.
"""
raise NotImplementedError
@classmethod
def _createProductionParser(cls, name, priority):
return AsyncProductionParser(asyncCallbackByName(name), priority, cls)
@classmethod
def _createShiftAction(cls, state):
return _AsyncShift(state)
@classmethod
def _createReduceAction(cls, item):
return _AsyncReduce(item)
python-ptk-1.3.1/ptk/deferred_lexer.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
from ptk.lexer import ProgressiveLexer, token, EOF, LexerError # pylint: disable=W0611
from twisted.internet.defer import Deferred
import six
class DeferredLexer(ProgressiveLexer):
"""
This class works like :py:class:`ProgressiveLexer` but can be fed
the input asynchronously via :py:func:`deferFeed`. It works with
:py:class:`DeferredLRParser`.
"""
def deferFeed(self, char, charPos=None):
"""
Asynchronous version of :py:func:`ProgressiveLexer.feed`. This
will wait for the deferred returned by
:py:func:`deferNewToken` instead of calling 'newToken'
synchronously.
"""
self._input.append((char, charPos))
d = Deferred()
def nextInput(result): # pylint: disable=W0613
if self._input:
char, charPos = self._input.pop(0)
tokens = self._feed(char, charPos)
def gotToken(result): # pylint: disable=W0613
try:
tok = six.next(tokens)
except StopIteration:
nextInput(None)
else:
self.deferNewToken(tok).addCallbacks(gotToken, d.errback)
gotToken(None)
else:
d.callback(None)
nextInput(None)
return d
def deferNewToken(self, tok):
"""
Asynchronous version of :py:func:`LexerBase.newToken`. Must
return a Deferred.
"""
raise NotImplementedError
python-ptk-1.3.1/ptk/deferred_parser.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
from ptk.parser import production, LRParser, ProductionParser, leftAssoc, rightAssoc, nonAssoc, ParseError, _Accept, _Reduce, _Shift # pylint: disable=W0611
from ptk.utils import callbackByName
from twisted.internet.defer import succeed, Deferred, maybeDeferred
from twisted.python.failure import Failure
import six
class _DeferShift(_Shift):
def deferDoAction(self, grammar, stack, tok):
return succeed(self.doAction(grammar, stack, tok))
class _DeferReduce(_Reduce):
def deferDoAction(self, grammar, stack, tok): # pylint: disable=W0613
callback, kwargs = self._getCallback(stack)
d = Deferred()
def applied(prodVal):
try:
self._applied(grammar, stack, prodVal)
d.callback(False)
except Exception: # pylint: disable=W0703
d.errback(Failure())
maybeDeferred(callback, grammar, **kwargs).addCallbacks(applied, d.errback)
return d
class DeferredProductionParser(ProductionParser):
# XXXFIXME inheritance problem
def __init__(self, *args, **kwargs):
self.__class__.__prepared__ = True
super(DeferredProductionParser, self).__init__(*args, **kwargs)
def _wrapCallbackOne(self):
def cbOne(_, item):
return succeed([item])
return cbOne
def _wrapCallbackNext(self):
def cbNext(_, items, item):
items.append(item)
return succeed(items)
return cbNext
class DeferredLRParser(LRParser):
"""
This class works like :py:class:`LRParser` but supports
returning a Deferred from semantic actions. You must use
:py:class:`DeferredLexer` in conjunction with it:
.. code-block:: python
class Parser(DeferredLRParser, DeferredLexer):
# ...
And only use :py:func:`DeferredLexer.deferFeed` to feed it the input
stream. Semantic actions must return Deferred instances. When the
start symbol is reduced, the :py:func:`deferNewSentence` method is
called and must return a Deferred."""
def deferNewToken(self, tok):
d = Deferred()
actions = self._processToken(tok)
def error(reason):
if reason.check(_Accept):
self._restartParser()
self.deferNewSentence(reason.value.result).chainDeferred(d)
else:
d.errback(reason)
def nextAction(result):
if result:
d.callback(None)
return result
try:
action, stack = six.next(actions)
except StopIteration:
d.callback(None)
else:
try:
df = action.deferDoAction(self, stack, tok)
except Exception: # pylint: disable=W0703
d.errback(Failure())
else:
df.addCallback(nextAction)
df.addErrback(error)
nextAction(False)
return d
def deferNewSentence(self, result):
"""
Called when the start symbol is reached. Must return a Deferred.
"""
raise NotImplementedError
@classmethod
def _createProductionParser(cls, name, priority):
return DeferredProductionParser(callbackByName(name), priority, cls)
@classmethod
def _createShiftAction(cls, state):
return _DeferShift(state)
@classmethod
def _createReduceAction(cls, item):
return _DeferReduce(item)
python-ptk-1.3.1/ptk/grammar.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
"""
Context-free grammar objects. To define a grammar, inherit from the
Grammar class and define a method decorated with 'production' for each
production.
"""
import six
import copy
import functools
import inspect
import logging
from ptk.lexer import EOF, _LexerMeta
from ptk.utils import memoize, Singleton
class Epsilon(six.with_metaclass(Singleton, object)):
"""
Empty production
"""
__reprval__ = six.u('\u03B5') if six.PY3 else six.u('(epsilon)')
class GrammarError(Exception):
"""
Generic grammar error, like duplicate production.
"""
class GrammarParseError(GrammarError):
"""
Syntax error in a production specification.
"""
@functools.total_ordering
class Production(object):
"""
Production object
"""
def __init__(self, name, callback, priority=None):
self.name = name
self.callback = callback
self.right = list()
self.__priority = priority
self.__ids = dict() # position => id
def addSymbol(self, identifier, name=None):
"""
Append a symbol to the production's right side.
"""
if name is not None:
if name in self.__ids.values():
raise GrammarParseError('Duplicate identifier name "%s"' % name)
self.__ids[len(self.right)] = name
self.right.append(identifier)
def cloned(self):
prod = Production(self.name, self.callback, self.__priority)
prod.right = list(self.right)
prod.__ids = dict(self.__ids) # pylint: disable=W0212
return prod
def apply(self, args):
kwargs = dict([(name, args[index]) for index, name in self.__ids.items()])
return self.callback, kwargs
def rightmostTerminal(self, grammar):
"""
Returns the rightmost terminal, or None if there is none
"""
for symbol in reversed(self.right):
if symbol in grammar.tokenTypes():
return symbol
def precedence(self, grammar):
"""
Returns the production's priority (specified through the
'priority' keyword argument to the 'production' decorator), or
if there is none, the priority for the rightmost terminal.
"""
if self.__priority is not None:
return grammar.terminalPrecedence(self.__priority)
symbol = self.rightmostTerminal(grammar)
if symbol is not None:
return grammar.terminalPrecedence(symbol)
def __eq__(self, other):
return (self.name, self.right) == (other.name, other.right)
def __lt__(self, other):
return (self.name, self.right) < (other.name, other.right)
def __repr__(self): # pragma: no cover
return six.u('%s -> %s') % (self.name, six.u(' ').join([repr(p) for p in self.right]) if self.right else repr(Epsilon))
def __hash__(self):
return hash((self.name, tuple(self.right)))
# Same remark as in lexer.py.
_PRODREGISTER = list()
class _GrammarMeta(_LexerMeta):
def __new__(metacls, name, bases, attrs):
global _PRODREGISTER # pylint: disable=W0603
try:
attrs['__productions__'] = list()
attrs['__precedence__'] = list()
attrs['__prepared__'] = False
klass = super(_GrammarMeta, metacls).__new__(metacls, name, bases, attrs)
for func, string, priority in _PRODREGISTER:
parser = klass._createProductionParser(func.__name__, priority) # pylint: disable=W0212
parser.parse(string)
return klass
finally:
_PRODREGISTER = list()
def production(prod, priority=None):
def _wrap(func):
if any([func.__name__ == aFunc.__name__ and func != aFunc for aFunc, _, _ in _PRODREGISTER]):
raise TypeError('Duplicate production method name "%s"' % func.__name__)
_PRODREGISTER.append((func, prod, priority))
return func
return _wrap
class Grammar(six.with_metaclass(_GrammarMeta, object)):
"""
Base class for a context-free grammar
"""
__productions__ = list() # Make pylint happy
__precedence__ = list()
__prepared__ = False
startSymbol = None
def __init__(self):
# pylint: disable=R0912
super(Grammar, self).__init__()
if not self.__prepared__:
self.prepare()
@classmethod
def prepare(cls):
cls.startSymbol = cls._defaultStartSymbol() if cls.startSymbol is None else cls.startSymbol
productions = set()
for prod in cls.productions():
if prod in productions:
raise GrammarError('Duplicate production "%s"' % prod)
productions.add(prod)
cls.__allFirsts__ = cls.__computeFirsts()
logger = logging.getLogger('Grammar')
productions = cls.productions()
maxWidth = max([len(prod.name) for prod in productions])
for prod in productions:
logger.debug('%%- %ds -> %%s' % maxWidth, prod.name, ' '.join([repr(name) for name in prod.right]) if prod.right else Epsilon) # pylint: disable=W1201
cls.__prepared__ = True
@classmethod
def __computeFirsts(cls):
allFirsts = dict([(symbol, set([symbol])) for symbol in cls.tokenTypes() | set([EOF])])
while True:
prev = copy.deepcopy(allFirsts)
for nonterminal in cls.nonterminals():
for prod in cls.productions():
if prod.name == nonterminal:
if prod.right:
for symbol in prod.right:
first = allFirsts.get(symbol, set())
allFirsts.setdefault(nonterminal, set()).update(first)
if Epsilon not in first:
break
else:
allFirsts.setdefault(nonterminal, set()).add(Epsilon)
else:
allFirsts.setdefault(nonterminal, set()).add(Epsilon)
if prev == allFirsts:
break
return allFirsts
@classmethod
def _defaultStartSymbol(cls):
return cls.productions()[0].name
@classmethod
def productions(cls):
"""
Returns all productions
"""
productions = list()
for base in inspect.getmro(cls):
if issubclass(base, Grammar):
productions.extend(base.__productions__)
return productions
@classmethod
def nonterminals(cls):
"""
Return all non-terminal symbols
"""
result = set()
for prod in cls.productions():
result.add(prod.name)
for symbol in prod.right:
if symbol not in cls.tokenTypes():
result.add(symbol)
return result
@classmethod
def precedences(cls):
precedences = list()
for base in inspect.getmro(cls):
if issubclass(base, Grammar):
precedences.extend(base.__precedence__)
return precedences
@classmethod
def terminalPrecedence(cls, symbol):
for index, (associativity, terminals) in enumerate(cls.precedences()):
if symbol in terminals:
return associativity, index
@classmethod
@memoize
def first(cls, *symbols):
"""
Returns the first set for a group of symbols
"""
first = set()
for symbol in symbols:
rfirst = cls.__allFirsts__[symbol]
first |= set([a for a in rfirst if a is not Epsilon])
if Epsilon not in rfirst:
break
else:
first.add(Epsilon)
return first
@classmethod
def tokenTypes(cls):
# Shut up pylint
return super(Grammar, cls).tokenTypes() # pylint: disable=E1101
python-ptk-1.3.1/ptk/lexer.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
import six
import inspect
import re
import collections
from ptk.regex import buildRegex, DeadState, RegexTokenizer
from ptk.utils import Singleton, callbackByName
# In Python 3 we'd use __prepare__ and an ordered dict...
_TOKREGISTER = list()
class _LexerMeta(type):
def __new__(metacls, name, bases, attrs):
global _TOKREGISTER # pylint: disable=W0603
try:
attrs['__tokens__'] = (set(), list()) # Set of token names, list of (rx, callback, defaultType)
klass = super(_LexerMeta, metacls).__new__(metacls, name, bases, attrs)
for func, rx, toktypes in _TOKREGISTER:
klass.addTokenType(func.__name__, callbackByName(func.__name__), rx, toktypes)
return klass
finally:
_TOKREGISTER = list()
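# token(rx, types=None) is the decorator used to declare token methods on a
# lexer class: rx is the regular expression the token matches, and types may
# list the token type names the method can generate when they differ from the
# method name (see addTokenType below).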
def token(rx, types=None):
def _wrap(func):
if any([func.__name__ == aFunc.__name__ and func != aFunc for aFunc, _, _ in _TOKREGISTER]):
raise TypeError('Duplicate token method name "%s"' % func.__name__)
_TOKREGISTER.append((func, rx, types))
return func
return _wrap
class LexerError(Exception):
"""
Unrecognized token in input
:ivar lineno: Line in input
:ivar colno: Column in input
"""
def __init__(self, char, colno, lineno):
super(LexerError, self).__init__('Unrecognized token "%s" at line %d, column %d' % (char, lineno, colno))
self.lineno = lineno
self.colno = colno
class EOF(six.with_metaclass(Singleton, object)):
"""
End symbol
"""
__reprval__ = six.u('$')
@property
def type(self):
"""Read-only attribute for Token duck-typing"""
return self
@property
def value(self):
"""Read-only attribute for Token duck-typing"""
return self
_LexerPosition = collections.namedtuple('_LexerPosition', ['column', 'line'])
class LexerBase(six.with_metaclass(_LexerMeta, object)):
"""
This defines the interface for lexer classes. For concrete
implementations, see :py:class:`ProgressiveLexer` and
:py:class:`ReLexer`.
"""
Token = RegexTokenizer.Token
# Shut up pychecker. Those are actually set by the metaclass.
__tokens__ = ()
class _MutableToken(object):
def __init__(self, type_, value):
self.type = type_
self.value = value
def token(self):
"""Returns the unmutable equivalent"""
return EOF if EOF in [self.type, self.value] else RegexTokenizer.Token(self.type, self.value)
def __init__(self):
super(LexerBase, self).__init__()
self.restartLexer()
def restartLexer(self, resetPos=True):
if resetPos:
self.__pos = _LexerPosition(0, 1)
self._input = list()
self.__consumer = None
def position(self):
"""
:return: The current position in stream as a 2-tuple (column, line).
"""
return self.__pos
def advanceColumn(self, count=1):
"""
Advances the current position by *count* columns.
"""
col, row = self.__pos
self.__pos = _LexerPosition(col + count, row)
def advanceLine(self, count=1):
"""
Advances the current position by *count* lines.
"""
_, row = self.__pos
self.__pos = _LexerPosition(0, row + count)
@staticmethod
def ignore(char):
"""
Override this to ignore characters in input stream. The
default is to ignore spaces and tabs.
:param char: The character to test
:return: True if *char* should be ignored
"""
return char in [six.b(' '), six.u(' '), six.b('\t'), six.u('\t')]
def setConsumer(self, consumer):
"""
Sets the current consumer. A consumer is an object with a
*feed* method; all characters seen on the input stream after
the consumer is set are passed directly to it. When the *feed*
method returns a 2-tuple (type, value), the corresponding
token is generated and the consumer is reset to None. This is
handy for tokens that are hard to describe with a regular
expression but easy to recognize in code; for instance the
following lexer recognizes C strings without having to use
negative lookahead:
.. code-block:: python
class MyLexer(ReLexer):
@token('"')
def cstring(self, tok):
class CString(object):
def __init__(self):
self.state = 0
self.value = StringIO.StringIO()
def feed(self, char):
if self.state == 0:
if char == '"':
return 'cstring', self.value.getvalue()
if char == '\\\\':
self.state = 1
else:
self.value.write(char)
elif self.state == 1:
self.value.write(char)
self.state = 0
self.setConsumer(CString())
"""
self.__consumer = consumer
def consumer(self):
return self.__consumer
def parse(self, string): # pragma: no cover
"""
Parses the whole *string*
"""
raise NotImplementedError
def newToken(self, tok): # pragma: no cover
"""
This method will be invoked as soon as a token is recognized on input.
:param tok: The token. This is a named tuple with *type* and *value* attributes.
"""
raise NotImplementedError
@classmethod
def addTokenType(cls, name, callback, regex, types=None):
for typeName in [name] if types is None else types:
if typeName is not EOF:
cls.__tokens__[0].add(typeName)
cls.__tokens__[1].append((regex, callback, name if types is None else None))
@classmethod
def _allTokens(cls):
tokens = (set(), list())
for base in inspect.getmro(cls):
if issubclass(base, LexerBase):
tokens[0].update(base.__tokens__[0])
tokens[1].extend(base.__tokens__[1])
return tokens
@classmethod
def tokenTypes(cls):
"""
:return: the set of all token names, as strings.
"""
return cls._allTokens()[0]
class ReLexer(LexerBase): # pylint: disable=W0223
"""
Concrete lexer based on Python regular expressions. This is
**way** faster than :py:class:`ProgressiveLexer` but it can only
tokenize whole strings.
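For illustration, a minimal whole-string lexer might look like this
(sketch only, not part of the library; *token* and *newToken* come
from :py:class:`LexerBase`):
.. code-block:: python
class NumberLexer(ReLexer):
@token(r'[0-9]+')
def number(self, tok):
tok.value = int(tok.value)
def newToken(self, tok):
print(tok)
NumberLexer().parse('12 34')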
"""
def __init__(self):
self.__regexes = list()
for rx, callback, defaultType in self._allTokens()[1]:
if six.PY2 and isinstance(rx, str) or six.PY3 and isinstance(rx, bytes):
crx = re.compile(six.b('^') + rx)
else:
crx = re.compile(six.u('^') + rx)
self.__regexes.append((crx, callback, defaultType))
super(ReLexer, self).__init__()
def parse(self, string):
pos = 0
while pos < len(string):
char = string[pos]
if char == '\n':
self.advanceLine()
else:
self.advanceColumn()
if self.consumer() is None:
if self.ignore(char):
pos += 1
continue
pos = self.__findMatch(string, pos)
else:
tok = self.consumer().feed(char)
if tok is not None:
self.setConsumer(None)
if tok[0] is not None:
self.newToken(self.Token(*tok))
pos += 1
self.newToken(EOF)
def __findMatch(self, string, pos):
match = None
matchlen = 0
for rx, callback, defaultType in self.__regexes:
mtc = rx.search(string[pos:])
if mtc:
value = mtc.group(0)
if len(value) > matchlen:
match = value, callback, defaultType
matchlen = len(value)
if match:
value, callback, defaultType = match
tok = self._MutableToken(defaultType, value)
callback(self, tok)
pos += matchlen
if self.consumer() is None and tok.type is not None:
self.newToken(tok.token())
return pos
else:
raise LexerError(string[pos:pos+10], *self.position())
class ProgressiveLexer(LexerBase): # pylint: disable=W0223
"""
Concrete lexer based on a simple pure-Python regular expression
engine. This lexer is able to tokenize an input stream in a
progressive fashion; just call the
:py:func:`ProgressiveLexer.feed` method with whatever bytes are
available when they're available. Useful for asynchronous
contexts. Starting with Python 3.5 there is also an asynchronous
version, see :py:class:`AsyncLexer`.
This is **slow as hell**.
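For illustration, feeding input one character at a time (a sketch;
``MyLexer`` stands for any concrete subclass defining tokens and
:py:func:`LexerBase.newToken`):
.. code-block:: python
lexer = MyLexer()
for char in '42 + 7':
lexer.feed(char)  # newToken() is called as soon as tokens are recognized
lexer.feed(EOF)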
"""
def restartLexer(self, resetPos=True):
self.__currentState = [(buildRegex(rx).start(), callback, defaultType, [0]) for rx, callback, defaultType in self._allTokens()[1]]
self.__currentMatch = list()
self.__matches = list()
self.__maxPos = 0
self.__state = 0
self._input = list()
super(ProgressiveLexer, self).restartLexer(resetPos=resetPos)
def parse(self, string):
if six.PY3 and isinstance(string, bytes):
string = [chr(c).encode('ascii') for c in string]
for char in string:
self.feed(char)
self.feed(EOF)
def feed(self, char, charPos=None):
"""
Handle a single input character. When you're finished, call
this with EOF as argument.
"""
self._input.append((char, charPos))
while self._input:
char, charPos = self._input.pop(0)
for tok in self._feed(char, charPos):
self.newToken(tok)
def _feed(self, char, charPos): # pylint: disable=R0912,R0915
if char == '\n':
self.advanceLine()
else:
self.advanceColumn()
if self.consumer() is not None:
tok = self.consumer().feed(char)
if tok is not None:
self.setConsumer(None)
if tok[0] is not None:
yield self.Token(*tok)
return
try:
if char is EOF:
if self.__state == 0:
self.restartLexer()
yield EOF
return
self.__maxPos = max(self.__maxPos, max(pos[0] for regex, callback, defaultType, pos in self.__currentState))
if self.__maxPos == 0 and self.__currentMatch:
raise LexerError(self.__currentMatch[0][0], *self.__currentMatch[0][1])
self.__matches.extend([(pos[0], callback) for regex, callback, defaultType, pos in self.__currentState if pos[0] == self.__maxPos])
self.__matches = [(pos, callback) for pos, callback in self.__matches if pos == self.__maxPos]
else:
if self.__state == 0 and self.ignore(char):
return
self.__state = 1
newState = list()
for regex, callback, defaultType, pos in self.__currentState:
try:
if regex.feed(char):
pos[0] = len(self.__currentMatch) + 1
except DeadState:
if pos[0]:
self.__matches.append((pos[0], callback))
self.__maxPos = max(self.__maxPos, pos[0])
else:
newState.append((regex, callback, defaultType, pos))
if all([regex.isDeadEnd() for regex, callback, defaultType, pos in newState]):
for regex, callback, defaultType, pos in newState:
self.__matches.append((len(self.__currentMatch) + 1, callback))
self.__maxPos = max(self.__maxPos, len(self.__currentMatch) + 1)
newState = list()
self.__matches = [(pos, callback) for pos, callback in self.__matches if pos == self.__maxPos]
self.__currentState = newState
self.__currentMatch.append((char, self.position() if charPos is None else charPos))
if self.__currentState:
return
if self.__maxPos == 0:
raise LexerError(char, *self.position())
except LexerError:
self.restartLexer()
raise
tok = self.__finalizeMatch()
if tok is not None:
yield tok
if char is EOF:
self.restartLexer()
yield EOF
def __finalizeMatch(self):
# First declared token method
matches = set([callback for _, callback in self.__matches])
match = type(self.__currentMatch[0][0])().join([char for char, pos in self.__currentMatch[:self.__maxPos]]) # byte or unicode
remain = self.__currentMatch[self.__maxPos:]
self.restartLexer(False)
self._input.extend(remain)
for _, callback, defaultType in self._allTokens()[1]:
if callback in matches:
tok = self._MutableToken(defaultType, match)
callback(self, tok)
if tok.type is None or self.consumer() is not None:
break
return tok.token()
python-ptk-1.3.1/ptk/meta.py 0000664 0000000 0000000 00000001241 12650717014 0015730 0 ustar 00root root 0000000 0000000 # -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
import six
class PackageInfo(object):
version = six.u('1.3.1')
version_info = tuple(int(part) for part in version.split(six.u('.')))  # a tuple, not a one-shot iterator under Python 3
project_name = six.u('ptk')
project_url = six.u('https://bitbucket.org/fraca7/ptk')
download_url = six.u('https://pypi.python.org/packages/source/p/ptk/ptk-%s.tar.gz') % version
author_name = six.u('J\u00E9r\u00F4me Laheurte')
author_email = six.u('jerome@jeromelaheurte.net')
short_description = six.u('LR(1) parsing framework for Python with support for asynchronous input')
version = PackageInfo.version
version_info = PackageInfo.version_info
python-ptk-1.3.1/ptk/parser.py 0000664 0000000 0000000 00000056321 12650717014 0016307 0 ustar 00root root 0000000 0000000 # -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
import six
import functools
import collections
import logging
import re
from ptk.lexer import ProgressiveLexer, EOF, token
from ptk.grammar import Grammar, Production, GrammarError
# production is only imported so that client code doesn't have to import it from grammar
from ptk.grammar import production # pylint: disable=W0611
from ptk.utils import Singleton, callbackByName
class ParseError(Exception):
"""
Syntax error when parsing.
:ivar token: The unexpected token.
"""
def __init__(self, grammar, tok, state):
self.token = tok
super(ParseError, self).__init__(six.u('Unexpected token "%s" in state "%s"') % (tok.value, sorted(state)))
self._expecting = set()
for terminal in grammar.tokenTypes():
if grammar.__actions__.get((state, terminal), None) is not None:
self._expecting.add(terminal)
def expecting(self):
"""
Returns the set of token types that would have been valid in input.
"""
return self._expecting
def leftAssoc(*operators):
"""
Class decorator for left associative operators. Use this to
decorate your :py:class:`LRParser` subclass. Operators passed as
arguments are assumed to have the same priority. The later you
declare associativity, the higher the priority; so the following
code
.. code-block:: python
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class MyParser(LRParser):
# ...
declares '+' and '-' to be left associative, with the same
priority. '*' and '/' are also left associative, with a higher
priority than '+' and '-'.
See also the *priority* argument to :py:func:`production`.
"""
def _wrapper(cls):
cls.__precedence__.insert(0, ('left', set(operators)))
return cls
return _wrapper
def rightAssoc(*operators):
"""
Class decorator for right associative operators. Same remarks as :py:func:`leftAssoc`.
"""
def _wrapper(cls):
cls.__precedence__.insert(0, ('right', set(operators)))
return cls
return _wrapper
def nonAssoc(*operators):
"""
Class decorator for non associative operators. Same remarks as :py:func:`leftAssoc`.
"""
def _wrapper(cls):
cls.__precedence__.insert(0, ('non', set(operators)))
return cls
return _wrapper
class _StartState(six.with_metaclass(Singleton, object)):
__reprval__ = six.u('\u03A3') if six.PY3 else six.u('(START)')
class _ResolveError(Exception):
pass
@functools.total_ordering
class _Item(object):
def __init__(self, prod, dot, terminal):
self.production = prod
self.dot = dot
self.terminal = terminal
def shouldReduce(self):
"""
Returns True if the dot is in the last position
"""
return self.dot == len(self.production.right)
def next(self):
"""
Returns an item with the dot advanced one position
"""
return _Item(self.production, self.dot + 1, self.terminal)
def __repr__(self):
symbols = list(self.production.right)
symbols.insert(self.dot, six.u('\u2022') if six.PY3 else six.u('.'))
return six.u('%s -> %s (%s)') % (self.production.name, six.u(' ').join([repr(sym) for sym in symbols]), self.terminal)
def __eq__(self, other):
return (self.production, self.dot, self.terminal) == (other.production, other.dot, other.terminal)
def __lt__(self, other):
return (self.production, self.dot, self.terminal) < (other.production, other.dot, other.terminal)
def __hash__(self):
return hash((self.production, self.dot, self.terminal))
class _Accept(BaseException):
def __init__(self, result):
self.result = result
super(_Accept, self).__init__()
_StackItem = collections.namedtuple('_StackItem', ['state', 'value'])
class _Shift(object):
def __init__(self, newState):
self.newState = newState
def doAction(self, grammar, stack, tok): # pylint: disable=W0613
stack.append(_StackItem(self.newState, tok.value))
return True
class _Reduce(object):
def __init__(self, item):
self.item = item
self.nargs = len(item.production.right)
def doAction(self, grammar, stack, tok): # pylint: disable=W0613
callback, kwargs = self._getCallback(stack)
self._applied(grammar, stack, callback(grammar, **kwargs))
return False
def _applied(self, grammar, stack, prodVal):
stack.append(_StackItem(grammar.goto(stack[-1].state, self.item.production.name), prodVal))
def _getCallback(self, stack):
if self.nargs:
args = [stackItem.value for stackItem in stack[-self.nargs:]]
stack[-self.nargs:] = []
else:
args = []
return self.item.production.apply(args)
class LRParser(Grammar):
"""
LR(1) parser. This class is intended to be used with a lexer class
derived from :py:class:`LexerBase`, using inheritance; it
overrides :py:func:`LexerBase.newToken` so you must inherit from
the parser first, then the lexer:
.. code-block:: python
class MyParser(LRParser, ReLexer):
# ...
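A fuller sketch, condensed from the calc sample shipped with ptk
(see samples/calc.py for the complete version):
.. code-block:: python
@leftAssoc('+', '-')
class MyParser(LRParser, ReLexer):
@token(r'[0-9]+')
def number(self, tok):
tok.value = int(tok.value)
@production('E -> number')
def litteral(self, number):
return number
@production('E -> E "+" E')
@production('E -> E "-" E')
def binaryop(self, left, op, right):
return left + right if op == '+' else left - right
def newSentence(self, result):
print(result)
MyParser().parse('1 + 2')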
"""
def __init__(self): # pylint: disable=R0914,R0912
super(LRParser, self).__init__()
self._restartParser()
def newToken(self, tok):
try:
for action, stack in self._processToken(tok):
if action.doAction(self, stack, tok):
break
except _Accept as exc:
self._restartParser()
self.newSentence(exc.result)
def _processToken(self, tok):
while True:
action = self.__actions__.get((self.__stack[-1].state, tok.type), None)
if action is None:
raise ParseError(self, tok, self.__stack[-1].state)
yield action, self.__stack
def newSentence(self, sentence): # pragma: no cover
"""
This is called when the start symbol has been reduced.
:param sentence: The value associated with the start symbol.
"""
raise NotImplementedError
@classmethod
def _createProductionParser(cls, name, priority):
return ProductionParser(callbackByName(name), priority, cls)
@classmethod
def _createReduceAction(cls, item):
return _Reduce(item)
@classmethod
def _createShiftAction(cls, state):
return _Shift(state)
@classmethod
def prepare(cls):
for prod in cls.productions():
if prod.name is _StartState:
break
else:
def acceptor(_, result):
raise _Accept(result)
prod = Production(_StartState, acceptor)
prod.addSymbol(cls._defaultStartSymbol() if cls.startSymbol is None else cls.startSymbol, name='result')
cls.__productions__.insert(0, prod)
cls.startSymbol = _StartState
super(LRParser, cls).prepare()
states, goto = cls.__computeStates(prod)
reachable = cls.__computeActions(states, goto)
logger = logging.getLogger('LRParser')
cls.__resolveConflicts(logger)
usedTokens = set([symbol for state, symbol in cls.__actions__.keys() if symbol is not EOF])
if usedTokens != cls.tokenTypes(): # pragma: no cover
logger.warning('The following tokens are not used: %s', ','.join([repr(sym) for sym in sorted(cls.tokenTypes() - usedTokens)]))
if reachable != cls.nonterminals(): # pragma: no cover
logger.warning('The following nonterminals are not reachable: %s', ','.join([repr(sym) for sym in sorted(cls.nonterminals() - reachable)]))
# Reductions only need goto entries for nonterminals
cls._goto = dict([((state, symbol), newState) for (state, symbol), newState in goto.items() if symbol not in cls.tokenTypes()])
parts = list()
if cls.nSR:
parts.append('%d shift/reduce conflicts' % cls.nSR)
if cls.nRR:
parts.append('%d reduce/reduce conflicts' % cls.nRR)
if parts:
logger.warning(', '.join(parts))
# Cast to tuple because sets are not totally ordered
for index, state in enumerate([tuple(cls._startState)] + sorted([tuple(state) for state in states if state != cls._startState])):
logger.debug('State %d', index)
for item in sorted(state):
logger.debug(' %s', item)
logger.info('%d states.', len(states))
@classmethod
def __computeStates(cls, start):
allSyms = cls.tokenTypes() | cls.nonterminals()
goto = dict()
cls._startState = frozenset([_Item(start, 0, EOF)])
states = set([cls._startState])
stack = [cls._startState]
while stack:
state = stack.pop()
stateClosure = cls.__itemSetClosure(state)
for symbol in allSyms:
# Compute goto(symbol, state)
nextState = set()
for item in stateClosure:
if not item.shouldReduce() and item.production.right[item.dot] == symbol:
nextState.add(item.next())
if nextState:
nextState = frozenset(nextState)
goto[(state, symbol)] = nextState
if nextState not in states:
states.add(nextState)
stack.append(nextState)
return states, goto
@classmethod
def __computeActions(cls, states, goto):
cls.__actions__ = dict()
reachable = set()
for state in states:
for item in cls.__itemSetClosure(state):
if item.shouldReduce():
action = cls._createReduceAction(item)
reachable.add(item.production.name)
cls.__addReduceAction(state, item.terminal, action)
else:
symbol = item.production.right[item.dot]
if symbol in cls.tokenTypes():
cls.__addShiftAction(state, symbol, cls._createShiftAction(goto[(state, symbol)]))
return reachable
@classmethod
def __shouldPreferShift(cls, logger, reduceAction, symbol):
logger.info('Shift/reduce conflict for "%s" on "%s"', reduceAction.item, symbol)
prodPrecedence = reduceAction.item.production.precedence(cls)
tokenPrecedence = cls.terminalPrecedence(symbol)
# If both precedences are specified, use priority/associativity
if prodPrecedence is not None and tokenPrecedence is not None:
prodAssoc, prodPrio = prodPrecedence
tokenAssoc, tokenPrio = tokenPrecedence
if prodPrio > tokenPrio:
logger.info('Resolving in favor of reduction because of priority')
return False
if prodPrio < tokenPrio:
logger.info('Resolving in favor of shift because of priority')
return True
if prodAssoc == tokenAssoc:
if prodAssoc == 'non':
logger.info('Resolving in favor of error because of associativity')
raise _ResolveError()
if prodAssoc == 'left':
logger.info('Resolving in favor of reduction because of associativity')
return False
logger.info('Resolving in favor of shift because of associativity')
return True
# At least one of those is not specified; use shift
logger.warning('Could not disambiguate shift/reduce conflict for "%s" on "%s"; using shift' % (reduceAction.item, symbol))
cls.nSR += 1
return True
@classmethod
def __resolveConflicts(cls, logger):
cls.nSR = 0
cls.nRR = 0
for (state, symbol), actions in sorted(cls.__actions__.items()):
action = actions.pop()
while actions:
conflicting = actions.pop()
try:
action = cls.__resolveConflict(logger, action, conflicting, symbol)
except _ResolveError:
del cls.__actions__[(state, symbol)]
break
else:
cls.__actions__[(state, symbol)] = action
@classmethod
def __resolveConflict(cls, logger, action1, action2, symbol):
if isinstance(action2, _Shift):
action1, action2 = action2, action1
if isinstance(action1, _Shift):
# Shift/reduce
return action1 if cls.__shouldPreferShift(logger, action2, symbol) else action2
# Reduce/reduce
logger.warning('Reduce/reduce conflict between "%s" and "%s"', action1.item, action2.item)
cls.nRR += 1
# Use the first one to be declared
for prod in cls.productions():
if prod == action1.item.production:
logger.warning('Using "%s"', prod)
return action1
if prod == action2.item.production:
logger.warning('Using "%s"', prod)
return action2
@classmethod
def __addReduceAction(cls, state, symbol, action):
cls.__actions__.setdefault((state, symbol), list()).append(action)
@classmethod
def __addShiftAction(cls, state, symbol, action):
for existing in cls.__actions__.get((state, symbol), list()):
if isinstance(existing, _Shift):
return
cls.__actions__.setdefault((state, symbol), list()).append(action)
@classmethod
def goto(cls, state, symbol):
return cls._goto[(state, symbol)]
def _restartParser(self):
self.__stack = [_StackItem(self._startState, None)]
@classmethod
def __itemSetClosure(cls, items):
result = set(items)
while True:
prev = set(result)
for item in [item for item in result if not item.shouldReduce()]:
symbol = item.production.right[item.dot]
if symbol not in cls.tokenTypes():
terminals = cls.first(*tuple(item.production.right[item.dot + 1:] + [item.terminal]))
for prod in (prod for prod in cls.productions() if prod.name == symbol):
for terminal in terminals:
result.add(_Item(prod, 0, terminal))
if prev == result:
break
return result
class ProductionParser(LRParser, ProgressiveLexer): # pylint: disable=R0904
# pylint: disable=C0111,C0103,R0201
def __init__(self, callback, priority, grammarClass): # pylint: disable=R0915
self.callback = callback
self.priority = priority
self.grammarClass = grammarClass
super(ProductionParser, self).__init__()
@classmethod
def prepare(cls, **kwargs): # pylint: disable=R0915
# Obviously cannot use @production here
# DECL -> identifier "->" PRODS
prod = Production('DECL', cls.DECL)
prod.addSymbol('identifier', 'name')
prod.addSymbol('arrow')
prod.addSymbol('PRODS', 'prods')
cls.__productions__.append(prod)
# PRODS -> P
prod = Production('PRODS', cls.PRODS1)
prod.addSymbol('P', 'prodlist')
cls.__productions__.append(prod)
# PRODS -> PRODS "|" P
prod = Production('PRODS', cls.PRODS2)
prod.addSymbol('PRODS', 'prods')
prod.addSymbol('union')
prod.addSymbol('P', 'prodlist')
cls.__productions__.append(prod)
# P -> P SYM
prod = Production('P', cls.P1)
prod.addSymbol('P', 'prodlist')
prod.addSymbol('SYM', 'sym')
cls.__productions__.append(prod)
# P -> ɛ
prod = Production('P', cls.P2)
cls.__productions__.append(prod)
# SYM -> SYMNAME PROPERTIES
prod = Production('SYM', cls.SYM)
prod.addSymbol('SYMNAME', 'symname')
prod.addSymbol('PROPERTIES', 'properties')
cls.__productions__.append(prod)
# SYM -> SYMNAME repeat PROPERTIES
prod = Production('SYM', cls.SYMREP)
prod.addSymbol('SYMNAME', 'symname')
prod.addSymbol('repeat', 'repeat')
prod.addSymbol('PROPERTIES', 'properties')
cls.__productions__.append(prod)
# SYM -> SYMNAME repeat lparen identifier rparen PROPERTIES
prod = Production('SYM', cls.SYMREP)
prod.addSymbol('SYMNAME', 'symname')
prod.addSymbol('repeat', 'repeat')
prod.addSymbol('lparen')
prod.addSymbol('identifier', 'separator')
prod.addSymbol('rparen')
prod.addSymbol('PROPERTIES', 'properties')
cls.__productions__.append(prod)
# SYM -> SYMNAME repeat lparen litteral rparen PROPERTIES
prod = Production('SYM', cls.SYMREP_LIT)
prod.addSymbol('SYMNAME', 'symname')
prod.addSymbol('repeat', 'repeat')
prod.addSymbol('lparen')
prod.addSymbol('litteral', 'separator')
prod.addSymbol('rparen')
prod.addSymbol('PROPERTIES', 'properties')
cls.__productions__.append(prod)
# SYMNAME -> identifier
prod = Production('SYMNAME', cls.SYMNAME1)
prod.addSymbol('identifier', 'identifier')
cls.__productions__.append(prod)
# SYMNAME -> litteral
prod = Production('SYMNAME', cls.SYMNAME2)
prod.addSymbol('litteral', 'litteral')
cls.__productions__.append(prod)
# PROPERTIES -> ɛ
prod = Production('PROPERTIES', cls.PROPERTIES1)
cls.__productions__.append(prod)
# PROPERTIES -> lchev identifier rchev
prod = Production('PROPERTIES', cls.PROPERTIES2)
prod.addSymbol('lchev')
prod.addSymbol('identifier', 'name')
prod.addSymbol('rchev')
cls.__productions__.append(prod)
super(ProductionParser, cls).prepare(**kwargs)
def newSentence(self, startSymbol):
name, prods = startSymbol
for prod in prods:
if prod.name is None:
prod.name = name
self.grammarClass.__productions__.extend(prods)
# Lexer
@staticmethod
def ignore(char):
return char in ' \t\n'
@token('->')
def arrow(self, tok):
pass
@token('<')
def lchev(self, tok):
pass
@token('>')
def rchev(self, tok):
pass
@token(r'\|')
def union(self, tok):
pass
@token(r'\*|\+|\?')
def repeat(self, tok):
pass
@token(r'\(')
def lparen(self, tok):
pass
@token(r'\)')
def rparen(self, tok):
pass
@token('[a-zA-Z_][a-zA-Z0-9_]*')
def identifier(self, tok):
pass
@token(r'"|\'')
def litteral(self, tok):
class StringBuilder(object):
def __init__(self, quotetype):
self.quotetype = quotetype
self.chars = list()
self.state = 0
def feed(self, char):
if self.state == 0:
if char == '\\':
self.state = 1
elif char == self.quotetype:
return 'litteral', ''.join(self.chars)
else:
self.chars.append(char)
elif self.state == 1:
self.chars.append(char)
self.state = 0
self.setConsumer(StringBuilder(tok.value))
# Parser
def DECL(self, name, prods):
if name in self.grammarClass.tokenTypes():
raise GrammarError('"%s" is a token name and cannot be used as non-terminal' % name)
return (name, prods)
def PRODS1(self, prodlist):
return prodlist
def PRODS2(self, prods, prodlist):
prods.extend(prodlist)
return prods
def P1(self, sym, prodlist):
result = list()
symbol, properties, repeat, sep = sym
for prod in prodlist:
if prod.name is None:
if repeat is None:
prod.addSymbol(symbol, name=properties.get('name', None))
result.append(prod)
elif repeat == '?':
if sep is not None:
raise GrammarError('A separator makes no sense for "?"')
self.__addAtMostOne(result, prod, symbol, properties.get('name', None))
elif repeat in ['*', '+']:
self.__addList(result, prod, symbol, properties.get('name', None), repeat == '*', sep)
else:
result.append(prod)
return result
def __addAtMostOne(self, productions, prod, symbol, name):
clone = prod.cloned()
if name is not None:
self._wrapCallbackNone(name, clone)
productions.append(clone)
prod.addSymbol(symbol, name=name)
productions.append(prod)
def _wrapCallbackNone(self, name, prod):
previous = prod.callback
def callback(*args, **kwargs):
kwargs[name] = None
return previous(*args, **kwargs)
prod.callback = callback
def __addList(self, productions, prod, symbol, name, allowEmpty, sep):
class ListSymbol(six.with_metaclass(Singleton, object)):
__reprval__ = six.u('List(%s, "%s")') % (symbol, six.u('*') if allowEmpty else six.u('+'))
if allowEmpty:
clone = prod.cloned()
self._wrapCallbackEmpty(name, clone)
productions.append(clone)
prod.addSymbol(ListSymbol, name=name)
productions.append(prod)
listProd = Production(ListSymbol, self._wrapCallbackOne())
listProd.addSymbol(symbol, name='item')
productions.append(listProd)
listProd = Production(ListSymbol, self._wrapCallbackNext())
listProd.addSymbol(ListSymbol, name='items')
if sep is not None:
listProd.addSymbol(sep)
listProd.addSymbol(symbol, name='item')
productions.append(listProd)
def _wrapCallbackEmpty(self, name, prod):
previous = prod.callback
def cbEmpty(*args, **kwargs):
if name is not None:
kwargs[name] = []
return previous(*args, **kwargs)
prod.callback = cbEmpty
def _wrapCallbackOne(self):
def cbOne(_, item):
return [item]
return cbOne
def _wrapCallbackNext(self):
def cbNext(_, items, item):
items.append(item)
return items
return cbNext
def P2(self):
# 'name' is replaced in newSentence()
return [Production(None, self.callback, priority=self.priority)]
def SYMNAME1(self, identifier):
return identifier
def SYMNAME2(self, litteral):
name = litteral
if name not in self.grammarClass.tokenTypes():
self.grammarClass.addTokenType(name, lambda s, tok: None, re.escape(name), None)
return name
def SYM(self, symname, properties):
return (symname, properties, None, None)
def SYMREP(self, symname, repeat, properties, separator=None):
return (symname, properties, repeat, separator)
def SYMREP_LIT(self, symname, repeat, properties, separator):
if separator not in self.grammarClass.tokenTypes():
self.grammarClass.addTokenType(separator, lambda s, tok: None, re.escape(separator), None)
return self.SYMREP(symname, repeat, properties, separator)
def PROPERTIES1(self):
return dict()
def PROPERTIES2(self, name):
return dict(name=name)
python-ptk-1.3.1/ptk/regex.py 0000664 0000000 0000000 00000045117 12650717014 0016126 0 ustar 00root root 0000000 0000000 # -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
"""
Simple regular expression engine used by ProgressiveLexer.
"""
import six
import re
import collections
#===============================================================================
# Regex objects
class _State(object):
pass
class DeadState(Exception):
"""
Raised when the FSA reaches a dead state
"""
class RegularExpression(object):
"""
Regular expression object (non-deterministic FSA)
"""
def __init__(self):
self._transitions = list() # of (startState, class/None, endState)
self._startState = _State()
self._finalState = _State()
self._currentState = None
self._startStates = set()
def clone(self):
"""
Returns a clone of this object. Used by concat() and union()
so that passing the same expression several times does not mix
up its states.
"""
# pylint: disable=W0212
result = RegularExpression()
for startState, trans, endState in self._transitions:
if startState is self._startState:
startState = result._startState
if startState is self._finalState:
startState = result._finalState
if endState is self._startState:
endState = result._startState
if endState is self._finalState:
endState = result._finalState
result._transitions.append((startState, trans, endState))
result._startStates = set([state for state, _, _ in result._transitions])
return result
@staticmethod
def fromClass(klass):
"""
Builds a regular expression from a CharacterClass instance
"""
# pylint: disable=W0212
result = RegularExpression()
result._transitions = [(result._startState, klass, result._finalState)]
result._startStates = set([state for state, _, _ in result._transitions])
return result
@staticmethod
def concat(*rxs):
"""
Builds the concatenation of several RegularExpression instances
"""
# pylint: disable=W0212
rxs = [rx.clone() for rx in rxs]
result = RegularExpression()
if rxs:
result._startState = rxs[0]._startState
result._transitions = list(rxs[0]._transitions)
for rx1, rx2 in zip(rxs[:-1], rxs[1:]):
for startState, trans, endState in rx2._transitions:
if startState is rx2._startState:
startState = rx1._finalState
if endState is rx2._startState:
endState = rx1._finalState
result._transitions.append((startState, trans, endState))
result._finalState = rxs[-1]._finalState
else:
result._transitions = [(result._startState, None, result._finalState)]
result._startStates = set([state for state, _, _ in result._transitions])
return result
@staticmethod
def union(*rxs):
"""
Builds the union of several RegularExpression instances
"""
# pylint: disable=W0212
result = RegularExpression()
for rx in [rx.clone() for rx in rxs]:
result._transitions.extend(rx._transitions)
result._transitions.append((result._startState, None, rx._startState))
result._transitions.append((rx._finalState, None, result._finalState))
result._startStates = set([state for state, _, _ in result._transitions])
return result
@staticmethod
def kleene(rx):
"""
Kleene closure
"""
# pylint: disable=W0212
result = RegularExpression()
result._transitions = list(rx._transitions)
result._transitions.append((result._startState, None, result._finalState))
result._transitions.append((rx._finalState, None, rx._startState))
result._transitions.append((result._startState, None, rx._startState))
result._transitions.append((rx._finalState, None, result._finalState))
result._startStates = set([state for state, _, _ in result._transitions])
return result
@staticmethod
def exponent(rx, minCount, maxCount=None):
"""
Repetition with a minimum count and an optional maximum count
"""
if maxCount is None:
return RegularExpression.concat(
RegularExpression.exponent(rx, minCount, minCount),
RegularExpression.kleene(rx)
)
else:
return RegularExpression.union(*tuple(
[RegularExpression.concat(*tuple([rx for _ in range(count)])) for count in range(minCount, maxCount+1)]))
# Matching
def _epsilonClose(self, states):
newStates = set(states) # Copy
while True:
added = False
for state in set(newStates):
for startState, trans, endState in self._transitions:
if startState == state and trans is None and endState not in newStates:
newStates.add(endState)
added = True
if not added:
break
return newStates
def _closure(self, states, char):
newStates = set()
for startState, trans, endState in self._transitions:
if trans is not None and startState in states and char in trans:
newStates.add(endState)
return newStates
def start(self):
"""
Resets the internal state to the start state
"""
self._currentState = self._epsilonClose(set([self._startState]))
return self
def feed(self, char):
"""
Advances the state according to *char*; returns True if the final state has been reached
"""
self._currentState = self._epsilonClose(self._closure(self._currentState, char))
if not self._currentState:
raise DeadState()
return self._finalState in self._currentState
def isDeadEnd(self):
"""
Checks if the current state is a dead end, i.e. if there are no outgoing transitions from it
"""
return self._currentState and all([state not in self._startStates for state in self._currentState])
def match(self, string):
"""
Match a whole string
"""
if six.PY3 and isinstance(string, bytes):
string = [chr(c).encode('ascii') for c in string]
self.start()
try:
for char in string:
self.feed(char)
return self._finalState in self._currentState
except DeadState:
return False
#===============================================================================
# Lexing
class TokenizeError(Exception):
"""Tokenization error in a regular expression"""
class BackslashAtEndOfInputError(TokenizeError):
"""Escape character at end of input"""
class UnterminatedClassError(TokenizeError):
"""Character class not ended"""
class InvalidClassError(TokenizeError):
"""Invalid class, e.g. z-a"""
class InvalidExponentError(TokenizeError):
"""Invalid exponent value"""
class CharacterClass(object): # pylint: disable=R0903
"""Base class for character classes"""
def __contains__(self, char): # pragma: no cover
raise NotImplementedError
class AnyCharacterClass(CharacterClass):
"""The ."""
def __contains__(self, char):
return char not in [six.u('\n'), six.b('\n')]
def __eq__(self, other):
return isinstance(other, AnyCharacterClass)
class RegexCharacterClass(CharacterClass): # pylint: disable=R0903
"""
Character class backed by a compiled regex; used for bracket expressions such as [a-z]
"""
_cache = dict()
def __init__(self, pattern):
if pattern not in self._cache:
try:
flags = 0
if six.PY2 and isinstance(pattern, unicode): # pylint: disable=E0602
flags = re.UNICODE
self._cache[pattern] = re.compile(pattern, flags)
except re.error as exc:
raise InvalidClassError(str(exc))
self._rx = self._cache[pattern]
def __eq__(self, other): # pragma: no cover
return self is other # Because of cache
def __contains__(self, char):
return self._rx.match(char) is not None
class LitteralCharacterClass(CharacterClass): # pylint: disable=R0903
"""
Matches a single literal character
"""
def __init__(self, char):
self._char = char
def __eq__(self, other):
return type(self) is type(other) and self._char == other._char # pylint: disable=W0212
def __contains__(self, char):
return char == self._char
def __repr__(self):
return repr(self._char)
ExponentToken = collections.namedtuple('ExponentToken', ['minCount', 'maxCount'])
class RegexTokenizer(object): # pylint: disable=R0903
"""
Tokenization of regular expressions. Actually, this does a bit
more than plain tokenization; it also handles special cases for
character classes and exponentiation.
"""
TOK_CLASS = 1
TOK_EXPONENT = 2
TOK_LPAREN = 3
TOK_RPAREN = 4
TOK_UNION = 5
Token = collections.namedtuple('Token', ['type', 'value'])
def __init__(self, regex):
if six.PY3 and isinstance(regex, bytes):
self._stack = [chr(c).encode('ascii') for c in reversed(regex)]
else:
self._stack = list(reversed(regex))
self._state = 0
self._currentClass = None
self._exponentValue = 0
self._startExponent = None
# Six.[ub] behaves badly with strings such as '\\'
if six.PY2:
self.ubackslash = '\\'.decode('ascii')
self.bbackslash = '\\'
else:
self.ubackslash = '\\'
self.bbackslash = '\\'.encode('ascii')
def tokens(self):
"""
Returns a list of tokens for the regex passed to the
constructor. Items are 2-tuples (type, value) where 'type' is
one of the TOK_* constants.
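For example (illustrative):
.. code-block:: python
toks = RegexTokenizer('ab+').tokens()
# Three tokens: a literal class for 'a', a literal class for 'b' and an
# exponent meaning "one or more". Note that counted repetitions use a
# dash, as in 'a{2-3}', not a comma.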
"""
tokenList = list()
while self._stack:
char = self._stack.pop()
state = getattr(self, '_state%d' % self._state)
state = state(char, tokenList)
self._state = state if state is not None else self._state
if self._state == 1:
raise BackslashAtEndOfInputError('Backslash at end of string')
if 2 <= self._state <= 8:
raise UnterminatedClassError('Unterminated character class')
if 9 <= self._state <= 12:
raise InvalidExponentError('Unterminated {')
return tokenList
# "Normal" state
def _state0(self, char, tokenList):
# Normal state
if char in [six.u('*'), six.b('*')]:
tokenList.append(self.Token(self.TOK_EXPONENT, ExponentToken(0, None)))
elif char in [six.u('+'), six.b('+')]:
tokenList.append(self.Token(self.TOK_EXPONENT, ExponentToken(1, None)))
elif char in [six.u('.'), six.b('.')]:
tokenList.append(self.Token(self.TOK_CLASS, AnyCharacterClass()))
elif char in [six.u('('), six.b('(')]:
tokenList.append(self.Token(self.TOK_LPAREN, char))
elif char in [six.u(')'), six.b(')')]:
tokenList.append(self.Token(self.TOK_RPAREN, char))
elif char in [six.u('|'), six.b('|')]:
tokenList.append(self.Token(self.TOK_UNION, char))
elif char == six.u('['):
self._currentClass = six.StringIO()
self._currentClass.write(char)
return 2
elif char == six.b('['):
self._currentClass = six.BytesIO()
self._currentClass.write(char)
return 2
elif char in [six.u('{'), six.b('{')]:
return 9
elif char in [six.u(']'), six.b(']'), six.u('}'), six.b('}')]:
raise TokenizeError('Unexpected token "%s"' % char)
elif char in [self.ubackslash, self.bbackslash]:
return 1
else:
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(char)))
def _state1(self, char, tokenList):
# After a "\" in normal state
if char in [six.u('d'), six.u('s'), six.u('w'), six.u('D'), six.u('S'), six.u('W')]:
tokenList.append(self.Token(self.TOK_CLASS, RegexCharacterClass(self.ubackslash + char)))
elif char in [six.b('d'), six.b('s'), six.b('w'), six.b('D'), six.b('S'), six.b('W')]:
tokenList.append(self.Token(self.TOK_CLASS, RegexCharacterClass(self.bbackslash + char)))
elif char == six.u('n'):
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(six.u('\n'))))
elif char == six.u('t'):
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(six.u('\t'))))
elif char == six.b('n'):
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(six.b('\n'))))
elif char == six.b('t'):
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(six.b('\t'))))
else:
tokenList.append(self.Token(self.TOK_CLASS, LitteralCharacterClass(char)))
return 0
# Character classes
def _state2(self, char, tokenList):
# In character class
if char in [self.ubackslash, self.bbackslash]:
return 3
if char in [six.u(']'), six.b(']')]:
self._currentClass.write(char)
tokenList.append(self.Token(self.TOK_CLASS, RegexCharacterClass(self._currentClass.getvalue())))
self._currentClass = None
return 0
self._currentClass.write(char)
def _state3(self, char, tokenList): # pylint: disable=W0613
# After "\" in character class
if six.PY2 and isinstance(char, str):
self._currentClass.write(self.bbackslash + char)
else:
self._currentClass.write(self.ubackslash + char)
return 2
# Exponent
def _state9(self, char, tokenList): # pylint: disable=W0613
# Start of exponent
if not char.isdigit():
raise InvalidExponentError('Exponent not starting with a number')
self._exponentValue = int(char)
return 10
def _state10(self, char, tokenList):
# In exponent, computing start value
if char in [six.u('-'), six.b('-')]:
self._startExponent = self._exponentValue
return 11
elif char in [six.u('}'), six.b('}')]:
tokenList.append(self.Token(self.TOK_EXPONENT, ExponentToken(self._exponentValue, self._exponentValue)))
return 0
elif char.isdigit():
self._exponentValue *= 10
self._exponentValue += int(char)
else:
raise InvalidExponentError('Invalid character "%s"' % char)
def _state11(self, char, tokenList): # pylint: disable=W0613
# In exponent, expecting second term of interval
if char in [six.u('}'), six.b('}')]:
raise InvalidExponentError('Missing range end')
if not char.isdigit():
raise InvalidExponentError('Invalid character "%s"' % char)
self._exponentValue = int(char)
return 12
def _state12(self, char, tokenList):
# In exponent, computing end value
if char in [six.u('}'), six.b('}')]:
if self._startExponent > self._exponentValue:
raise InvalidExponentError('Invalid exponent range %d-%d' % (self._startExponent, self._exponentValue))
tokenList.append(self.Token(self.TOK_EXPONENT, ExponentToken(self._startExponent, self._exponentValue)))
return 0
if not char.isdigit():
raise InvalidExponentError('Invalid character "%s"' % char)
self._exponentValue *= 10
self._exponentValue += int(char)
#===============================================================================
# Parsing
class RegexParseError(Exception):
"""
Regular expression parse error
"""
class RegexParser(object):
"""
Actual parsing of regular expression strings
"""
def parse(self, tokens):
"""
Parses a token sequence (as returned by :py:func:`RegexTokenizer.tokens`) and returns the resulting expression object
"""
tokens = list(tokens)
expr, pos = self._parse_E1(tokens, 0)
if len(tokens) != pos:
raise RegexParseError('Unexpected token "%s"' % tokens[pos].value)
return expr
def _parse_E1(self, tokens, pos):
expr, pos = self._parse_E2(tokens, pos)
return self._parse_R1(expr, tokens, pos)
def _parse_R1(self, left, tokens, pos):
while pos != len(tokens) and tokens[pos].type == RegexTokenizer.TOK_UNION:
expr, pos = self._parse_E2(tokens, pos + 1)
left = self.union(left, expr)
return left, pos
def _parse_E2(self, tokens, pos):
expr, pos = self._parse_E3(tokens, pos)
return self._parse_R2(expr, tokens, pos)
def _parse_R2(self, left, tokens, pos):
while True:
try:
tempExpr, tempPos = self._parse_E3(tokens, pos)
self._parse_R2(tempExpr, tokens, tempPos)
except RegexParseError:
break
expr, pos = self._parse_E3(tokens, pos)
left = self.concat(left, expr)
return left, pos
def _parse_E3(self, tokens, pos):
expr, pos = self._parse_E(tokens, pos)
return self._parse_R3(expr, tokens, pos)
def _parse_R3(self, left, tokens, pos):
while pos != len(tokens) and tokens[pos].type == RegexTokenizer.TOK_EXPONENT:
left = self.exponent(left, tokens[pos].value)
pos += 1
return left, pos
def _parse_E(self, tokens, pos):
if pos == len(tokens):
raise RegexParseError('Expected "(" or letter')
if tokens[pos].type == RegexTokenizer.TOK_LPAREN:
expr, pos = self._parse_E1(tokens, pos + 1)
if pos == len(tokens) or tokens[pos].type != RegexTokenizer.TOK_RPAREN:
raise RegexParseError('Missing ")"')
return expr, pos + 1
elif tokens[pos].type == RegexTokenizer.TOK_CLASS:
return self.klass(tokens[pos].value), pos + 1
raise RegexParseError('Unexpected token "%s"' % tokens[pos].value)
# Delegate
def union(self, rx1, rx2): # pylint: disable=C0111,R0201
return RegularExpression.union(rx1, rx2)
def concat(self, rx1, rx2): # pylint: disable=C0111,R0201
return RegularExpression.concat(rx1, rx2)
def exponent(self, rx, exp): # pylint: disable=C0111,R0201
return RegularExpression.exponent(rx, exp.minCount, exp.maxCount)
def klass(self, charClass): # pylint: disable=C0111,R0201
return RegularExpression.fromClass(charClass)
def buildRegex(rx):
"""
Shortcut to build a RegularExpression object from a string
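For instance (illustrative):
.. code-block:: python
rx = buildRegex('[0-9]+')
assert rx.match('42')
assert not rx.match('x42')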
"""
return RegexParser().parse(RegexTokenizer(rx).tokens())
python-ptk-1.3.1/ptk/utils.py 0000664 0000000 0000000 00000002206 12650717014 0016144 0 ustar 00root root 0000000 0000000 # -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
"""
Miscellaneous utilities.
"""
import functools
def memoize(func):
"""
Memoization decorator. The decorated function's positional arguments are used as the cache key and must therefore be hashable.
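For example (illustrative):
.. code-block:: python
@memoize
def fib(n):
return n if n < 2 else fib(n - 1) + fib(n - 2)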
"""
cache = dict()
@functools.wraps(func)
def _wrapper(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return _wrapper
class Singleton(type):
"""
Singleton metaclass. The class statement itself evaluates to the unique instance, so the class name ends up bound to an instance rather than a type.
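For example (sketch; assumes six is importable, as it is elsewhere in ptk):
.. code-block:: python
class Unit(six.with_metaclass(Singleton, object)):
__reprval__ = '()'
# 'Unit' now names the unique instance; repr(Unit) == '()'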
"""
def __new__(metacls, name, bases, attrs):
# pylint: disable=C0103
cls = type.__new__(metacls, name, bases, attrs)
cls.__eq__ = lambda self, other: other is self
cls.__lt__ = lambda self, other: other is not self
cls.__copy__ = lambda self: self
cls.__deepcopy__ = lambda self, memo: self
cls.__repr__ = lambda self: self.__reprval__
cls.__len__ = lambda self: len(self.__reprval__)
cls.__hash__ = lambda self: hash(id(self))
return functools.total_ordering(cls)()
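# callbackByName returns a proxy that looks up the method named *funcName* on
# the instance at call time, through normal attribute lookup; it is used when
# registering token and production callbacks.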
def callbackByName(funcName):
def _wrapper(instance, *args, **kwargs):
return getattr(instance, funcName)(*args, **kwargs)
return _wrapper
python-ptk-1.3.1/pylintrc 0000664 0000000 0000000 00000004176 12650717014 0015433 0 ustar 00root root 0000000 0000000 [MASTER]
ignore=.hg
persistent=yes
unsafe-load-any-extension=no
[MESSAGES CONTROL]
disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,I,R0903,C0111,C0112
[REPORTS]
output-format=html
files-output=no
reports=yes
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
max-nested-blocks=7
[BASIC]
bad-functions=filter,input
good-names=i,j,k,ex,Run,_,rx,d,df
function-rgx=[a-z_][a-zA-Z0-9_]{2,30}$
variable-rgx=[a-z_][a-zA-Z0-9_]{2,30}$
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$|version|version_info
attr-rgx=__productions__|[a-z_]_?[a-zA-Z0-9]{2,30}$
argument-rgx=[a-z_][a-zA-Z0-9_]{2,30}$
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
class-rgx=[A-Z_][a-zA-Z0-9]+$
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
method-rgx=[a-z_][a-z0-9A-Z_][a-z0-9A-Z_]{2,30}$
[FORMAT]
max-line-length=200
max-module-lines=1000
indent-string=' '
indent-after-paren=4
[LOGGING]
logging-modules=logging
[MISCELLANEOUS]
notes=FIXME,XXX,TODO
[SIMILARITIES]
min-similarity-lines=4
ignore-comments=yes
ignore-docstrings=yes
ignore-imports=no
[TYPECHECK]
ignore-mixin-members=yes
[VARIABLES]
init-import=no
dummy-variables-rgx=_$|dummy
[CLASSES]
defining-attr-methods=__init__,__new__,setUp,restartLexer
valid-classmethod-first-arg=cls
valid-metaclass-classmethod-first-arg=metacls
[DESIGN]
max-args=7
ignored-argument-names=_.*
max-locals=15
max-returns=6
max-branches=12
max-statements=50
max-parents=7
max-attributes=7
min-public-methods=2
max-public-methods=20
[EXCEPTIONS]
overgeneral-exceptions=Exception
python-ptk-1.3.1/samples/ 0000775 0000000 0000000 00000000000 12650717014 0015300 5 ustar 00root root 0000000 0000000 python-ptk-1.3.1/samples/async_calc.py 0000664 0000000 0000000 00000004325 12650717014 0017755 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Four operations calculator, asynchronous. Due to various buffering
problems you probably won't see the point unless you force
stdin to be noninteractive, e.g.
$ echo '3*4+6' | python3 ./async_calc.py
"""
import six, operator, os, asyncio, sys, codecs
from ptk.async_lexer import token, AsyncLexer, EOF
from ptk.async_parser import production, leftAssoc, AsyncLRParser, ParseError
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class Parser(AsyncLRParser, AsyncLexer):
async def asyncNewSentence(self, result):
six.print_('== Result:', result)
# Lexer
def ignore(self, char):
return char in [' ', '\t']
@token(r'[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
# Parser
@production('E -> "-" E', priority='*')
async def minus(self, value):
six.print_('== Neg: - %d' % value)
return -value
@production('E -> "(" E ")"')
async def paren(self, value):
return value
@production('E -> number')
async def litteral(self, number):
return number
@production('E -> E "+" E')
@production('E -> E "-" E')
@production('E -> E "*" E')
@production('E -> E "/" E')
async def binaryop(self, left, op, right):
six.print_('Binary operation: %s %s %s' % (left, op, right))
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right)
async def main():
reader = asyncio.StreamReader()
await asyncio.get_event_loop().connect_read_pipe(lambda: asyncio.StreamReaderProtocol(reader), sys.stdin)
decoder = codecs.getincrementaldecoder('utf_8')()
parser = Parser()
while True:
byte = await reader.read(1)
if not byte:
break
char = decoder.decode(byte)
if char:
if char == '\n':
char = EOF
else:
six.print_('Input char: "%s"' % repr(char))
await parser.asyncFeed(char)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
python-ptk-1.3.1/samples/calc.py 0000664 0000000 0000000 00000003632 12650717014 0016560 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Simple four operations calculator.
"""
import six, operator
from ptk.lexer import ReLexer, token
from ptk.parser import LRParser, leftAssoc, production, ParseError
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class SimpleCalc(LRParser, ReLexer):
def newSentence(self, result):
six.print_('== Result:', result)
# Lexer
def ignore(self, char):
return char in [' ', '\t']
@token(r'[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
# Parser
@production('E -> "-" E', priority='*')
def minus(self, value):
six.print_('== Neg: - %d' % value)
return -value
@production('E -> "(" E ")"')
def paren(self, value):
return value
@production('E -> number')
def litteral(self, number):
return number
@production('E -> E "+" E')
@production('E -> E "-" E')
@production('E -> E "*" E')
@production('E -> E "/" E')
def binaryop(self, left, op, right):
six.print_('Binary operation: %s %s %s' % (left, op, right))
return {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.WARNING, format='%(asctime)-15s %(levelname)-8s %(name)-15s %(message)s')
six.print_('Enter an arithmetic expression.')
parser = SimpleCalc()
while True:
try:
line = six.moves.input('> ')
except (KeyboardInterrupt, EOFError):
six.print_()
break
try:
parser.parse(line)
except ParseError as exc:
six.print_('Parse error: %s' % exc)
six.print_('Expected %s' % exc.expecting())
python-ptk-1.3.1/samples/defer_calc.py 0000664 0000000 0000000 00000005616 12650717014 0017731 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Four operations calculator, asynchronous (using Twisted). Due to
various buffering problems you probably won't see the point
unless you force stdin to be noninteractive, e.g.
$ echo '3*4+6' | python3 ./defer_calc.py
"""
import six, operator, os, sys, codecs
from ptk.deferred_lexer import token, DeferredLexer, EOF
from ptk.deferred_parser import production, leftAssoc, DeferredLRParser, ParseError
from twisted.internet.defer import succeed
from twisted.internet.protocol import Protocol
from twisted.internet import stdio, reactor
@leftAssoc('+', '-')
@leftAssoc('*', '/')
class Parser(DeferredLRParser, DeferredLexer):
def deferNewSentence(self, result):
six.print_('== Result:', result)
return succeed(None)
# Lexer
def ignore(self, char):
return char in [' ', '\t']
@token(r'[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
# Parser
@production('E -> "-" E', priority='*')
def minus(self, value):
six.print_('== Neg: - %d' % value)
return -value # You can return something else than a Deferred.
@production('E -> "(" E ")"')
def paren(self, value):
return succeed(value)
@production('E -> number')
def litteral(self, number):
return succeed(number)
@production('E -> E "+" E')
@production('E -> E "-" E')
@production('E -> E "*" E')
@production('E -> E "/" E')
def binaryop(self, left, op, right):
six.print_('Binary operation: %s %s %s' % (left, op, right))
return succeed({
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv
}[op](left, right))
class BytesProtocol(Protocol):
def __init__(self, *args, **kwargs):
self.parser = Parser()
self.decoder = codecs.getincrementaldecoder('utf_8')()
def connectionLost(self, reason):
six.print_('Connection lost: %s' % reason)
reactor.stop()
def dataReceived(self, data):
# We don't want more bytes to be handled while this is running.
self.transport.pauseProducing()
pending = [data[i:i + 1] for i in range(len(data))]  # 1-byte slices; list(data) would yield ints under Python 3
def next(result):
if pending:
char = self.decoder.decode(pending.pop(0))
if char:
if char == '\n':
char = EOF
else:
six.print_('Input char: "%s"' % repr(char))
self.parser.deferFeed(char).addCallbacks(next, self.error)
else:
next(None)
else:
self.transport.resumeProducing()
next(None)
def error(self, reason):
six.print_('ERROR: %s' % reason)
stdio.StandardIO(BytesProtocol())
reactor.run()
python-ptk-1.3.1/samples/yacc2py.py 0000664 0000000 0000000 00000031377 12650717014 0017237 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Converts a Yacc/Bison grammar definition into a Python skeleton that uses ptk.
"""
import getopt
import sys
import six
import collections
import codecs
import re
from ptk.parser import production, LRParser, ParseError
from ptk.lexer import token, ReLexer, EOF
from ptk.regex import buildRegex, DeadState
Symbol = collections.namedtuple('Symbol', ('name', 'argname'))
class Options(object):
def __init__(self, opts):
self.compact = False
self.arguments = False
self.filename = None
for opt, val in opts:
if opt in ('-c', '--compact'):
self.compact = True
if opt in ('-a', '--arguments'):
self.arguments = True
if opt in ('-o', '--output'):
self.filename = val
if opt in ('-h', '--help'):
self.usage()
if self.compact and self.arguments:
six.print_('--compact and --arguments are not compatible')
self.usage(1)
if self.filename is None:
six.print_('Output file not specified')
self.usage(1)
def usage(self, exitCode=0):
six.print_('Usage: %s [options] filename' % sys.argv[0])
six.print_('Options:')
six.print_(' -h, --help Print this')
six.print_(' -c, --compact Create one method for all alternatives of a production')
six.print_(' -o, --output Output to file (mandatory)')
six.print_(' -a, --arguments Generate argument names for items in productions (incompatible with --compact)')
sys.exit(exitCode)
@staticmethod
def create():
opts, args = getopt.getopt(sys.argv[1:], 'caho:', ['compact', 'arguments', 'help', 'output='])
return Options(opts), args
class NullToken(object):
def __init__(self, endMarker):
self.__rx = buildRegex('(.|\n)*%s' % re.escape(endMarker)).start()
def feed(self, char):
try:
if self.__rx.feed(char):
return None, None
except DeadState:
return None, None
class YaccParser(LRParser, ReLexer):
def __init__(self, options, stream):
self.stream = stream
self.options = options
super(YaccParser, self).__init__()
self.state = 0
self.yaccStartSymbol = None
self.allTokens = list()
self.allProductions = list()
self.precedences = list()
# Lexer
@token(r'%\{', types=[])
def c_decl(self, tok):
self.setConsumer(NullToken('%}'))
@token(r'/\*', types=[])
def comment(self, tok):
self.setConsumer(NullToken('*/'))
@token(r'%union\s*{', types=[]) # Hum, no LF possible before {
def union(self, tok):
self.setConsumer(NullToken('}'))
@token(r'%%')
def part_sep(self, tok):
self.state += 1
if self.state == 2:
# Ignore C code after last %%
class IgnoreCCode(object):
def feed(self, char):
if char is EOF:
return EOF, EOF
self.setConsumer(IgnoreCCode())
@staticmethod
def ignore(char):
return char in [' ', '\t', '\n']
@token(r'%(left|right|nonassoc)')
def assoc_decl(self, tok):
pass
@token(r'[a-zA-Z_][a-zA-Z0-9_]*')
def identifier(self, tok):
pass
@token('[1-9][0-9]*')
def number(self, tok):
tok.value = int(tok.value)
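# A double quote starts a string literal: a small consumer accumulates the
# following characters, honouring backslash escapes, and emits a single
# 'string' token when the closing quote is reached.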
@token('"')
def string(self, tok):
class StringParser(object):
def __init__(self):
self.state = 0
self.value = six.StringIO()
def feed(self, char):
if self.state == 0:
if char == '"':
return 'string', self.value.getvalue()
if char == '\\':
self.state = 1
else:
self.value.write(char)
elif self.state == 1:
self.value.write(char)
self.state = 0
self.setConsumer(StringParser())
@token(r'\{')
def semantic_action(self, tok):
# Don't try to be too smart; just balance {} that are not in string literals
class CSemanticAction(object):
def __init__(self):
self.state = 0
self.count = 1
self.value = six.StringIO()
self.value.write('{')
def feed(self, char):
self.value.write(char)
if self.state == 0: # Nothing special
if char == '}':
self.count -= 1
if self.count == 0:
return 'semantic_action', self.value.getvalue()
elif char == '{':
self.count += 1
elif char == '\\':
self.state = 1
elif char == '\'':
self.state = 2
elif char == '"':
self.state = 4
elif self.state == 1: # Escaping single char
self.state = 0
elif self.state == 2: # Character literal. Note that this accepts several characters
if char == '\\':
self.state = 3
elif char == '\'':
self.state = 0
elif self.state == 3: # Escaping in character litteral
self.state = 2
elif self.state == 4: # In string litteral
if char == '\\':
self.state = 5
elif char == '"':
self.state = 0
elif self.state == 5: # Escaping in string litteral
self.state = 4
self.setConsumer(CSemanticAction())
@token(r'\'.\'')
def litteral_token(self, tok):
tok.value = tok.value[1]
# Parser
@production('YACC_FILE -> META_DECLARATION* part_sep PRODUCTION_DECL*')
def yacc_file(self):
pass
# Tokens, start symbol, etc
@production('META_DECLARATION -> "%token" identifier+')
def token_declaration(self, tokens):
self.allTokens.extend(tokens)
@production('META_DECLARATION -> assoc_decl identifier+')
def assoc_declaration(self, assoc, tokens):
self.precedences.append((assoc, tokens))
@production('META_DECLARATION -> "%start" identifier')
def start_declaration(self, name):
self.yaccStartSymbol = name
@production('META_DECLARATION -> "%type" identifier identifier+')
@production('META_DECLARATION -> "%expect" number')
@production('META_DECLARATION -> "%debug"')
@production('META_DECLARATION -> "%defines"')
@production('META_DECLARATION -> "%destructor" semantic_action identifier+')
@production('META_DECLARATION -> "%file-prefix" "=" string')
@production('META_DECLARATION -> "%locations"')
@production('META_DECLARATION -> "%name-prefix" "=" string')
@production('META_DECLARATION -> "%no-parser"')
@production('META_DECLARATION -> "%no-lines"')
@production('META_DECLARATION -> "%output" "=" string')
@production('META_DECLARATION -> "%pure-parser"')
@production('META_DECLARATION -> "%token-table"')
@production('META_DECLARATION -> "%verbose"')
@production('META_DECLARATION -> "%yacc"')
def ignored_declaration(self):
pass
# Productions
@production('PRODUCTION_DECL -> identifier ":" PRODUCTION_RIGHT+("|") ";"')
def production_decl(self, left, right):
self.allProductions.append((left, right))
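# Each alternative of a production is gathered into a dict with three keys:
# 'names' (list of (symbol, argument name) pairs, duplicate argument names
# getting a numeric suffix), 'action' (the C semantic action, if any) and
# 'precedence' (the %prec symbol, if any).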
@production('PRODUCTION_RIGHT -> SYMBOL*')
def production_right(self, symbols):
names = list()
indexes = dict()
for symbol in symbols:
if symbol.argname is None:
names.append((symbol.name, None))
else:
index = indexes.get(symbol.argname, 0)
argname = symbol.argname if index == 0 else '%s_%d' % (symbol.argname, index + 1)
indexes[symbol.argname] = index + 1
names.append((symbol.name, argname))
return dict(names=names, action=None, precedence=None)
@production('PRODUCTION_RIGHT -> PRODUCTION_RIGHT semantic_action')
def production_right_action(self, prod, action):
if prod['action'] is not None:
raise RuntimeError('Duplicate semantic action "%s"' % action)
prod['action'] = action
return prod
@production('PRODUCTION_RIGHT -> PRODUCTION_RIGHT "%prec" identifier')
def production_right_prec(self, prod, prec):
if prod['precedence'] is not None:
raise RuntimeError('Duplicate precedence declaration "%s"' % prec)
prod['precedence'] = prec
return prod
@production('SYMBOL -> identifier')
def symbol_from_identifier(self, tok):
return Symbol(tok, None if tok in self.allTokens else tok)
@production('SYMBOL -> litteral_token')
def symbol_from_litteral(self, tok):
return Symbol('"%s"' % tok, None)
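# newSentence() runs once the whole grammar file has been reduced. It writes
# the generated module: associativity decorators, the Parser class header,
# one @token stub per declared token, then one @production-decorated method
# per alternative (or a single method per rule with --compact).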
def newSentence(self, result):
self.stream.write('from ptk.lexer import ReLexer, token\n')
self.stream.write('from ptk.parser import LRParser, production, leftAssoc, rightAssoc, nonAssoc\n')
self.stream.write('\n')
for assocType, tokens in self.precedences:
self.stream.write('@%s(%s)\n' % ({'%left': 'leftAssoc', '%right': 'rightAssoc', '%nonassoc': 'nonAssoc'}[assocType],
', '.join([repr(tok) for tok in tokens])))
self.stream.write('class Parser(LRParser, ReLexer):\n')
if self.yaccStartSymbol is not None:
self.stream.write(' startSymbol = %s\n' % repr(self.yaccStartSymbol))
self.stream.write('\n')
self.stream.write(' # Lexer\n')
for name in self.allTokens:
self.stream.write('\n')
self.stream.write(' @token(r\'\')\n')
self.stream.write(' def %s(self, tok):\n' % name)
self.stream.write(' pass\n')
methodIndexes = dict()
def methodName(name):
index = methodIndexes.get(name, 0)
methodIndexes[name] = index + 1
return name if index == 0 else '%s_%d' % (name, index + 1)
for name, prods in self.allProductions:
for prod in prods:
if not self.options.compact:
self.stream.write('\n')
if prod['action'] is not None:
for line in prod['action'].split('\n'):
self.stream.write(' # %s\n' % line)
symnames = []
for aname, argname in prod['names']:
symnames.append(aname if argname is None or not self.options.arguments else '%s<%s>' % (aname, argname))
self.stream.write(' @production(\'%s -> %s\'' % (name, ' '.join(symnames)))
if prod['precedence'] is not None:
self.stream.write(', priority=%s' % repr(prod['precedence']))
self.stream.write(')\n')
if not self.options.compact:
self.stream.write(' def %s(self' % methodName(name))
if self.options.arguments:
for aname, argname in prod['names']:
if argname is not None:
self.stream.write(', %s' % argname)
self.stream.write('):\n')
self.stream.write(' pass\n')
if self.options.compact:
self.stream.write(' def %s(self):\n' % methodName(name))
self.stream.write(' pass\n')
self.stream.write('\n')
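# Command-line driver: parses each grammar file given on the command line and
# writes the generated skeleton to the file passed with -o ('-' meaning
# standard output), reporting parse errors and timing.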
def main():
import time
options, filenames = Options.create()
for filename in filenames:
with codecs.getreader('utf_8')(open(filename, 'rb')) as fileobj:
output = sys.stdout if options.filename == '-' else codecs.getwriter('utf_8')(open(options.filename, 'wb'))
parser = YaccParser(options, output)
t0 = time.time()
try:
parser.parse(fileobj.read())
except ParseError as exc:
six.print_('Parse error: %s' % exc)
tokens = exc.expecting()
if tokens:
six.print_('Was expecting %s' % ', '.join(map(repr, sorted(tokens))))
sys.exit(1)
finally:
six.print_('== Parsed file in %d ms.' % int(1000 * (time.time() - t0)))
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.WARNING, format='%(asctime)-15s %(levelname)-8s %(name)-15s %(message)s')
main()
python-ptk-1.3.1/setup.cfg
[metadata]
description-file=README.rst
python-ptk-1.3.1/setup.py
# -*- coding: UTF-8 -*-
# (c) Jérôme Laheurte 2015
# See LICENSE.txt
import six
from distutils.core import setup
from ptk.meta import version, PackageInfo
setup(
name=six.u('ptk'),
packages=['ptk'],
version=version,
description=PackageInfo.short_description,
author=PackageInfo.author_name,
author_email=PackageInfo.author_email,
url=PackageInfo.project_url,
download_url=PackageInfo.download_url,
keywords=six.u('parser parsing compiler lr slr').split(),
classifiers=[
six.u('Development Status :: 5 - Production/Stable'),
six.u('Intended Audience :: Developers'),
six.u('License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)'),
six.u('Operating System :: OS Independent'),
six.u('Programming Language :: Python'),
six.u('Topic :: Software Development :: Compilers'),
six.u('Topic :: Software Development :: Libraries :: Python Modules'),
],
install_requires=['six']
)
python-ptk-1.3.1/tests/base.py
#!/usr/bin/env python
import logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)-15s %(levelname)-8s %(name)-15s %(message)s')
python-ptk-1.3.1/tests/test_all.py
#!/usr/bin/env python
import unittest
from test_utils import *
from test_regex import *
from test_regex_tokenizer import *
from test_regex_parser import *
from test_lexer import *
from test_grammar import *
from test_parser import *
try:
import twisted
except ImportError:
pass
else:
from test_deferred_lexer import *
from test_deferred_parser import *
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_deferred_lexer.py
#!/usr/bin/env python
import base, unittest
import six
from test_lexer import EOF, LexerBasicTestCaseMixin, PositionTestCaseMixin, TokenTypeTestCaseMixin, \
LexerByteTestCaseMixin, LexerUnicodeTestCaseMixin, LexerConsumerTestCaseMixin, \
LexerInheritanceTestCaseMixin, LexerUnterminatedTokenTestCaseMixin, LexerLengthTestCaseMixin, \
LexerPriorityTestCaseMixin, LexerEOFTestCaseMixin
from ptk.deferred_lexer import DeferredLexer
from twisted.internet.defer import succeed
class DeferredLexerTestCase(unittest.TestCase):
lexerClass = DeferredLexer
def setUp(self):
self.tokens = list()
def feed(self, tok):
if tok is EOF:
self.tokens = tuple(self.tokens)
else:
self.tokens.append(tok)
return succeed(None)
def doLex(self, inputString):
if six.PY3 and isinstance(inputString, bytes):
inputString = [chr(c).encode('ascii') for c in inputString]
for char in inputString:
d = self.lexer.deferFeed(char)
self.assertTrue(d.called)
d = self.lexer.deferFeed(EOF)
self.assertTrue(d.called)
return self.tokens
class NoResultLexer(DeferredLexer):
def deferNewToken(self, tok):
return succeed(None)
class DeferredLexerBasicTestCase(LexerBasicTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerPositionTestCase(PositionTestCaseMixin, DeferredLexerTestCase):
lexerClass = NoResultLexer
class DeferredLexerTokenTypeTestCase(TokenTypeTestCaseMixin, DeferredLexerTestCase):
lexerClass = NoResultLexer
class DeferredLexerByteTestCase(LexerByteTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerUnicodeTestCase(LexerUnicodeTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerConsumerTestCase(LexerConsumerTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerInheritanceTestCase(LexerInheritanceTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerUnterminatedTokenTestCase(LexerUnterminatedTokenTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerLengthTestCase(LexerLengthTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerPriorityTestCase(LexerPriorityTestCaseMixin, DeferredLexerTestCase):
pass
class DeferredLexerEOFTestCase(LexerEOFTestCaseMixin, DeferredLexerTestCase):
pass
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_deferred_parser.py
#!/usr/bin/env python
import six
import base, unittest
from ptk.parser import ParseError, leftAssoc, rightAssoc, nonAssoc
from ptk.deferred_parser import DeferredLRParser
from ptk.lexer import token, EOF
from ptk.deferred_lexer import DeferredLexer
from ptk.grammar import production
from twisted.internet.defer import succeed
# XXXTODO factorize this with test_parser.py
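# Same arithmetic grammar as TestedParser in test_parser.py, but built on the
# deferred (Twisted) classes: every semantic action returns an already-fired
# Deferred through succeed().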
class TestedDeferredParser(DeferredLRParser, DeferredLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(TestedDeferredParser, self).__init__(*args, **kwargs)
@token(six.u('[1-9][0-9]*'))
def number(self, tok):
tok.value = int(tok.value)
@production(six.u('E -> E "+" E'))
def sum(self, left, right):
self.seen.append(('+', left, right))
return succeed(left + right)
@production(six.u('E -> E "*" E'))
def mult(self, left, right):
self.seen.append(('*', left, right))
return succeed(left * right)
@production('E -> number')
def litteral(self, n):
return succeed(n)
def deferNewSentence(self, sentence):
return succeed(None)
class DeferredParserTestCase(unittest.TestCase):
def parse(self, inputString):
def realizeDeferred(d):
self.assertTrue(d.called)
results = [None, None]
def success(result):
results[0] = result
def error(reason):
results[1] = reason
d.addCallback(success)
d.addErrback(error)
if results[1] is not None:
raise results[1].value
return results[0]
for char in inputString:
d = self.parser.deferFeed(char)
realizeDeferred(d)
d = self.parser.deferFeed(EOF)
realizeDeferred(d)
class DefaultShiftReduceDeferredTestCase(DeferredParserTestCase):
def setUp(self):
self.parser = TestedDeferredParser()
def test_shift(self):
self.parse(six.u('2+3*4'))
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
class DefaultReduceReduceDeferredTestCase(DeferredParserTestCase):
def setUp(self):
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(Parser, self).__init__(*args, **kwargs)
@token(six.u('[a-zA-Z]+'))
def word(self, tok):
pass
@production(six.u('sequence -> maybeword | sequence word |'))
def seq(self):
self.seen.append('seq')
@production(six.u('maybeword -> word |'))
def maybe(self):
self.seen.append('maybe')
def deferNewSentence(self, sentence):
return succeed(None)
self.parser = Parser()
def test_reduce(self):
self.parse(six.u(''))
self.assertEqual(self.parser.seen, ['seq'])
class LeftAssociativityDeferredTestCase(DeferredParserTestCase):
def setUp(self):
@leftAssoc('+')
class Parser(TestedDeferredParser):
pass
self.parser = Parser()
def test_assoc(self):
self.parse(six.u('1+2+3'))
self.assertEqual(self.parser.seen, [('+', 1, 2), ('+', 3, 3)])
class RightAssociativityDeferredTestCase(DeferredParserTestCase):
def setUp(self):
@rightAssoc('+')
class Parser(TestedDeferredParser):
pass
self.parser = Parser()
def test_assoc(self):
self.parse(six.u('1+2+3'))
self.assertEqual(self.parser.seen, [('+', 2, 3), ('+', 1, 5)])
class PrecedenceDeferredTestCase(DeferredParserTestCase):
def setUp(self):
@leftAssoc('+')
@leftAssoc('*')
@nonAssoc('<')
class Parser(TestedDeferredParser):
@production(six.u('E -> E "<" E'))
def inf(self):
return succeed(None)
self.parser = Parser()
def test_shift(self):
self.parse('2+3*4')
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
def test_reduce(self):
self.parse('2*3+4')
self.assertEqual(self.parser.seen, [('*', 2, 3), ('+', 6, 4)])
def test_error(self):
try:
self.parse('2 < 3 < 4')
except ParseError:
pass
else:
self.fail()
class OverridePrecedenceDeferredTestCase(DeferredParserTestCase):
def setUp(self):
@leftAssoc('+')
@leftAssoc('mult')
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(Parser, self).__init__(*args, **kwargs)
@token(six.u('[1-9][0-9]*'))
def number(self, tok):
tok.value = int(tok.value)
@production(six.u('E -> E "+" E'))
def sum(self, left, right):
self.seen.append(('+', left, right))
return succeed(left + right)
@production(six.u('E -> E "*" E'), priority='mult')
def mult(self, left, right):
self.seen.append(('*', left, right))
return succeed(left * right)
@production(six.u('E -> number'))
def litteral(self, n):
return succeed(n)
def deferNewSentence(self, sentence):
return succeed(None)
self.parser = Parser()
def test_shift(self):
self.parse('2+3*4')
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
def test_reduce(self):
self.parse('2*3+4')
self.assertEqual(self.parser.seen, [('*', 2, 3), ('+', 6, 4)])
class ListDeferredTestCase(DeferredParserTestCase):
def setUp(self):
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier*')
def idlist(self, tokens):
return succeed(tokens)
def deferNewSentence(self, symbol):
self.testCase.seen = symbol
return succeed(None)
self.parser = Parser(self)
self.seen = None
def test_empty(self):
self.parse('')
self.assertEqual(self.seen, [])
def test_items(self):
self.parse('a b c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class NonEmptyListDeferredTestCase(DeferredParserTestCase):
def setUp(self):
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier+')
def idlist(self, tokens):
return succeed(tokens)
def deferNewSentence(self, symbol):
self.testCase.seen = symbol
return succeed(None)
self.parser = Parser(self)
self.seen = None
def test_empty(self):
try:
self.parse('')
except ParseError:
pass
else:
self.fail('Got %s' % self.seen)
def test_items(self):
self.parse('a b c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class SeparatorListDeferredTestCase(DeferredParserTestCase):
def setUp(self):
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier+("|")')
def idlist(self, tokens):
return succeed(tokens)
def deferNewSentence(self, symbol):
self.testCase.seen = symbol
return succeed(None)
self.parser = Parser(self)
self.seen = None
def test_items(self):
self.parse('a | b | c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class AtMostOneDeferredTestCase(DeferredParserTestCase):
def setUp(self):
class Parser(DeferredLRParser, DeferredLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier?')
def idlist(self, tok):
return succeed(tok)
def deferNewSentence(self, symbol):
self.testCase.seen = symbol
return succeed(None)
self.parser = Parser(self)
self.seen = 1
def test_none(self):
self.parse('')
self.assertEqual(self.seen, None)
def test_value(self):
self.parse('a')
self.assertEqual(self.seen, 'a')
def test_error(self):
try:
self.parse('a b')
except ParseError:
pass
else:
self.fail('Got %s' % self.seen)
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_grammar.py
#!/usr/bin/env python
import base, unittest
from ptk.grammar import Production, GrammarError, Grammar, production
from ptk.lexer import token
from ptk.parser import ProductionParser
from ptk.utils import callbackByName
class GrammarUnderTest(Grammar):
tokens = set()
@classmethod
def addTokenType(cls, name, callback, regex, types=None):
if types is None:
cls.tokens.add(name)
else:
for name in types:
cls.tokens.add(name)
@classmethod
def tokenTypes(cls):
return cls.tokens
@classmethod
def _createProductionParser(cls, name, priority):
return ProductionParser(callbackByName(name), priority, cls)
class ProductionParserTestCase(unittest.TestCase):
def setUp(self):
class TestGrammar(GrammarUnderTest):
tokens = set()
self.parser = ProductionParser(None, None, TestGrammar)
self.grammarClass = TestGrammar
def assertHasProduction(self, prods, prod):
for aProduction in prods:
if prod == (aProduction.name, aProduction.right):
return
self.fail('Production %s not found in %s' % (prod, prods))
def _parse(self, string):
self.parser.parse(string)
return self.grammarClass.productions()
def test_production_name(self):
prod, = self._parse('test -> A')
self.assertEqual(prod.name, 'test')
def test_production_callback(self):
prod, = self._parse('test -> A')
self.assertEqual(prod.callback, None)
def test_empty_production(self):
prod, = self._parse('test -> ')
self.assertEqual(prod.right, [])
def test_empty_end(self):
prod1, prod2 = self._parse('test -> A | ')
self.assertEqual(prod1.right, ['A'])
self.assertEqual(prod2.right, [])
def test_empty_start(self):
prod1, prod2 = self._parse('test -> | A')
self.assertEqual(prod1.right, [])
self.assertEqual(prod2.right, ['A'])
def test_order(self):
prod, = self._parse('test -> A B')
self.assertEqual(prod.right, ['A', 'B'])
def test_escape_litteral(self):
prod, = self._parse(r'test -> "spam\"foo"')
self.assertEqual(prod.right, [r'spam"foo'])
def _findListSym(self, prods):
for prod in prods:
if prod.name != 'test':
return prod.name
self.fail('Cannot find list symbol in %s' % repr(prods))
def test_list(self):
prods = self._parse('test -> A* "+"')
self.assertEqual(len(prods), 4, repr(prods))
listSym = self._findListSym(prods)
self.assertHasProduction(prods, ('test', ['+']))
self.assertHasProduction(prods, (listSym, ['A']))
self.assertHasProduction(prods, (listSym, [listSym, 'A']))
self.assertHasProduction(prods, ('test', [listSym, '+']))
def test_list_not_empty(self):
prods = self._parse('test -> A+')
listSym = self._findListSym(prods)
self.assertEqual(len(prods), 3, repr(prods))
self.assertHasProduction(prods, (listSym, ['A']))
self.assertHasProduction(prods, (listSym, [listSym, 'A']))
self.assertHasProduction(prods, ('test', [listSym]))
def test_list_with_separator(self):
prods = self._parse('test -> A+(pipe)')
self.assertEqual(len(prods), 3, repr(prods))
listSym = self._findListSym(prods)
self.assertHasProduction(prods, (listSym, ['A']))
self.assertHasProduction(prods, (listSym, [listSym, 'pipe', 'A']))
self.assertHasProduction(prods, ('test', [listSym]))
def test_list_with_litteral_separator(self):
prods = self._parse('test -> A+("|")')
self.assertEqual(len(prods), 3, repr(prods))
listSym = self._findListSym(prods)
self.assertHasProduction(prods, (listSym, ['A']))
self.assertHasProduction(prods, (listSym, [listSym, '|', 'A']))
self.assertHasProduction(prods, ('test', [listSym]))
def test_atmostone(self):
prods = self._parse('test -> A?')
self.assertEqual(len(prods), 2)
self.assertHasProduction(prods, ('test', []))
self.assertHasProduction(prods, ('test', ['A']))
class ProductionTestCase(unittest.TestCase):
def setUp(self):
# A -> B C
self.production = Production('A', self.callback)
self.production.addSymbol('B', 'b')
self.production.addSymbol('C')
self.calls = list()
def callback(self, grammar, **kwargs):
self.calls.append(kwargs)
return 42
def test_duplicate(self):
try:
self.production.addSymbol('D', 'b')
except GrammarError:
pass
else:
self.fail()
def test_kwargs(self):
cb, kwargs = self.production.apply([1, 2])
cb(self, **kwargs)
self.assertEqual(self.calls, [{'b': 1}])
class GrammarTestCase(unittest.TestCase):
def test_production(self):
class G(GrammarUnderTest):
@production('A -> B')
def a(self):
pass
prod, = G.productions()
self.assertEqual(prod.name, 'A')
self.assertEqual(prod.right, ['B'])
def test_start_symbol(self):
class G(GrammarUnderTest):
@production('A -> B')
def a(self):
pass
@production('C -> D')
def c(self):
pass
grammar = G()
self.assertEqual(grammar.startSymbol, 'A')
def test_duplicate_name(self):
class G(GrammarUnderTest):
@production('A -> B')
def a1(self):
pass
@production('A -> C')
def a2(self):
pass
prod1, prod2 = G.productions()
self.assertEqual(prod1.name, 'A')
self.assertEqual(prod1.right, ['B'])
self.assertEqual(prod2.name, 'A')
self.assertEqual(prod2.right, ['C'])
def test_token_name(self):
try:
class G(GrammarUnderTest):
tokens = set(['spam'])
@production('spam -> spam')
def spam(self):
pass
except GrammarError:
pass
else:
self.fail()
def test_duplicate_production_1(self):
class G(GrammarUnderTest):
@production('A -> B|B')
def a(self):
pass
try:
G()
except GrammarError:
pass
else:
self.fail()
def test_duplicate_production_2(self):
class G(GrammarUnderTest):
@production('A -> B')
def a1(self):
pass
@production('A -> B')
def a2(self):
pass
try:
G()
except GrammarError:
pass
else:
self.fail()
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_lexer.py
#!/usr/bin/env python
import six
import base, unittest
from ptk.lexer import LexerError, token, EOF
from ptk.lexer import ProgressiveLexer, ReLexer
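# Mixin shared by all lexer test cases below: it forwards every produced token
# to the TestCase's feed() method, for both the synchronous newToken() path
# and the Twisted deferNewToken() path.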
class LexerUnderTestMixin(object):
def __init__(self, testCase):
self.testCase = testCase
super(LexerUnderTestMixin, self).__init__()
def newToken(self, tok):
self.testCase.feed(tok)
def deferNewToken(self, tok):
self.testCase.feed(tok)
from twisted.internet.defer import succeed
return succeed(None)
class LexerTestCase(unittest.TestCase):
def setUp(self):
self.tokens = list()
def feed(self, tok):
if tok is EOF:
self.tokens = tuple(self.tokens) # So that any more tokens will raise an exception
else:
self.tokens.append(tok)
def doLex(self, inputString):
self.lexer.parse(inputString)
return self.tokens
class ProgressiveLexerTestCase(LexerTestCase):
lexerClass = ProgressiveLexer
class ReLexerTestCase(LexerTestCase):
lexerClass = ReLexer
class LexerBasicTestCaseMixin(object):
def setUp(self):
super(LexerBasicTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('[a-zA-Z]+')
def ID(self, tok):
pass
@token('[0-9]+')
def NUMBER(self, tok):
tok.value = int(tok.value)
@token('\n', types=[EOF])
def EOL(self, tok):
tok.type = EOF
@token('0x[a-fA-F0-9]+', types=['NUMBER'])
def HEX(self, tok):
tok.type = 'NUMBER'
tok.value = int(tok.value, 16)
@token(r'\+\+')
def INC(self, tok):
tok.type = None
self.lexer = TestedLexer(self)
def test_single(self):
self.assertEqual(self.doLex('abc'), (('ID', 'abc'),))
def test_ignore_leading(self):
self.assertEqual(self.doLex(' abc'), (('ID', 'abc'),))
def test_ignore_middle(self):
self.assertEqual(self.doLex('a bc'), (('ID', 'a'), ('ID', 'bc')))
def test_ignore_trailing(self):
self.assertEqual(self.doLex('abc '), (('ID', 'abc'),))
def test_value(self):
self.assertEqual(self.doLex('42'), (('NUMBER', 42),))
def test_forced_value_eof(self):
self.assertEqual(self.doLex('abc\n'), (('ID', 'abc'),))
def test_forced_value(self):
self.assertEqual(self.doLex('0xf'), (('NUMBER', 15),))
def test_ignore(self):
self.assertEqual(self.doLex('a++b'), (('ID', 'a'), ('ID', 'b')))
def test_tokenvalues(self):
self.assertEqual(self.lexer.tokenTypes(), set(['ID', 'NUMBER', 'INC']))
class ProgressiveLexerBasicTestCase(LexerBasicTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerBasicTestCase(LexerBasicTestCaseMixin, ReLexerTestCase):
pass
class PositionTestCaseMixin(object):
def setUp(self):
super(PositionTestCaseMixin, self).setUp()
class TestedLexer(self.lexerClass):
@staticmethod
def ignore(char):
return char in [' ', '\n']
@token('[a-z]')
def letter(self, tok):
pass
def newToken(self, tok):
pass
self.lexer = TestedLexer()
def test_position(self):
try:
self.doLex('ab\ncd0aa')
except LexerError as exc:
self.assertEqual(exc.lineno, 2)
self.assertEqual(exc.colno, 3)
else:
self.fail()
class ProgressiveLexerPositionTestCase(PositionTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerPositionTestCase(PositionTestCaseMixin, ReLexerTestCase):
pass
class TokenTypeTestCaseMixin(object):
def setUp(self):
super(TokenTypeTestCaseMixin, self).setUp()
class TestedLexer(self.lexerClass):
def __init__(self, testCase):
self.testCase = testCase
super(TestedLexer, self).__init__()
@token('[a-z]', types=['LETTER'])
def letter(self, tok):
self.testCase.assertTrue(tok.type is None)
def newToken(self, tok):
pass
self.lexer = TestedLexer(self)
def test_none(self):
self.doLex('a')
def test_funcname(self):
self.assertFalse('letter' in self.lexer.tokenTypes())
def test_types(self):
self.assertTrue('LETTER' in self.lexer.tokenTypes())
class ProgressiveLexerTokenTypeTestCase(TokenTypeTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerTokenTypeTestCase(TokenTypeTestCaseMixin, ReLexerTestCase):
pass
class LexerByteTestCaseMixin(object):
def setUp(self):
super(LexerByteTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token(six.b('[a-zA-Z]+'))
def ID(self, tok):
pass
self.lexer = TestedLexer(self)
def test_byte_regex_gives_byte_token_value(self):
tok, = self.doLex(six.b('foo'))
if six.PY2:
self.assertTrue(isinstance(tok.value, str))
else:
self.assertTrue(isinstance(tok.value, bytes))
class ProgressiveLexerByteTestCase(LexerByteTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerByteTestCase(LexerByteTestCaseMixin, ReLexerTestCase):
pass
class LexerUnicodeTestCaseMixin(object):
def setUp(self):
super(LexerUnicodeTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token(six.u('[a-zA-Z]+'))
def ID(self, tok):
pass
self.lexer = TestedLexer(self)
def test_unicode_regex_gives_unicode_token_value(self):
tok, = self.doLex(six.u('foo'))
if six.PY2:
self.assertTrue(isinstance(tok.value, unicode))
else:
self.assertTrue(isinstance(tok.value, str))
class ProgressiveLexerUnicodeTestCase(LexerUnicodeTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerUnicodeTestCase(LexerUnicodeTestCaseMixin, ReLexerTestCase):
pass
class LexerUnambiguousTestCase(ProgressiveLexerTestCase):
def setUp(self):
super(LexerUnambiguousTestCase, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('a')
def ID(self, tok):
pass
self.lexer = TestedLexer(self)
def test_unambiguous(self):
# If we arrive in a final state without any outgoing transition, it should be an instant match.
self.lexer.feed('a')
self.assertEqual(self.tokens, [('ID', 'a')]) # Still a list because no EOF
class LexerConsumerTestCaseMixin(object):
def setUp(self):
super(LexerConsumerTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('[a-zA-Z0-9]+')
def ID(self, tok):
pass
@token('"')
def STR(self, tok):
class StringBuilder(object):
def __init__(self):
self.value = six.StringIO()
self.state = 0
def feed(self, char):
if self.state == 0:
if char == '\\':
self.state = 1
elif char == '"':
return 'STR', self.value.getvalue()
else:
self.value.write(char)
elif self.state == 1:
self.value.write(char)
self.state = 0
self.setConsumer(StringBuilder())
self.lexer = TestedLexer(self)
def test_string(self):
self.assertEqual(self.doLex(r'ab"foo\"spam"eggs'), (('ID', 'ab'), ('STR', 'foo"spam'), ('ID', 'eggs')))
class ProgressiveLexerConsumerTestCase(LexerConsumerTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerConsumerTestCase(LexerConsumerTestCaseMixin, ReLexerTestCase):
pass
class LexerDuplicateTokenNameTestCaseMixin(object):
def test_dup(self):
try:
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('a')
def ID(self, tok):
pass
@token('b')
def ID(self, tok):
pass
except TypeError:
pass
else:
self.fail()
class ProgressiveLexerDuplicateTokenNameTestCase(LexerDuplicateTokenNameTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerDuplicateTokenNameTestCase(LexerDuplicateTokenNameTestCaseMixin, ReLexerTestCase):
pass
class LexerInheritanceTestCaseMixin(object):
def setUp(self):
super(LexerInheritanceTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('[0-9]')
def digit(self, tok):
pass
class ChildLexer(TestedLexer):
def digit(self, tok):
tok.value = int(tok.value)
self.lexer = ChildLexer(self)
def test_inherit(self):
self.assertEqual(self.doLex('4'), (('digit', 4),))
class ProgressiveLexerInheritanceTestCase(LexerInheritanceTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerInheritanceTestCase(LexerInheritanceTestCaseMixin, ReLexerTestCase):
pass
class LexerUnterminatedTokenTestCaseMixin(object):
def setUp(self):
super(LexerUnterminatedTokenTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('abc')
def ID(self, tok):
pass
self.lexer = TestedLexer(self)
def test_simple(self):
self.assertEqual(self.doLex('abc'), (('ID', 'abc'),))
def test_unterminated(self):
try:
self.doLex('ab')
except LexerError:
pass
else:
self.fail()
class ProgressiveLexerUnterminatedTokenTestCase(LexerUnterminatedTokenTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerUnterminatedTokenTestCase(LexerUnterminatedTokenTestCaseMixin, ReLexerTestCase):
pass
class LexerLengthTestCaseMixin(object):
def setUp(self):
super(LexerLengthTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('<|=')
def LT(self, tok):
pass
@token('<=')
def LTE(self, tok):
pass
self.lexer = TestedLexer(self)
def test_longest(self):
self.assertEqual(self.doLex('<='), (('LTE', '<='),))
class ProgressiveLexerLengthTestCase(LexerLengthTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerLengthTestCase(LexerLengthTestCaseMixin, ReLexerTestCase):
pass
class LexerPriorityTestCaseMixin(object):
def setUp(self):
super(LexerPriorityTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('a|b')
def A(self, tok):
pass
@token('b|c')
def B(self, tok):
pass
self.lexer = TestedLexer(self)
def test_priority(self):
self.assertEqual(self.doLex('b'), (('A', 'b'),))
class ProgressiveLexerPriorityTestCase(LexerPriorityTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerPriorityTestCase(LexerPriorityTestCaseMixin, ReLexerTestCase):
pass
class LexerRemainingCharactersTestCase(ProgressiveLexerTestCase):
def setUp(self):
super(LexerRemainingCharactersTestCase, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token('abc')
def ID1(self, tok):
pass
@token('aba')
def ID2(self, tok):
pass
self.lexer = TestedLexer(self)
def test_remain(self):
self.assertEqual(self.doLex('abaaba'), (('ID2', 'aba'), ('ID2', 'aba')))
class LexerEOFTestCaseMixin(object):
def setUp(self):
super(LexerEOFTestCaseMixin, self).setUp()
class TestedLexer(LexerUnderTestMixin, self.lexerClass):
@token(r'[0-9]+')
def NUMBER(self, tok):
tok.value = int(tok.value)
@token(r'\n')
def EOL(self, tok):
tok.type = EOF
self.lexer = TestedLexer(self)
def test_eol_is_eof(self):
self.lexer.parse('42\n')
self.assertTrue(isinstance(self.tokens, tuple))
class ProgressiveLexerEOFTestCase(LexerEOFTestCaseMixin, ProgressiveLexerTestCase):
pass
class ReLexerEOFTestCase(LexerEOFTestCaseMixin, ReLexerTestCase):
pass
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_parser.py
#!/usr/bin/env python
import six
import base, unittest
from ptk.parser import LRParser, ParseError, leftAssoc, rightAssoc, nonAssoc
from ptk.lexer import ProgressiveLexer, token, EOF
from ptk.grammar import production
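# Reference grammar used by most parser tests: integer literals combined with
# "+" and "*"; every reduction is recorded in self.seen so tests can check
# evaluation order, associativity and precedence.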
class TestedParser(LRParser, ProgressiveLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(TestedParser, self).__init__(*args, **kwargs)
@token(six.u('[1-9][0-9]*'))
def number(self, tok):
tok.value = int(tok.value)
@production(six.u('E -> E "+" E'))
def sum(self, left, right):
self.seen.append(('+', left, right))
return left + right
@production(six.u('E -> E "*" E'))
def mult(self, left, right):
self.seen.append(('*', left, right))
return left * right
@production('E -> number')
def litteral(self, n):
return n
def newSentence(self, sentence):
pass
class DefaultShiftReduceTestCase(unittest.TestCase):
def setUp(self):
self.parser = TestedParser()
def test_shift(self):
self.parser.parse(six.u('2+3*4'))
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
class DefaultReduceReduceTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(Parser, self).__init__(*args, **kwargs)
@token(six.u('[a-zA-Z]+'))
def word(self, tok):
pass
@production(six.u('sequence -> maybeword | sequence word |'))
def seq(self):
self.seen.append('seq')
@production(six.u('maybeword -> word |'))
def maybe(self):
self.seen.append('maybe')
def newSentence(self, sentence):
pass
self.parser = Parser()
def test_reduce(self):
self.parser.parse(six.u(''))
self.assertEqual(self.parser.seen, ['seq'])
class LeftAssociativityTestCase(unittest.TestCase):
def setUp(self):
@leftAssoc('+')
class Parser(TestedParser):
pass
self.parser = Parser()
def test_assoc(self):
self.parser.parse(six.u('1+2+3'))
self.assertEqual(self.parser.seen, [('+', 1, 2), ('+', 3, 3)])
class RightAssociativityTestCase(unittest.TestCase):
def setUp(self):
@rightAssoc('+')
class Parser(TestedParser):
pass
self.parser = Parser()
def test_assoc(self):
self.parser.parse(six.u('1+2+3'))
self.assertEqual(self.parser.seen, [('+', 2, 3), ('+', 1, 5)])
class PrecedenceTestCase(unittest.TestCase):
def setUp(self):
@leftAssoc('+')
@leftAssoc('*')
@nonAssoc('<')
class Parser(TestedParser):
@production(six.u('E -> E "<" E'))
def inf(self):
pass
self.parser = Parser()
def test_shift(self):
self.parser.parse('2+3*4')
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
def test_reduce(self):
self.parser.parse('2*3+4')
self.assertEqual(self.parser.seen, [('*', 2, 3), ('+', 6, 4)])
def test_error(self):
try:
self.parser.parse('2 < 3 < 4')
except ParseError:
pass
else:
self.fail()
class OverridePrecedenceTestCase(unittest.TestCase):
def setUp(self):
@leftAssoc('+')
@leftAssoc('mult')
class Parser(LRParser, ProgressiveLexer):
def __init__(self, *args, **kwargs):
self.seen = list()
super(Parser, self).__init__(*args, **kwargs)
@token(six.u('[1-9][0-9]*'))
def number(self, tok):
tok.value = int(tok.value)
@production(six.u('E -> E "+" E'))
def sum(self, left, right):
self.seen.append(('+', left, right))
return left + right
@production(six.u('E -> E "*" E'), priority='mult')
def mult(self, left, right):
self.seen.append(('*', left, right))
return left * right
@production(six.u('E -> number'))
def litteral(self, n):
return n
def newSentence(self, sentence):
pass
self.parser = Parser()
def test_shift(self):
self.parser.parse('2+3*4')
self.assertEqual(self.parser.seen, [('*', 3, 4), ('+', 2, 12)])
def test_reduce(self):
self.parser.parse('2*3+4')
self.assertEqual(self.parser.seen, [('*', 2, 3), ('+', 6, 4)])
class LRTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
@token('a')
def identifier(self, tok):
pass
@production('S -> G "=" D | D')
def s(self):
pass
@production('G -> "*" D | identifier')
def g(self):
pass
@production('D -> G')
def d(self):
pass
self.parser = Parser()
def test_no_conflicts(self):
self.assertEqual(self.parser.nSR, 0)
class ListTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier*')
def idlist(self, tokens):
return tokens
def newSentence(self, symbol):
self.testCase.seen = symbol
self.parser = Parser(self)
self.seen = None
def test_empty(self):
self.parser.parse('')
self.assertEqual(self.seen, [])
def test_items(self):
self.parser.parse('a b c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class NonEmptyListTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier+')
def idlist(self, tokens):
return tokens
def newSentence(self, symbol):
self.testCase.seen = symbol
self.parser = Parser(self)
self.seen = None
def test_empty(self):
try:
self.parser.parse('')
except ParseError:
pass
else:
self.fail('Got %s' % self.seen)
def test_items(self):
self.parser.parse('a b c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class SeparatorListTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier+("|")')
def idlist(self, tokens):
return tokens
def newSentence(self, symbol):
self.testCase.seen = symbol
self.parser = Parser(self)
self.seen = None
def test_items(self):
self.parser.parse('a | b | c')
self.assertEqual(self.seen, ['a', 'b', 'c'])
class AtMostOneTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self, testCase):
super(Parser, self).__init__()
self.testCase = testCase
@token('[a-z]+')
def identifier(self, tok):
pass
@production('L -> identifier?')
def idlist(self, tok):
return tok
def newSentence(self, symbol):
self.testCase.seen = symbol
self.parser = Parser(self)
self.seen = 1
def test_none(self):
self.parser.parse('')
self.assertEqual(self.seen, None)
def test_value(self):
self.parser.parse('a')
self.assertEqual(self.seen, 'a')
def test_error(self):
try:
self.parser.parse('a b')
except ParseError:
pass
else:
self.fail('Got %s' % self.seen)
class InheritanceTestCase(unittest.TestCase):
def setUp(self):
class Parser(LRParser, ProgressiveLexer):
def __init__(self):
self.seen = None
super(Parser, self).__init__()
@token('[0-9]')
def digit(self, tok):
tok.value = int(tok.value)
@production('E -> digit')
def start(self, d):
pass
def newSentence(self, symbol):
self.seen = symbol
class Child(Parser):
def start(self, d):
return d
self.parser = Child()
def test_override(self):
self.parser.parse('4')
self.assertEqual(self.parser.seen, 4)
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_regex.py
#!/usr/bin/env python
import six
import base, unittest
from ptk.regex import RegularExpression, LitteralCharacterClass
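# These tests exercise the RegularExpression combinators directly (fromClass,
# concat, union, kleene, exponent); the textual regex syntax is covered in
# test_regex_parser.py and test_regex_tokenizer.py.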
class RegexTest(unittest.TestCase):
def test_deadend(self):
rx = RegularExpression.concat(
RegularExpression.fromClass(LitteralCharacterClass('<')),
RegularExpression.fromClass(LitteralCharacterClass('='))
)
rx.start()
rx.feed('<')
self.assertFalse(rx.isDeadEnd())
def test_newline(self):
rx = RegularExpression.fromClass(LitteralCharacterClass('\n'))
self.assertTrue(rx.match('\n'))
def test_class(self):
rx = RegularExpression.fromClass(LitteralCharacterClass('a'))
self.assertTrue(rx.match('a'))
self.assertFalse(rx.match('b'))
def test_concat(self):
rx = RegularExpression.concat(
RegularExpression.fromClass(LitteralCharacterClass('a')),
RegularExpression.fromClass(LitteralCharacterClass('b')),
RegularExpression.fromClass(LitteralCharacterClass('c'))
)
self.assertTrue(rx.match('abc'))
self.assertFalse(rx.match('ab'))
def test_union(self):
rx = RegularExpression.union(
RegularExpression.fromClass(LitteralCharacterClass('a')),
RegularExpression.fromClass(LitteralCharacterClass('b')),
RegularExpression.fromClass(LitteralCharacterClass('c'))
)
self.assertTrue(rx.match('a'))
self.assertTrue(rx.match('b'))
self.assertTrue(rx.match('c'))
self.assertFalse(rx.match('d'))
def test_kleene(self):
rx = RegularExpression.kleene(RegularExpression.fromClass(LitteralCharacterClass('a')))
self.assertTrue(rx.match(''))
self.assertTrue(rx.match('a'))
self.assertTrue(rx.match('aa'))
self.assertFalse(rx.match('ab'))
def test_exponent(self):
rx = RegularExpression.exponent(RegularExpression.fromClass(LitteralCharacterClass('a')), 2, 3)
self.assertFalse(rx.match('a'))
self.assertTrue(rx.match('aa'))
self.assertTrue(rx.match('aaa'))
self.assertFalse(rx.match('aaaa'))
def test_exponent_min(self):
rx = RegularExpression.exponent(RegularExpression.fromClass(LitteralCharacterClass('a')), 2)
self.assertFalse(rx.match('a'))
self.assertTrue(rx.match('aa'))
self.assertTrue(rx.match('aaa'))
def test_exponent_null(self):
rx = RegularExpression.exponent(RegularExpression.fromClass(LitteralCharacterClass('a')), 0, 1)
self.assertTrue(rx.match(''))
self.assertTrue(rx.match('a'))
self.assertFalse(rx.match('aa'))
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_regex_parser.py
#!/usr/bin/env python
import six
import base, unittest
from ptk.regex import RegexTokenizer, RegexParser, RegularExpression, LitteralCharacterClass, \
RegexParseError, buildRegex
class RegexParserTestCaseMixin(object):
# It's a bit of a PITA to test for RegularExpression objects equality, so we check
# matched strings
def _parse(self, rx):
return buildRegex(rx)
def _match(self, rx, s):
return rx.match(s)
def test_newline(self):
rx = self._parse(r'\n')
self.assertTrue(self._match(rx, '\n'))
def test_concat(self):
rx = self._parse('ab')
self.assertFalse(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'ab'))
self.assertFalse(self._match(rx, 'abc'))
def test_union(self):
rx = self._parse('a|b')
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'b'))
self.assertFalse(self._match(rx, 'ab'))
self.assertFalse(self._match(rx, 'c'))
def test_kleene(self):
rx = self._parse('a*')
self.assertTrue(self._match(rx, ''))
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'aa'))
self.assertFalse(self._match(rx, 'b'))
def test_closure(self):
rx = self._parse('a+')
self.assertFalse(self._match(rx, ''))
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'aa'))
self.assertFalse(self._match(rx, 'b'))
def test_exp_single(self):
rx = self._parse('a{2}')
self.assertFalse(self._match(rx, ''))
self.assertFalse(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'aa'))
self.assertFalse(self._match(rx, 'aaa'))
def test_exp_both(self):
rx = self._parse('a{2-3}')
self.assertFalse(self._match(rx, ''))
self.assertFalse(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'aa'))
self.assertTrue(self._match(rx, 'aaa'))
self.assertFalse(self._match(rx, 'aaaa'))
def test_class(self):
rx = self._parse('[a-c]')
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'b'))
self.assertTrue(self._match(rx, 'c'))
self.assertFalse(self._match(rx, 'd'))
def test_any(self):
rx = self._parse('.')
self.assertTrue(self._match(rx, 'U'))
self.assertFalse(self._match(rx, '\n'))
def test_prio_1(self):
rx = self._parse('a|b*')
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'b'))
self.assertTrue(self._match(rx, 'bb'))
self.assertFalse(self._match(rx, 'ab'))
def test_prio_2(self):
rx = self._parse('ab*')
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'ab'))
self.assertTrue(self._match(rx, 'abb'))
self.assertFalse(self._match(rx, 'abab'))
def test_prio_3(self):
rx = self._parse('a|bc')
self.assertTrue(self._match(rx, 'a'))
self.assertTrue(self._match(rx, 'bc'))
self.assertFalse(self._match(rx, 'ac'))
def test_paren(self):
rx = self._parse('(ab)*')
self.assertTrue(self._match(rx, 'ab'))
self.assertTrue(self._match(rx, 'abab'))
self.assertFalse(self._match(rx, 'abb'))
def test_extra_tokens(self):
try:
rx = self._parse('ab(')
except RegexParseError:
pass
else:
self.fail()
def test_missing_paren(self):
try:
rx = self._parse('(a')
except RegexParseError:
pass
else:
self.fail()
class RegexParserUnicodeTestCase(RegexParserTestCaseMixin, unittest.TestCase):
def _parse(self, rx):
if six.PY2 and isinstance(rx, str):
rx = rx.decode('UTF-8')
return super(RegexParserUnicodeTestCase, self)._parse(rx)
def _match(self, rx, s):
if six.PY2 and isinstance(s, str):
s = s.decode('UTF-8')
return super(RegexParserUnicodeTestCase, self)._match(rx, s)
class RegexParserBytesTestCase(RegexParserTestCaseMixin, unittest.TestCase):
def _parse(self, rx):
if six.PY3 and isinstance(rx, str):
rx = rx.encode('UTF-8')
return super(RegexParserBytesTestCase, self)._parse(rx)
def _match(self, rx, s):
if six.PY3 and isinstance(s, str):
s = s.encode('UTF-8')
return super(RegexParserBytesTestCase, self)._match(rx, s)
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_regex_tokenizer.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import six
import base, unittest
from ptk.regex import TokenizeError, RegexTokenizer, \
BackslashAtEndOfInputError, UnterminatedClassError, \
InvalidClassError, InvalidExponentError, \
CharacterClass, RegexCharacterClass, LitteralCharacterClass, \
AnyCharacterClass, ExponentToken, TokenizeError
class TokenizerTestCase(unittest.TestCase):
def _tokenize(self, regex):
if six.PY2 and isinstance(regex, str):
regex = regex.decode('UTF-8')
tokenizer = RegexTokenizer(regex)
return list(tokenizer.tokens())
class BasicTestCase(TokenizerTestCase):
def test_close_bracket(self):
try:
self._tokenize('foo]')
except TokenizeError:
pass
else:
self.fail('Did not raise TokenizeError')
def test_close_brace(self):
try:
self._tokenize('foo}')
except TokenizeError:
pass
else:
self.fail('Did not raise TokenizeError')
class ConcatTestCase(TokenizerTestCase):
def test_concat(self):
self.assertEqual(self._tokenize('abc'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('b'))),
(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('c')))])
def test_escape(self):
self.assertEqual(self._tokenize(r'\[\n'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('['))),
(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('\n')))])
def test_error(self):
try:
self._tokenize('spam\\')
except BackslashAtEndOfInputError:
pass
else:
self.fail('Did not raise BackslashAtEndOfInputError')
class RangeTestCase(TokenizerTestCase):
def test_cache(self):
rx1 = RegexCharacterClass(six.u('[a-z]'))
rx2 = RegexCharacterClass(six.u('[a-z]'))
self.assertTrue(rx1._rx is rx2._rx)
def test_unterminated(self):
try:
self._tokenize('[acb')
except UnterminatedClassError:
pass
else:
self.fail('Did not raise UnterminatedClassError')
def test_invalid(self):
try:
self._tokenize('[b-a]')
except InvalidClassError:
pass
else:
self.fail('Did not raise InvalidClassError')
def _test_range(self, rx, testin, testout):
tokens = self._tokenize(rx)
self.assertEqual(len(tokens), 1)
type_, value = tokens[0]
self.assertEqual(type_, RegexTokenizer.TOK_CLASS)
self.assertTrue(isinstance(value, CharacterClass))
for item in testin:
item = item.decode('UTF-8') if six.PY2 and isinstance(item, str) else item
self.assertTrue(item in value, '"%s" should match "%s"' % (item, rx))
for item in testout:
item = item.decode('UTF-8') if six.PY2 and isinstance(item, str) else item
self.assertFalse(item in value, '"%s" should not match "%s"' % (item, rx))
def test_simple(self):
self._test_range('[acb]', ['a', 'b', 'c'], [' ', 'd'])
def test_range(self):
self._test_range('[a-d]', ['a', 'b', 'c', 'd'], [' ', 'e'])
def test_ranges(self):
self._test_range('[a-cf-h]', ['a', 'b', 'c', 'f', 'g', 'h'], [' ', 'd', 'e', 'i'])
def test_minus(self):
self._test_range('[-a-c]', ['-', 'a', 'b', 'c'], [' ', 'd'])
def test_special(self):
self._test_range('[a|]', ['a', '|'], [' ', 'b'])
def test_escape(self):
self._test_range(r'[a\]]', ['a', ']'], [' ', 'b'])
def test_escape_start(self):
self._test_range(r'[\]-^]', [']', '^'], ['a'])
def test_escape_end(self):
self._test_range(r'[\\-\]]', ['\\', ']'], ['a'])
def test_class_w(self):
self._test_range(r'\w', [six.u('\u00E9')], ['~'])
def test_class_d_class(self):
self._test_range(r'[\wa]', [six.u('\u00E9'), 'a'], ['~'])
def test_class_d(self):
self._test_range(r'\d', ['0'], ['a'])
def test_any(self):
self.assertEqual(self._tokenize('a.'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_CLASS, AnyCharacterClass())])
class ExponentTestCase(TokenizerTestCase):
def test_invalid(self):
try:
self._tokenize('a{b}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_invalid_2(self):
try:
self._tokenize('a{1b}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_invalid_3(self):
try:
self._tokenize('a{1-2b}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_invalid_end(self):
try:
self._tokenize('a{1-a}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_unterminated(self):
try:
self._tokenize('a{1')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_unterminated_value(self):
try:
self._tokenize('a{1-}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_start(self):
try:
self._tokenize('a{-2}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_invert(self):
try:
self._tokenize('a{2-1}')
except InvalidExponentError:
pass
else:
self.fail('Did not raise InvalidExponentError')
def test_single_value(self):
self.assertEqual(self._tokenize('a{42}'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_EXPONENT, ExponentToken(42, 42))])
def test_interval(self):
self.assertEqual(self._tokenize('a{13-15}'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_EXPONENT, ExponentToken(13, 15))])
def test_kleene(self):
self.assertEqual(self._tokenize('a*'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_EXPONENT, ExponentToken(0, None))])
def test_closure(self):
self.assertEqual(self._tokenize('a+'), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('a'))),
(RegexTokenizer.TOK_EXPONENT, ExponentToken(1, None))])
class SymbolTestMixin(object):
symbol = None # Subclass responsibility
def test_start(self):
self.assertEqual(self._tokenize(r'{symbol}s'.format(symbol=self.symbol[1])), [self.symbol,
(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('s')))])
def test_middle(self):
self.assertEqual(self._tokenize(r's{symbol}e'.format(symbol=self.symbol[1])), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('s'))),
self.symbol,
(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('e')))])
def test_end(self):
self.assertEqual(self._tokenize(r's{symbol}'.format(symbol=self.symbol[1])), [(RegexTokenizer.TOK_CLASS, LitteralCharacterClass(six.u('s'))),
self.symbol])
class LParenTestCase(SymbolTestMixin, TokenizerTestCase):
symbol = (RegexTokenizer.TOK_LPAREN, six.u('('))
class RParenTestCase(SymbolTestMixin, TokenizerTestCase):
symbol = (RegexTokenizer.TOK_RPAREN, six.u(')'))
class UnionTestCase(SymbolTestMixin, TokenizerTestCase):
symbol = (RegexTokenizer.TOK_UNION, six.u('|'))
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tests/test_utils.py
#!/usr/bin/env python
import six
import base, unittest, copy
from ptk.utils import memoize, Singleton
# I don't know what discover does, but it makes this test fail...
## class MemoizeTestCase(unittest.TestCase):
## def setUp(self):
## self.calls = list()
## @memoize
## def compute(self, v):
## self.calls.append(v)
## return v
## def test_memoize(self):
## self.compute(42)
## self.compute(13)
## self.assertEqual(self.compute(42), 42)
## self.assertEqual(self.calls, [42, 13])
class SingletonUnderTest(six.with_metaclass(Singleton, object)):
__reprval__ = six.u('$')
def __init__(self):
self.value = 42
class SingletonTestCase(unittest.TestCase):
def test_instance(self):
self.assertFalse(isinstance(SingletonUnderTest, type))
def test_init(self):
self.assertEqual(SingletonUnderTest.value, 42)
def test_order(self):
values = ['spam', SingletonUnderTest]
values.sort()
self.assertEqual(values, [SingletonUnderTest, 'spam'])
def test_copy(self):
self.assertTrue(copy.copy(SingletonUnderTest) is SingletonUnderTest)
def test_deepcopy(self):
self.assertTrue(copy.deepcopy(SingletonUnderTest) is SingletonUnderTest)
def test_repr(self):
self.assertEqual(repr(SingletonUnderTest), '$')
def test_eq(self):
self.assertNotEqual(SingletonUnderTest, SingletonUnderTest.__class__())
if __name__ == '__main__':
unittest.main()
python-ptk-1.3.1/tox.ini
[tox]
envlist=py27,py32,py33,py34,py35
skip_missing_interpreters=true
[testenv]
deps=discover
six
twisted
commands=discover
changedir=tests