././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1690683572.4013605 pyparsing-3.1.1/.coveragerc0000644000000000000000000000017014461344264012605 0ustar00[run] # This line instructs only the code contained in the "pyparsing.py" to be measured for coverage. source=pyparsing ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0678387 pyparsing-3.1.1/.gitignore0000644000000000000000000000435214412577542012465 0ustar00# Created by https://www.gitignore.io/api/python,pycharm working/* ### PyCharm ### # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff: .idea # CMake cmake-build-debug/ # Mongo Explorer plugin: .idea/**/mongoSettings.xml ## File-based project format: *.iws ## Plugin-specific files: # IntelliJ /out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # Ruby plugin and RubyMine /.rakeTasks # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties ### PyCharm Patch ### # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 *.iml # modules.xml .idea/misc.xml *.ipr # Sonarlint plugin .idea/sonarlint ### Python ### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ # End of https://www.gitignore.io/api/python,pycharm # For developers on OSX .DS_Store examples/verilog/ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0678387 pyparsing-3.1.1/.pre-commit-config.yaml0000644000000000000000000000017614412577542014756 0ustar00repos: - repo: https://github.com/python/black rev: stable hooks: - id: black language_version: python3.6 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0678387 pyparsing-3.1.1/BUILDING.md0000644000000000000000000000142014412577542012205 0ustar00# BUILDING pyparsing uses the [flit](https://flit.readthedocs.io/) build system that is compliant with [PEP 517](https://www.python.org/dev/peps/pep-0517/). Therefore, any PEP 517-compliant tools can be used to build it. ## Building using flit To build the distribution files using flit, type: ``` $ flit build ``` The generated sdist and wheel will be placed in `dist/` directory. ## Building using build [build](https://github.com/pypa/build) is a generic builder for PEP 517 projects. To build the distribution files using build, type: ``` $ pyproject-build ``` The generated sdist and wheel will be placed in `dist/` directory. 
## Testing pyparsing uses [tox](https://tox.wiki/en/latest/) to run tests. In order to run the complete test suite, type: ``` $ tox ``` ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1690670157.8383648 pyparsing-3.1.1/CHANGES0000644000000000000000000052413014461312116011455 0ustar00========== Change Log ========== NOTE: In the future release 3.2.0, use of many of the pre-PEP8 methods (such as `ParserElement.parseString`) will start to raise `DeprecationWarnings`. 3.2.0 should get released some time later in 2023. I currently plan to completely drop the pre-PEP8 methods in pyparsing 4.0, though we won't see that release until at least late 2023 if not 2024. So there is plenty of time to convert existing parsers to the new function names before the old functions are completely removed. (Big help from Devin J. Pohly in structuring the code to enable this peaceful transition.) Version 3.2.0 will also discontinue support for Python versions 3.6 and 3.7. Version 3.1.1 - July, 2023 -------------------------- - Fixed regression in Word(min), reported by Ricardo Coccioli, good catch! (Issue #502) - Fixed bug in bad exception messages raised by Forward expressions. PR submitted by Kyle Sunden, thanks for your patience and collaboration on this (#493). - Fixed regression in SkipTo, where ignored expressions were not checked when looking for the target expression. Reported by catcombo, Issue #500. - Fixed type annotation for enable_packrat, PR submitted by Mike Urbach, thanks! (Issue #498) - Some general internal code cleanup. (Instigated by Michal Čihař, Issue #488) Version 3.1.0 - June, 2023 -------------------------- - Added `tag_emitter.py` to examples. This example demonstrates how to insert tags into your parsed results that are not part of the original parsed text. Version 3.1.0b2 - May, 2023 --------------------------- - Updated `create_diagram()` code to be compatible with railroad-diagrams package version 3.0. 
Fixes Issue #477 (railroad diagrams generated with black bars), reported by Sam Morley-Short. - Fixed bug in `NotAny`, where parse actions on the negated expr were not being run. This could cause `NotAny` to incorrectly fail if the expr would normally match, but would fail to match if a condition used as a parse action returned False. Fixes Issue #482, raised by byaka, thank you! - Fixed `create_diagram()` to accept keyword args, to be passed through to the `template.render()` method to generate the output HTML (PR submitted by Aussie Schnore, good catch!) - Fixed bug in `python_quoted_string` regex. - Added `examples/bf.py` Brainf*ck parser/executor example. Illustrates using a pyparsing grammar to parse language syntax, and attach executable AST nodes to the parsed results. Version 3.1.0b1 - April, 2023 ----------------------------- - Added support for Python 3.12. - API CHANGE: A slight change has been implemented when unquoting a quoted string parsed using the `QuotedString` class. Formerly, when unquoting and processing whitespace markers such as \t and \n, these substitutions would occur first, and then any additional '\' escaping would be done on the resulting string. This would parse "\\n" as "\". Now escapes and whitespace markers are all processed in a single pass working left to right, so the quoted string "\\n" would get unquoted to "\n" (a backslash followed by "n"). Fixes issue #474 raised by jakeanq, thanks! - Added named field "url" to `pyparsing.common.url`, returning the entire parsed URL string. - Fixed bug when parse actions returned an empty string for an expression that had a results name, that the results name was not saved. That is: expr = Literal("X").add_parse_action(lambda tokens: "")("value") result = expr.parse_string("X") print(result["value"]) would raise a `KeyError`. Now empty strings will be saved with the associated results name. Raised in Issue #470 by Nicco Kunzmann, thank you. 
- Fixed bug in `SkipTo` where ignore expressions were not properly handled while scanning for the target expression. Issue #475, reported by elkniwt, thanks (this bug has been there for a looooong time!). - Updated `ci.yml` permissions to limit default access to source - submitted by Joyce Brum of Google. Thanks so much! - Updated the `lucene_grammar.py` example (better support for '*' and '?' wildcards) and corrected the test cases - brought to my attention by Elijah Nicol, good catch! Version 3.1.0a1 - March, 2023 ----------------------------- - API ENHANCEMENT: `Optional(expr)` may now be written as `expr | ""` This will make this code: "{" + Optional(Literal("A") | Literal("a")) + "}" writable as: "{" + (Literal("A") | Literal("a") | "") + "}" Some related changes implemented as part of this work: - `Literal("")` now internally generates an `Empty()` (and no longer raises an exception) - `Empty` is now a subclass of `Literal` Suggested by Antony Lee (issue #412), PR (#413) by Devin J. Pohly. - Added new class property `identifier` to all Unicode set classes in `pyparsing.unicode`, using the class's values for `cls.identchars` and `cls.identbodychars`. Now Unicode-aware parsers that formerly wrote: ppu = pyparsing.unicode ident = Word(ppu.Greek.identchars, ppu.Greek.identbodychars) can now write: ident = ppu.Greek.identifier # or # ident = ppu.Ελληνικά.identifier - `ParseResults` now has a new method `deepcopy()`, in addition to the current `copy()` method. `copy()` only makes a shallow copy - any contained `ParseResults` are copied as references - changes in the copy will be seen as changes in the original. In many cases, a shallow copy is sufficient, but some applications require a deep copy. `deepcopy()` makes a deeper copy: any contained `ParseResults` or other mappings or containers are built with copies from the original, and do not get changed if the original is later changed. Addresses issue #463, reported by Bryn Pickering. 
- Reworked `delimited_list` function into the new `DelimitedList` class. `DelimitedList` has the same constructor interface as `delimited_list`, and in this release, `delimited_list` changes from a function to a synonym for `DelimitedList`. `delimited_list` and the older `delimitedList` method will be deprecated in a future release, in favor of `DelimitedList`. - Error messages from `MatchFirst` and `Or` expressions will try to give more details if one of the alternatives matches better than the others, but still fails. Question raised in Issue #464 by msdemlei, thanks! - Added new class method `ParserElement.using_each`, to simplify code that creates a sequence of `Literals`, `Keywords`, or other `ParserElement` subclasses. For instance, to define suppressible punctuation, you would previously write: LPAR, RPAR, LBRACE, RBRACE, SEMI = map(Suppress, "(){};") You can now write: LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};") `using_each` will also accept optional keyword args, which it will pass through to the class initializer. Here is an expression for single-letter variable names that might be used in an algebraic expression: algebra_var = MatchFirst( Char.using_each(string.ascii_lowercase, as_keyword=True) ) - Added new builtin `python_quoted_string`, which will match any form of single-line or multiline quoted strings defined in Python. (Inspired by discussion with Andreas Schörgenhumer in Issue #421.) - Extended `expr[]` notation for repetition of `expr` to accept a slice, where the slice's stop value indicates a `stop_on` expression: test = "BEGIN aaa bbb ccc END" BEGIN, END = Keyword.using_each("BEGIN END".split()) body_word = Word(alphas) expr = BEGIN + Group(body_word[...:END]) + END # equivalent to # expr = BEGIN + Group(ZeroOrMore(body_word, stop_on=END)) + END print(expr.parse_string(test)) Prints: ['BEGIN', ['aaa', 'bbb', 'ccc'], 'END'] - `ParserElement.validate()` is deprecated. 
It predates the support for left-recursive parsers, and was prone to false positives (warning that a grammar was invalid when it was in fact valid). It will be removed in a future pyparsing release. In its place, developers should use debugging and analytical tools, such as `ParserElement.set_debug()` and `ParserElement.create_diagram()`. (Raised in Issue #444, thanks Andrea Micheli!) - Added bool `embed` argument to `ParserElement.create_diagram()`. When passed as True, the resulting diagram will omit the ``, ``, and `` tags so that it can be embedded in other HTML source. (Useful when embedding a call to `create_diagram()` in a PyScript HTML page.) - Added `recurse` argument to `ParserElement.set_debug` to set the debug flag on an expression and all of its sub-expressions. Requested by multimeric in Issue #399. - Added '·' (Unicode MIDDLE DOT) to the set of Latin1.identbodychars. - Fixed bug in `Word` when `max=2`. Also added performance enhancement when specifying `exact` argument. Reported in issue #409 by panda-34, nice catch! - `Word` arguments are now validated if `min` and `max` are both given, that `min` <= `max`; raises `ValueError` if values are invalid. - Fixed bug in srange, when parsing escaped '/' and '\' inside a range set. - Fixed exception messages for some `ParserElements` with custom names, which instead showed their contained expression names. - Fixed bug in pyparsing.common.url, when input URL is not alone on an input line. Fixes Issue #459, reported by David Kennedy. - Multiple added and corrected type annotations. With much help from Stephen Rosen, thanks! - Some documentation and error message clarifications on pyparsing's keyword logic, cited by Basil Peace. - General docstring cleanup for Sphinx doc generation, PRs submitted by Devin J. Pohly. A dirty job, but someone has to do it - much appreciated! - `invRegex.py` example renamed to `inv_regex.py` and updated to PEP-8 variable and method naming. PR submitted by Ross J. Duff, thanks! 
- Removed examples `sparser.py` and `pymicko.py`, since each included its own GPL license in the header. Since this conflicts with pyparsing's MIT license, they were removed from the distribution to avoid confusion among those making use of them in their own projects. Version 3.0.9 - May, 2022 ------------------------- - Added Unicode set `BasicMultilingualPlane` (may also be referenced as `BMP`) representing the Basic Multilingual Plane (Unicode characters up to code point 65535). Can be used to parse most language characters, but omits emojis, wingdings, etc. Raised in discussion with Dave Tapley (issue #392). - To address mypy confusion of `pyparsing.Optional` and `typing.Optional` resulting in `error: "_SpecialForm" not callable` message reported in issue #365, fixed the import in `exceptions.py`. Nice sleuthing by Iwan Aucamp and Dominic Davis-Foster, thank you! (Removed definitions of `OptionalType`, `DictType`, and `IterableType` and replaced them with `typing.Optional`, `typing.Dict`, and `typing.Iterable` throughout.) - Fixed typo in jinja2 template for railroad diagrams, thanks for the catch Nioub (issue #388). - Removed use of deprecated `pkg_resources` package in railroad diagramming code (issue #391). - Updated `bigquery_view_parser.py` example to parse examples at https://cloud.google.com/bigquery/docs/reference/legacy-sql Version 3.0.8 - April, 2022 --------------------------- - API CHANGE: modified `pyproject.toml` to require Python version 3.6.8 or later for pyparsing 3.x. Earlier minor versions of 3.6 fail in evaluating the `version_info` class (implemented using `typing.NamedTuple`). If you are using an earlier version of Python 3.6, you will need to use pyparsing 2.4.7. - Improved pyparsing import time by deferring regex pattern compiles. PR submitted by Anthony Sottile to fix issue #362, thanks! - Updated build to use flit, PR by Michał Górny, added `BUILDING.md` doc and removed old Windows build scripts - nice cleanup work! 
- More type-hinting added for all arithmetic and logical operator methods in `ParserElement`. PR from Kazantcev Andrey, thank you. - Fixed `infix_notation`'s definitions of `lpar` and `rpar`, to accept parse expressions such that they do not get suppressed in the parsed results. PR submitted by Philippe Prados, nice work. - Fixed bug in railroad diagramming with expressions containing `Combine` elements. Reported by Jeremy White, thanks! - Added `show_groups` argument to `create_diagram` to highlight grouped elements with an unlabeled bounding box. - Added `unicode_denormalizer.py` to the examples as a demonstration of how Python's interpreter will accept Unicode characters in identifiers, but normalizes them back to ASCII so that identifiers `print` and `𝕡𝓻ᵢ𝓃𝘁` and `𝖕𝒓𝗂𝑛ᵗ` are all equivalent. - Removed imports of deprecated `sre_constants` module for catching exceptions when compiling regular expressions. PR submitted by Serhiy Storchaka, thank you. Version 3.0.7 - January, 2022 ----------------------------- - Fixed bug #345, in which delimitedList changed expressions in place using `expr.streamline()`. Reported by Kim Gräsman, thanks! - Fixed bug #346, when a string of word characters was passed to WordStart or `WordEnd` instead of just taking the default value. Originally posted as a question by Parag on StackOverflow, good catch! - Fixed bug #350, in which `White` expressions could fail to match due to unintended whitespace-skipping. Reported by Fu Hanxi, thank you! - Fixed bug #355, when a `QuotedString` is defined with characters in its quoteChar string containing regex-significant characters such as ., *, ?, [, ], etc. - Fixed bug in `ParserElement.run_tests` where comments would be displayed using `with_line_numbers`. - Added optional "min" and "max" arguments to `delimited_list`. PR submitted by Marius, thanks! - Added new API change note in `whats_new_in_pyparsing_3_0_0`, regarding a bug fix in the `bool()` behavior of `ParseResults`. 
Prior to pyparsing 3.0.x, the `ParseResults` class implementation of `__bool__` would return `False` if the `ParseResults` item list was empty, even if it contained named results. In 3.0.0 and later, `ParseResults` will return `True` if either the item list is not empty *or* if the named results dict is not empty. # generate an empty ParseResults by parsing a blank string with # a ZeroOrMore result = Word(alphas)[...].parse_string("") print(result.as_list()) print(result.as_dict()) print(bool(result)) # add a results name to the result result["name"] = "empty result" print(result.as_list()) print(result.as_dict()) print(bool(result)) Prints: [] {} False [] {'name': 'empty result'} True In previous versions, the second call to `bool()` would return `False`. - Minor enhancement to Word generation of internal regular expression, to emit consecutive characters in range, such as "ab", as "ab", not "a-b". - Fixed character ranges for search terms using non-Western characters in booleansearchparser, PR submitted by tc-yu, nice work! - Additional type annotations on public methods. Version 3.0.6 - November, 2021 ------------------------------ - Added `suppress_warning()` method to individually suppress a warning on a specific ParserElement. Used to refactor `original_text_for` to preserve internal results names, which, while undocumented, had been adopted by some projects. - Fix bug when `delimited_list` was called with a str literal instead of a parse expression. Version 3.0.5 - November, 2021 ------------------------------ - Added return type annotations for `col`, `line`, and `lineno`. - Fixed bug when `warn_ungrouped_named_tokens_in_collection` warning was raised when assigning a results name to an `original_text_for` expression. (Issue #110, would raise warning in packaging.) - Fixed internal bug where `ParserElement.streamline()` would not return self if already streamlined. - Changed `run_tests()` output to default to not showing line and column numbers. 
If line numbering is desired, call with `with_line_numbers=True`. Also fixed minor bug where separating line was not included after a test failure. Version 3.0.4 - October, 2021 ----------------------------- - Fixed bug in which `Dict` classes did not correctly return tokens as nested `ParseResults`, reported by and fix identified by Bu Sun Kim, many thanks!!! - Documented API-changing side-effect of converting `ParseResults` to use `__slots__` to pre-define instance attributes. This means that code written like this (which was allowed in pyparsing 2.4.7): result = Word(alphas).parseString("abc") result.xyz = 100 now raises this Python exception: AttributeError: 'ParseResults' object has no attribute 'xyz' To add new attribute values to ParseResults object in 3.0.0 and later, you must assign them using indexed notation: result["xyz"] = 100 You will still be able to access this new value as an attribute or as an indexed item. - Fixed bug in railroad diagramming where the vertical limit would count all expressions in a group, not just those that would create visible railroad elements. Version 3.0.3 - October, 2021 ----------------------------- - Fixed regex typo in `one_of` fix for `as_keyword=True`. - Fixed a whitespace-skipping bug, Issue #319, introduced as part of the revert of the `LineStart` changes. Reported by Marc-Alexandre Côté, thanks! - Added header column labeling > 100 in `with_line_numbers` - some input lines are longer than others. Version 3.0.2 - October, 2021 ----------------------------- - Reverted change in behavior with `LineStart` and `StringStart`, which changed the interpretation of when and how `LineStart` and `StringStart` should match when a line starts with spaces. 
In 3.0.0, the `xxxStart` expressions were not really treated like expressions in their own right, but as modifiers to the following expression when used like `LineStart() + expr`, so that if there were whitespace on the line before `expr` (which would match in versions prior to 3.0.0), the match would fail. 3.0.0 implemented this by automatically promoting `LineStart() + expr` to `AtLineStart(expr)`, which broke existing parsers that did not expect `expr` to necessarily be right at the start of the line, but only be the first token found on the line. This was reported as a regression in Issue #317. In 3.0.2, pyparsing reverts to the previous behavior, but will retain the new `AtLineStart` and `AtStringStart` expression classes, so that parsers can chose whichever behavior applies in their specific instance. Specifically: # matches expr if it is the first token on the line # (allows for leading whitespace) LineStart() + expr # matches only if expr is found in column 1 AtLineStart(expr) - Performance enhancement to `one_of` to always generate an internal `Regex`, even if `caseless` or `as_keyword` args are given as `True` (unless explicitly disabled by passing `use_regex=False`). - `IndentedBlock` class now works with `recursive` flag. By default, the results parsed by an `IndentedBlock` are grouped. This can be disabled by constructing the `IndentedBlock` with `grouped=False`. Version 3.0.1 - October, 2021 ----------------------------- - Fixed bug where `Word(max=n)` did not match word groups less than length 'n'. Thanks to Joachim Metz for catching this! - Fixed bug where `ParseResults` accidentally created recursive contents. Joachim Metz on this one also! - Fixed bug where `warn_on_multiple_string_args_to_oneof` warning is raised even when not enabled. Version 3.0.0 - October, 2021 ----------------------------- - A consolidated list of all the changes in the 3.0.0 release can be found in `docs/whats_new_in_3_0_0.rst`. 
(https://github.com/pyparsing/pyparsing/blob/master/docs/whats_new_in_3_0_0.rst) Version 3.0.0.final - October, 2021 ----------------------------------- - Added support for python `-W` warning option to call `enable_all_warnings`() at startup. Also detects setting of `PYPARSINGENABLEALLWARNINGS` environment variable to any non-blank value. (If using `-Wd` for testing, but wishing to disable pyparsing warnings, add `-Wi:::pyparsing`.) - Fixed named results returned by `url` to match fields as they would be parsed using `urllib.parse.urlparse`. - Early response to `with_line_numbers` was positive, with some requested enhancements: . added a trailing "|" at the end of each line (to show presence of trailing spaces); can be customized using `eol_mark` argument . added expand_tabs argument, to control calling str.expandtabs (defaults to True to match `parseString`) . added mark_spaces argument to support display of a printing character in place of spaces, or Unicode symbols for space and tab characters . added mark_control argument to support highlighting of control characters using '.' or Unicode symbols, such as "␍" and "␊". - Modified helpers `common_html_entity` and `replace_html_entity()` to use the HTML entity definitions from `html.entities.html5`. - Updated the class diagram in the pyparsing docs directory, along with the supporting .puml file (PlantUML markup) used to create the diagram. - Added global method `autoname_elements()` to call `set_name()` on all locally defined `ParserElements` that haven't been explicitly named using `set_name()`, using their local variable name. Useful for setting names on multiple elements when creating a railroad diagram. a = pp.Literal("a") b = pp.Literal("b").set_name("bbb") pp.autoname_elements() `a` will get named "a", while `b` will keep its name "bbb". Version 3.0.0rc2 - October, 2021 -------------------------------- - Added `url` expression to `pyparsing_common`. (Sample code posted by Wolfgang Fahl, very nice!) 
This new expression has been added to the `urlExtractorNew.py` example, to show how it extracts URL fields into separate results names. - Added method to `pyparsing_test` to help debugging, `with_line_numbers`. Returns a string with line and column numbers corresponding to values shown when parsing with expr.set_debug(): data = """\ A 100""" expr = pp.Word(pp.alphanums).set_name("word").set_debug() print(ppt.with_line_numbers(data)) expr[...].parseString(data) prints: 1 1234567890 1: A 2: 100 Match word at loc 3(1,4) A ^ Matched word -> ['A'] Match word at loc 11(2,7) 100 ^ Matched word -> ['100'] - Added new example `cuneiform_python.py` to demonstrate creating a new Unicode range, and writing a Cuneiform->Python transformer (inspired by zhpy). - Fixed issue #272, reported by PhasecoreX, when `LineStart`() expressions would match input text that was not necessarily at the beginning of a line. As part of this fix, two new classes have been added: AtLineStart and AtStringStart. The following expressions are equivalent: LineStart() + expr and AtLineStart(expr) StringStart() + expr and AtStringStart(expr) [`LineStart` and `StringStart` changes reverted in 3.0.2.] - Fixed `ParseFatalExceptions` failing to override normal exceptions or expression matches in `MatchFirst` expressions. Addresses issue #251, reported by zyp-rgb. - Fixed bug in which `ParseResults` replaces a collection type value with an invalid type annotation (as a result of changed behavior in Python 3.9). Addresses issue #276, reported by Rob Shuler, thanks. - Fixed bug in `ParseResults` when calling `__getattr__` for special double-underscored methods. Now raises `AttributeError` for non-existent results when accessing a name starting with '__'. Addresses issue #208, reported by Joachim Metz. - Modified debug fail messages to include the expression name to make it easier to sync up match vs success/fail debug messages. 
Version 3.0.0rc1 - September, 2021 ---------------------------------- - Railroad diagrams have been reformatted: . creating diagrams is easier - call expr.create_diagram("diagram_output.html") create_diagram() takes 3 arguments: . the filename to write the diagram HTML . optional 'vertical' argument, to specify the minimum number of items in a path to be shown vertically; default=3 . optional 'show_results_names' argument, to specify whether results name annotations should be shown; default=False . every expression that gets a name using `setName()` gets separated out as a separate subdiagram . results names can be shown as annotations to diagram items . `Each`, `FollowedBy`, and `PrecededBy` elements get [ALL], [LOOKAHEAD], and [LOOKBEHIND] annotations . removed annotations for Suppress elements . some diagram cleanup when a grammar contains Forward elements . check out the examples make_diagram.py and railroad_diagram_demo.py - Type annotations have been added to most public API methods and classes. - Better exception messages to show full word where an exception occurred. Word(alphas, alphanums)[...].parseString("ab1 123", parseAll=True) Was: pyparsing.ParseException: Expected end of text, found '1' (at char 4), (line:1, col:5) Now: pyparsing.exceptions.ParseException: Expected end of text, found '123' (at char 4), (line:1, col:5) - Suppress can be used to suppress text skipped using "...". source = "lead in START relevant text END trailing text" start_marker = Keyword("START") end_marker = Keyword("END") find_body = Suppress(...) + start_marker + ... + end_marker print(find_body.parseString(source).dump()) Prints: ['START', 'relevant text ', 'END'] - _skipped: ['relevant text '] - New string constants `identchars` and `identbodychars` to help in defining identifier Word expressions Two new module-level strings have been added to help when defining identifiers, `identchars` and `identbodychars`. 
Instead of writing:: import pyparsing as pp identifier = pp.Word(pp.alphas + "_", pp.alphanums + "_") you will be able to write:: identifier = pp.Word(pp.identchars, pp.identbodychars) Those constants have also been added to all the Unicode string classes:: import pyparsing as pp ppu = pp.pyparsing_unicode cjk_identifier = pp.Word(ppu.CJK.identchars, ppu.CJK.identbodychars) greek_identifier = pp.Word(ppu.Greek.identchars, ppu.Greek.identbodychars) - Added a caseless parameter to the `CloseMatch` class to allow for casing to be ignored when checking for close matches. (Issue #281) (PR by Adrian Edwards, thanks!) - Fixed bug in Located class when used with a results name. (Issue #294) - Fixed bug in `QuotedString` class when the escaped quote string is not a repeated character. (Issue #263) - `parseFile()` and `create_diagram()` methods now will accept `pathlib.Path` arguments. Version 3.0.0b3 - August, 2021 ------------------------------ - PEP-8 compatible names are being introduced in pyparsing version 3.0! All methods such as `parseString` have been replaced with the PEP-8 compliant name `parse_string`. In addition, arguments such as `parseAll` have been renamed to `parse_all`. For backward-compatibility, synonyms for all renamed methods and arguments have been added, so that existing pyparsing parsers will not break. These synonyms will be removed in a future release. In addition, the Optional class has been renamed to Opt, since it clashes with the common typing.Optional type specifier that is used in the Python type annotations. A compatibility synonym is defined for now, but will be removed in a future release. - HUGE NEW FEATURE - Support for left-recursive parsers! Following the method used in Python's PEG parser, pyparsing now supports left-recursive parsers when left recursion is enabled. 
import pyparsing as pp pp.ParserElement.enable_left_recursion() # a common left-recursion definition # define a list of items as 'list + item | item' # BNF: # item_list := item_list item | item # item := word of alphas item_list = pp.Forward() item = pp.Word(pp.alphas) item_list <<= item_list + item | item item_list.run_tests("""\ To parse or not to parse that is the question """) Prints: ['To', 'parse', 'or', 'not', 'to', 'parse', 'that', 'is', 'the', 'question'] Great work contributed by Max Fischer! - `delimited_list` now supports an additional flag `allow_trailing_delim`, to optionally parse an additional delimiter at the end of the list. Contributed by Kazantcev Andrey, thanks! - Removed internal comparison of results values against b"", which raised a `BytesWarning` when run with `python -bb`. Fixes issue #271 reported by Florian Bruhin, thank you! - Fixed STUDENTS table in sql2dot.py example, fixes issue #261 reported by legrandlegrand - much better. - Python 3.5 will not be supported in the pyparsing 3 releases. This will allow for future pyparsing releases to add parameter type annotations, and to take advantage of dict key ordering in internal results name tracking. Version 3.0.0b2 - December, 2020 -------------------------------- - API CHANGE `locatedExpr` is being replaced by the class `Located`. `Located` has the same constructor interface as `locatedExpr`, but fixes bugs in the returned `ParseResults` when the searched expression contains multiple tokens, or has internal results names. `locatedExpr` is deprecated, and will be removed in a future release. 
Version 3.0.0b1 - November, 2020 -------------------------------- - API CHANGE Diagnostic flags have been moved to an enum, `pyparsing.Diagnostics`, and they are enabled through module-level methods: - `pyparsing.enable_diag()` - `pyparsing.disable_diag()` - `pyparsing.enable_all_warnings()` - API CHANGE Most previous `SyntaxWarnings` that were warned when using pyparsing classes incorrectly have been converted to `TypeError` and `ValueError` exceptions, consistent with Python calling conventions. All warnings warned by diagnostic flags have been converted from `SyntaxWarnings` to `UserWarnings`. - To support parsers that are intended to generate native Python collection types such as lists and dicts, the `Group` and `Dict` classes now accept an additional boolean keyword argument `aslist` and `asdict` respectively. See the `jsonParser.py` example in the `pyparsing/examples` source directory for how to return types as `ParseResults` and as Python collection types, and the distinctions in working with the different types. In addition parse actions that must return a value of list type (which would normally be converted internally to a `ParseResults`) can override this default behavior by returning their list wrapped in the new `ParseResults.List` class: # this parse action tries to return a list, but pyparsing # will convert to a ParseResults def return_as_list_but_still_get_parse_results(tokens): return tokens.asList() # this parse action returns the tokens as a list, and pyparsing will # maintain its list type in the final parsing results def return_as_list(tokens): return ParseResults.List(tokens.asList()) This is the mechanism used internally by the `Group` class when defined using `aslist=True`. - A new `IndentedBlock` class is introduced, to eventually replace the current `indentedBlock` helper method. 
The interface is largely the same, however, the new class manages its own internal indentation stack, so it is no longer necessary to maintain an external `indentStack` variable. - API CHANGE Added `cache_hit` keyword argument to debug actions. Previously, if packrat parsing was enabled, the debug methods were not called in the event of cache hits. Now these methods will be called, with an added argument `cache_hit=True`. If you are using packrat parsing and enable debug on expressions using a custom debug method, you can add the `cache_hit=False` keyword argument, and your method will be called on packrat cache hits. If you choose not to add this keyword argument, the debug methods will fail silently, behaving as they did previously. - When using `setDebug` with packrat parsing enabled, packrat cache hits will now be included in the output, shown with a leading '*'. (Previously, cache hits and responses were not included in debug output.) For those using custom debug actions, see the previous item regarding an optional API change for those methods. - `setDebug` output will also show more details about what expression is about to be parsed (the current line of text being parsed, and the current parse position): Match integer at loc 0(1,1) 1 2 3 ^ Matched integer -> ['1'] The current debug location will also be indicated after whitespace has been skipped (was previously inconsistent, reported in Issue #244, by Frank Goyens, thanks!). - Modified the repr() output for `ParseResults` to include the class name as part of the output. This is to clarify for new pyparsing users who misread the repr output as a tuple of a list and a dict. pyparsing results will now read like: ParseResults(['abc', 'def'], {'qty': 100}] instead of just: (['abc', 'def'], {'qty': 100}] - Fixed bugs in Each when passed `OneOrMore` or `ZeroOrMore` expressions: . first expression match could be enclosed in an extra nesting level . 
out-of-order expressions now handled correctly if mixed with required expressions . results names are maintained correctly for these expressions - Fixed traceback trimming, and added `ParserElement.verbose_traceback` save/restore to `reset_pyparsing_context()`. - Default string for `Word` expressions now also include indications of `min` and `max` length specification, if applicable, similar to regex length specifications: Word(alphas) -> "W:(A-Za-z)" Word(nums) -> "W:(0-9)" Word(nums, exact=3) -> "W:(0-9){3}" Word(nums, min=2) -> "W:(0-9){2,...}" Word(nums, max=3) -> "W:(0-9){1,3}" Word(nums, min=2, max=3) -> "W:(0-9){2,3}" For expressions of the `Char` class (similar to `Word(..., exact=1)`, the expression is simply the character range in parentheses: Char(nums) -> "(0-9)" Char(alphas) -> "(A-Za-z)" - Removed `copy()` override in `Keyword` class which did not preserve definition of ident chars from the original expression. PR #233 submitted by jgrey4296, thanks! - In addition to `pyparsing.__version__`, there is now also a `pyparsing.__version_info__`, following the same structure and field names as in `sys.version_info`. Version 3.0.0a2 - June, 2020 ---------------------------- - Summary of changes for 3.0.0 can be found in "What's New in Pyparsing 3.0.0" documentation. - API CHANGE Changed result returned when parsing using `countedArray`, the array items are no longer returned in a doubly-nested list. - An excellent new enhancement is the new railroad diagram generator for documenting pyparsing parsers: import pyparsing as pp from pyparsing.diagram import to_railroad, railroad_to_html from pathlib import Path # define a simple grammar for parsing street addresses such # as "123 Main Street" # number word... number = pp.Word(pp.nums).setName("number") name = pp.Word(pp.alphas).setName("word")[1, ...] 
parser = number("house_number") + name("street") parser.setName("street address") # construct railroad track diagram for this parser and # save as HTML rr = to_railroad(parser) Path('parser_rr_diag.html').write_text(railroad_to_html(rr)) Very nice work provided by Michael Milton, thanks a ton! - Enhanced default strings created for Word expressions, now showing string ranges if possible. `Word(alphas)` would formerly print as `W:(ABCD...)`, now prints as `W:(A-Za-z)`. - Added `ignoreWhitespace(recurse:bool = True)`` and added a recurse argument to `leaveWhitespace`, both added to provide finer control over pyparsing's whitespace skipping. Also contributed by Michael Milton. - The unicode range definitions for the various languages were recalculated by interrogating the unicodedata module by character name, selecting characters that contained that language in their Unicode name. (Issue #227) Also, pyparsing_unicode.Korean was renamed to Hangul (Korean is also defined as a synonym for compatibility). - Enhanced `ParseResults` dump() to show both results names and list subitems. Fixes bug where adding a results name would hide lower-level structures in the `ParseResults`. - Added new __diag__ warnings: "warn_on_parse_using_empty_Forward" - warns that a Forward has been included in a grammar, but no expression was attached to it using '<<=' or '<<' "warn_on_assignment_to_Forward" - warns that a Forward has been created, but was probably later overwritten by erroneously using '=' instead of '<<=' (this is a common mistake when using Forwards) (**currently not working on PyPy**) - Added `ParserElement`.recurse() method to make it simpler for grammar utilities to navigate through the tree of expressions in a pyparsing grammar. - Fixed bug in `ParseResults` repr() which showed all matching entries for a results name, even if `listAllMatches` was set to False when creating the `ParseResults` originally. Reported by Nicholas42 on GitHub, good catch! 
(Issue #205) - Modified refactored modules to use relative imports, as pointed out by setuptools project member jaraco, thank you! - Off-by-one bug found in the roman_numerals.py example, a bug that has been there for about 14 years! PR submitted by Jay Pedersen, nice catch! - A simplified Lua parser has been added to the examples (lua_parser.py). - Added make_diagram.py to the examples directory to demonstrate creation of railroad diagrams for selected pyparsing examples. Also restructured some examples to make their parsers importable without running their embedded tests. Version 3.0.0a1 - April, 2020 ----------------------------- - Removed Py2.x support and other deprecated features. Pyparsing now requires Python 3.5 or later. If you are using an earlier version of Python, you must use a Pyparsing 2.4.x version Deprecated features removed: . `ParseResults.asXML()` - if used for debugging, switch to using `ParseResults.dump()`; if used for data transfer, use `ParseResults.asDict()` to convert to a nested Python dict, which can then be converted to XML or JSON or other transfer format . `operatorPrecedence` synonym for `infixNotation` - convert to calling `infixNotation` . `commaSeparatedList` - convert to using pyparsing_common.comma_separated_list . `upcaseTokens` and `downcaseTokens` - convert to using `pyparsing_common.upcaseTokens` and `downcaseTokens` . __compat__.collect_all_And_tokens will not be settable to False to revert to pre-2.3.1 results name behavior - review use of names for `MatchFirst` and Or expressions containing And expressions, as they will return the complete list of parsed tokens, not just the first one. Use `__diag__.warn_multiple_tokens_in_named_alternation` to help identify those expressions in your parsers that will have changed as a result. - Removed support for running `python setup.py test`. The setuptools maintainers consider the test command deprecated (see ). To run the Pyparsing test, use the command `tox`. 
- API CHANGE: The staticmethod `ParseException.explain` has been moved to `ParseBaseException.explain_exception`, and a new `explain` instance method added to `ParseBaseException`. This will make calls to `explain` much more natural: try: expr.parseString("...") except ParseException as pe: print(pe.explain()) - POTENTIAL API CHANGE: `ZeroOrMore` expressions that have results names will now include empty lists for their name if no matches are found. Previously, no named result would be present. Code that tested for the presence of any expressions using "if name in results:" will now always return True. This code will need to change to "if name in results and results[name]:" or just "if results[name]:". Also, any parser unit tests that check the `asDict()` contents will now see additional entries for parsers having named `ZeroOrMore` expressions, whose values will be `[]`. - POTENTIAL API CHANGE: Fixed a bug in which calls to `ParserElement.setDefaultWhitespaceChars` did not change whitespace definitions on any pyparsing built-in expressions defined at import time (such as `quotedString`, or those defined in pyparsing_common). This would lead to confusion when built-in expressions would not use updated default whitespace characters. Now a call to `ParserElement.setDefaultWhitespaceChars` will also go and update all pyparsing built-ins to use the new default whitespace characters. (Note that this will only modify expressions defined within the pyparsing module.) Prompted by work on a StackOverflow question posted by jtiai. - Expanded __diag__ and __compat__ to actual classes instead of just namespaces, to add some helpful behavior: - enable() and .disable() methods to give extra help when setting or clearing flags (detects invalid flag names, detects when trying to set a __compat__ flag that is no longer settable). Use these methods now to set or clear flags, instead of directly setting to True or False. 
import pyparsing as pp pp.__diag__.enable("warn_multiple_tokens_in_named_alternation") - __diag__.enable_all_warnings() is another helper that sets all "warn*" diagnostics to True. pp.__diag__.enable_all_warnings() - added new warning, "warn_on_match_first_with_lshift_operator" to warn when using '<<' with a '|' `MatchFirst` operator, which will create an unintended expression due to precedence of operations. Example: This statement will erroneously define the `fwd` expression as just `expr_a`, even though `expr_a | expr_b` was intended, since '<<' operator has precedence over '|': fwd << expr_a | expr_b To correct this, use the '<<=' operator (preferred) or parentheses to override operator precedence: fwd <<= expr_a | expr_b or fwd << (expr_a | expr_b) - Cleaned up default tracebacks when getting a `ParseException` when calling `parseString`. Exception traces should now stop at the call in `parseString`, and not include the internal traceback frames. (If the full traceback is desired, then set `ParserElement`.verbose_traceback to True.) - Fixed `FutureWarnings` that sometimes are raised when '[' passed as a character to Word. - New namespace, assert methods and classes added to support writing unit tests. - `assertParseResultsEquals` - `assertParseAndCheckList` - `assertParseAndCheckDict` - `assertRunTestResults` - `assertRaisesParseException` - `reset_pyparsing_context` context manager, to restore pyparsing config settings - Enhanced error messages and error locations when parsing fails on the Keyword or `CaselessKeyword` classes due to the presence of a preceding or trailing keyword character. Surfaced while working with metaperl on issue #201. - Enhanced the Regex class to be compatible with re's compiled with the re-equivalent regex module. 
Individual expressions can be built with regex compiled expressions using: import pyparsing as pp import regex # would use regex for this expression integer_parser = pp.Regex(regex.compile(r'\d+')) Inspired by PR submitted by bjrnfrdnnd on GitHub, very nice! - Fixed handling of `ParseSyntaxExceptions` raised as part of Each expressions, when sub-expressions contain '-' backtrack suppression. As part of resolution to a question posted by John Greene on StackOverflow. - Potentially *huge* performance enhancement when parsing Word expressions built from pyparsing_unicode character sets. Word now internally converts ranges of consecutive characters to regex character ranges (converting "0123456789" to "0-9" for instance), resulting in as much as 50X improvement in performance! Work inspired by a question posted by Midnighter on StackOverflow. - Improvements in select_parser.py, to include new SQL syntax from SQLite. PR submitted by Robert Coup, nice work! - Fixed bug in `PrecededBy` which caused infinite recursion, issue #127 submitted by EdwardJB. - Fixed bug in `CloseMatch` where end location was incorrectly computed; and updated partial_gene_match.py example. - Fixed bug in `indentedBlock` with a parser using two different types of nested indented blocks with different indent values, but sharing the same indent stack, submitted by renzbagaporo. - Fixed bug in Each when using Regex, when Regex expression would get parsed twice; issue #183 submitted by scauligi, thanks! - `BigQueryViewParser.py` added to examples directory, PR submitted by Michael Smedberg, nice work! - booleansearchparser.py added to examples directory, PR submitted by xecgr. Builds on searchparser.py, adding support for '*' wildcards and non-Western alphabets. - Fixed bug in delta_time.py example, when using a quantity of seconds/minutes/hours/days > 999. - Fixed bug in regex definitions for real and sci_real expressions in pyparsing_common. Issue #194, reported by Michael Wayne Goodman, thanks! 
- Fixed `FutureWarning` raised beginning in Python 3.7 for Regex expressions containing '[' within a regex set. - Minor reformatting of output from `runTests` to make embedded comments more visible. - And finally, many thanks to those who helped in the restructuring of the pyparsing code base as part of this release. Pyparsing now has more standard package structure, more standard unit tests, and more standard code formatting (using black). Special thanks to jdufresne, klahnakoski, mattcarmody, and ckeygusuz, to name just a few. Version 2.4.7 - April, 2020 --------------------------- - Backport of selected fixes from 3.0.0 work: . Each bug with Regex expressions . And expressions not properly constructing with generator . Traceback abbreviation . Bug in delta_time example . Fix regexen in pyparsing_common.real and .sci_real . Avoid FutureWarning on Python 3.7 or later . Cleanup output in runTests if comments are embedded in test string Version 2.4.6 - December, 2019 ------------------------------ - Fixed typos in White mapping of whitespace characters, to use correct "\u" prefix instead of "u\". - Fix bug in left-associative ternary operators defined using infixNotation. First reported on StackOverflow by user Jeronimo. - Backport of pyparsing_test namespace from 3.0.0, including TestParseResultsAsserts mixin class defining unittest-helper methods: . def assertParseResultsEquals( self, result, expected_list=None, expected_dict=None, msg=None) . def assertParseAndCheckList( self, expr, test_string, expected_list, msg=None, verbose=True) . def assertParseAndCheckDict( self, expr, test_string, expected_dict, msg=None, verbose=True) . def assertRunTestResults( self, run_tests_report, expected_parse_results=None, msg=None) . 
def assertRaisesParseException(self, exc_type=ParseException, msg=None) To use the methods in this mixin class, declare your unittest classes as: from pyparsing import pyparsing_test as ppt class MyParserTest(ppt.TestParseResultsAsserts, unittest.TestCase): ... Version 2.4.5 - November, 2019 ------------------------------ - NOTE: final release compatible with Python 2.x. - Fixed issue with reading README.rst as part of setup.py's initialization of the project's long_description, with a non-ASCII space character causing errors when installing from source on platforms where UTF-8 is not the default encoding. Version 2.4.4 - November, 2019 -------------------------------- - Unresolved symbol reference in 2.4.3 release was masked by stdout buffering in unit tests, thanks for the prompt heads-up, Ned Batchelder! Version 2.4.3 - November, 2019 ------------------------------ - Fixed a bug in ParserElement.__eq__ that would for some parsers create a recursion error at parser definition time. Thanks to Michael Clerx for the assist. (Addresses issue #123) - Fixed bug in indentedBlock where a block that ended at the end of the input string could cause pyparsing to loop forever. Raised as part of discussion on StackOverflow with geckos. - Backports from pyparsing 3.0.0: . __diag__.enable_all_warnings() . Fixed bug in PrecededBy which caused infinite recursion, issue #127 . support for using regex-compiled RE to construct Regex expressions Version 2.4.2 - July, 2019 -------------------------- - Updated the shorthand notation that has been added for repetition expressions: expr[min, max], with '...' valid as a min or max value: - expr[...] and expr[0, ...] are equivalent to ZeroOrMore(expr) - expr[1, ...] is equivalent to OneOrMore(expr) - expr[n, ...] 
or expr[n,] is equivalent to expr*n + ZeroOrMore(expr) (read as "n or more instances of expr") - expr[..., n] is equivalent to expr*(0, n) - expr[m, n] is equivalent to expr*(m, n) Note that expr[..., n] and expr[m, n] do not raise an exception if more than n exprs exist in the input stream. If this behavior is desired, then write expr[..., n] + ~expr. Better interpretation of [...] as ZeroOrMore raised by crowsonkb, thanks for keeping me in line! If upgrading from 2.4.1 or 2.4.1.1 and you have used `expr[...]` for `OneOrMore(expr)`, it must be updated to `expr[1, ...]`. - The defaults on all the `__diag__` switches have been set to False, to avoid getting alarming warnings. To use these diagnostics, set them to True after importing pyparsing. Example: import pyparsing as pp pp.__diag__.warn_multiple_tokens_in_named_alternation = True - Fixed bug introduced by the use of __getitem__ for repetition, overlooking Python's legacy implementation of iteration by sequentially calling __getitem__ with increasing numbers until getting an IndexError. Found during investigation of problem reported by murlock, merci! Version 2.4.2a1 - July, 2019 ---------------------------- It turns out I got the meaning of `[...]` absolutely backwards, so I've deleted 2.4.1 and am repushing this release as 2.4.2a1 for people to give it a try before I can call it ready to go. The `expr[...]` notation was pushed out to be synonymous with `OneOrMore(expr)`, but this is really counter to most Python notations (and even other internal pyparsing notations as well). It should have been defined to be equivalent to ZeroOrMore(expr). - Changed [...] to emit ZeroOrMore instead of OneOrMore. - Removed code that treats ParserElements like iterables. - Change all __diag__ switches to False. Version 2.4.1.1 - July 24, 2019 ------------------------------- This is a re-release of version 2.4.1 to restore the release history in PyPI, since the 2.4.1 release was deleted. 
There are 3 known issues in this release, which are fixed in the upcoming 2.4.2: - API change adding support for `expr[...]` - the original code in 2.4.1 incorrectly implemented this as OneOrMore. Code using this feature under this release should explicitly use `expr[0, ...]` for ZeroOrMore and `expr[1, ...]` for OneOrMore. In 2.4.2 you will be able to write `expr[...]` equivalent to `ZeroOrMore(expr)`. - Bug if composing And, Or, MatchFirst, or Each expressions using an expression. This only affects code which uses explicit expression construction using the And, Or, etc. classes instead of using overloaded operators '+', '^', and so on. If constructing an And using a single expression, you may get an error that "cannot multiply ParserElement by 0 or (0, 0)" or a Python `IndexError`. Change code like cmd = Or(Word(alphas)) to cmd = Or([Word(alphas)]) (Note that this is not the recommended style for constructing Or expressions.) - Some newly-added `__diag__` switches are enabled by default, which may give rise to noisy user warnings for existing parsers. You can disable them using: import pyparsing as pp pp.__diag__.warn_multiple_tokens_in_named_alternation = False pp.__diag__.warn_ungrouped_named_tokens_in_collection = False pp.__diag__.warn_name_set_on_empty_Forward = False pp.__diag__.warn_on_multiple_string_args_to_oneof = False pp.__diag__.enable_debug_on_named_expressions = False In 2.4.2 these will all be set to False by default. Version 2.4.1 - July, 2019 -------------------------- - NOTE: Deprecated functions and features that will be dropped in pyparsing 2.5.0 (planned next release): . support for Python 2 - ongoing users running with Python 2 can continue to use pyparsing 2.4.1 . ParseResults.asXML() - if used for debugging, switch to using ParseResults.dump(); if used for data transfer, use ParseResults.asDict() to convert to a nested Python dict, which can then be converted to XML or JSON or other transfer format . 
operatorPrecedence synonym for infixNotation - convert to calling infixNotation . commaSeparatedList - convert to using pyparsing_common.comma_separated_list . upcaseTokens and downcaseTokens - convert to using pyparsing_common.upcaseTokens and downcaseTokens . __compat__.collect_all_And_tokens will not be settable to False to revert to pre-2.3.1 results name behavior - review use of names for MatchFirst and Or expressions containing And expressions, as they will return the complete list of parsed tokens, not just the first one. Use __diag__.warn_multiple_tokens_in_named_alternation (described below) to help identify those expressions in your parsers that will have changed as a result. - A new shorthand notation has been added for repetition expressions: expr[min, max], with '...' valid as a min or max value: - expr[...] is equivalent to OneOrMore(expr) - expr[0, ...] is equivalent to ZeroOrMore(expr) - expr[1, ...] is equivalent to OneOrMore(expr) - expr[n, ...] or expr[n,] is equivalent to expr*n + ZeroOrMore(expr) (read as "n or more instances of expr") - expr[..., n] is equivalent to expr*(0, n) - expr[m, n] is equivalent to expr*(m, n) Note that expr[..., n] and expr[m, n] do not raise an exception if more than n exprs exist in the input stream. If this behavior is desired, then write expr[..., n] + ~expr. - '...' can also be used as short hand for SkipTo when used in adding parse expressions to compose an And expression. Literal('start') + ... + Literal('end') And(['start', ..., 'end']) are both equivalent to: Literal('start') + SkipTo('end')("_skipped*") + Literal('end') The '...' form has the added benefit of not requiring repeating the skip target expression. Note that the skipped text is returned with '_skipped' as a results name, and that the contents of `_skipped` will contain a list of text from all `...`s in the expression. - '...' can also be used as a "skip forward in case of error" expression: expr = "start" + (Word(nums).setName("int") | ...) 
+ "end" expr.parseString("start 456 end") ['start', '456', 'end'] expr.parseString("start 456 foo 789 end") ['start', '456', 'foo 789 ', 'end'] - _skipped: ['foo 789 '] expr.parseString("start foo end") ['start', 'foo ', 'end'] - _skipped: ['foo '] expr.parseString("start end") ['start', '', 'end'] - _skipped: ['missing '] Note that in all the error cases, the '_skipped' results name is present, showing a list of the extra or missing items. This form is only valid when used with the '|' operator. - Improved exception messages to show what was actually found, not just what was expected. word = pp.Word(pp.alphas) pp.OneOrMore(word).parseString("aaa bbb 123", parseAll=True) Former exception message: pyparsing.ParseException: Expected end of text (at char 8), (line:1, col:9) New exception message: pyparsing.ParseException: Expected end of text, found '1' (at char 8), (line:1, col:9) - Added diagnostic switches to help detect and warn about common parser construction mistakes, or enable additional parse debugging. 
Switches are attached to the pyparsing.__diag__ namespace object: - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results name is defined on a MatchFirst or Or expression with one or more And subexpressions (default=True) - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results name is defined on a containing expression with ungrouped subexpressions that also have results names (default=True) - warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined with a results name, but has no contents defined (default=False) - warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is incorrectly called with multiple str arguments (default=True) - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent calls to ParserElement.setName() (default=False) warn_multiple_tokens_in_named_alternation is intended to help those who currently have set __compat__.collect_all_And_tokens to False as a workaround for using the pre-2.3.1 code with named MatchFirst or Or expressions containing an And expression. - Added ParseResults.from_dict classmethod, to simplify creation of a ParseResults with results names using a dict, which may be nested. This makes it easy to add a sub-level of named items to the parsed tokens in a parse action. - Added asKeyword argument (default=False) to oneOf, to force keyword-style matching on the generated expressions. - ParserElement.runTests now accepts an optional 'file' argument to redirect test output to a file-like object (such as a StringIO, or opened file). Default is to write to sys.stdout. - conditionAsParseAction is a helper method for constructing a parse action method from a predicate function that simply returns a boolean result. Useful for those places where a predicate cannot be added using addCondition, but must be converted to a parse action (such as in infixNotation). 
May be used as a decorator if default message and exception types can be used. See ParserElement.addCondition for more details about the expected signature and behavior for predicate condition methods. - While investigating issue #93, I found that Or and addCondition could interact to select an alternative that is not the longest match. This is because Or first checks all alternatives for matches without running attached parse actions or conditions, orders by longest match, and then rechecks for matches with conditions and parse actions. Some expressions, when checking with conditions, may end up matching on a shorter token list than originally matched, but would be selected because of its original priority. This matching code has been expanded to do more extensive searching for matches when a second-pass check matches a smaller list than in the first pass. - Fixed issue #87, a regression in indented block. Reported by Renz Bagaporo, who submitted a very nice repro example, which makes the bug-fixing process a lot easier, thanks! - Fixed MemoryError issue #85 and #91 with str generation for Forwards. Thanks decalage2 and Harmon758 for your patience. - Modified setParseAction to accept None as an argument, indicating that all previously-defined parse actions for the expression should be cleared. - Modified pyparsing_common.real and sci_real to parse reals without leading integer digits before the decimal point, consistent with Python real number formats. Original PR #98 submitted by ansobolev. - Modified runTests to call postParse function before dumping out the parsed results - allows for postParse to add further results, such as indications of additional validation success/failure. - Updated statemachine example: refactored state transitions to use overridden classmethods; added Mixin class to simplify definition of application classes that "own" the state object and delegate to it to model state-specific properties and behavior. 
- Added example nested_markup.py, showing a simple wiki markup with nested markup directives, and illustrating the use of '...' for skipping over input to match the next expression. (This example uses syntax that is not valid under Python 2.) - Rewrote delta_time.py example (renamed from deltaTime.py) to fix some omitted formats and upgrade to latest pyparsing idioms, beginning with writing an actual BNF. - With the help and encouragement from several contributors, including Matěj Cepl and Cengiz Kaygusuz, I've started cleaning up the internal coding styles in core pyparsing, bringing it up to modern coding practices from pyparsing's early development days dating back to 2003. Whitespace has been largely standardized along PEP8 guidelines, removing extra spaces around parentheses, and adding them around arithmetic operators and after colons and commas. I was going to hold off on doing this work until after 2.4.1, but after cleaning up a few trial classes, the difference was so significant that I continued on to the rest of the core code base. This should facilitate future work and submitted PRs, allowing them to focus on substantive code changes, and not get sidetracked by whitespace issues. Version 2.4.0 - April, 2019 --------------------------- - Well, it looks like the API change that was introduced in 2.3.1 was more drastic than expected, so for a friendlier forward upgrade path, this release: . Bumps the current version number to 2.4.0, to reflect this incompatible change. . Adds a pyparsing.__compat__ object for specifying compatibility with future breaking changes. . Conditionalizes the API-breaking behavior, based on the value pyparsing.__compat__.collect_all_And_tokens. By default, this value will be set to True, reflecting the new bugfixed behavior. To set this value to False, add to your code: import pyparsing pyparsing.__compat__.collect_all_And_tokens = False . 
User code that is dependent on the pre-bugfix behavior can restore it by setting this value to False. In 2.5 and later versions, the conditional code will be removed and setting the flag to True or False in these later versions will have no effect. - Updated unitTests.py and simple_unit_tests.py to be compatible with "python setup.py test". To run tests using setup, do: python setup.py test python setup.py test -s unitTests.suite python setup.py test -s simple_unit_tests.suite Prompted by issue #83 and PR submitted by bdragon28, thanks. - Fixed bug in runTests handling '\n' literals in quoted strings. - Added tag_body attribute to the start tag expressions generated by makeHTMLTags, so that you can avoid using SkipTo to roll your own tag body expression: a, aEnd = pp.makeHTMLTags('a') link = a + a.tag_body("displayed_text") + aEnd for t in s.searchString(html_page): print(t.displayed_text, '->', t.startA.href) - indentedBlock failure handling was improved; PR submitted by TMiguelT, thanks! - Address Py2 incompatibility in simpleUnitTests, plus explain() and Forward str() cleanup; PRs graciously provided by eswald. - Fixed docstring with embedded '\w', which creates SyntaxWarnings in Py3.8, issue #80. - Examples: - Added example parser for rosettacode.org tutorial compiler. - Added example to show how an HTML table can be parsed into a collection of Python lists or dicts, one per row. - Updated SimpleSQL.py example to handle nested selects, reworked 'where' expression to use infixNotation. - Added include_preprocessor.py, similar to macroExpander.py. - Examples using makeHTMLTags use new tag_body expression when retrieving a tag's body text. 
- Updated examples that are runnable as unit tests: python setup.py test -s examples.antlr_grammar_tests python setup.py test -s examples.test_bibparse Version 2.3.1 - January, 2019 ----------------------------- - POSSIBLE API CHANGE: this release fixes a bug when results names were attached to a MatchFirst or Or object containing an And object. Previously, a results name on an And object within an enclosing MatchFirst or Or could return just the first token in the And. Now, all the tokens matched by the And are correctly returned. This may result in subtle changes in the tokens returned if you have this condition in your pyparsing scripts. - New staticmethod ParseException.explain() to help diagnose parse exceptions by showing the failing input line and the trace of ParserElements in the parser leading up to the exception. explain() returns a multiline string listing each element by name. (This is still an experimental method, and the method signature and format of the returned string may evolve over the next few releases.) Example: # define a parser to parse an integer followed by an # alphabetic word expr = pp.Word(pp.nums).setName("int") + pp.Word(pp.alphas).setName("word") try: # parse a string with a numeric second value instead of alpha expr.parseString("123 355") except pp.ParseException as pe: print(pp.ParseException.explain(pe)) Prints: 123 355 ^ ParseException: Expected word (at char 4), (line:1, col:5) __main__.ExplainExceptionTest pyparsing.And - {int word} pyparsing.Word - word explain() will accept any exception type and will list the function names and parse expressions in the stack trace. This is especially useful when an exception is raised in a parse action. Note: explain() is only supported under Python 3. - Fix bug in dictOf which could match an empty sequence, making it infinitely loop if wrapped in a OneOrMore. - Added unicode sets to pyparsing_unicode for Latin-A and Latin-B ranges. 
- Added ability to define custom unicode sets as combinations of other sets using multiple inheritance. class Turkish_set(pp.pyparsing_unicode.Latin1, pp.pyparsing_unicode.LatinA): pass turkish_word = pp.Word(Turkish_set.alphas) - Updated state machine import examples, with state machine demos for: . traffic light . library book checkin/checkout . document review/approval In the traffic light example, you can use the custom 'statemachine' keyword to define the states for a traffic light, and have the state classes auto-generated for you: statemachine TrafficLightState: Red -> Green Green -> Yellow Yellow -> Red Similar for state machines with named transitions, like the library book state example: statemachine LibraryBookState: New -(shelve)-> Available Available -(reserve)-> OnHold OnHold -(release)-> Available Available -(checkout)-> CheckedOut CheckedOut -(checkin)-> Available Once the classes are defined, then additional Python code can reference those classes to add class attributes, instance methods, etc. See the examples in examples/statemachine - Added an example parser for the decaf language. This language is used in CS compiler classes in many colleges and universities. - Fixup of docstrings to Sphinx format, inclusion of test files in the source package, and convert markdown to rst throughout the distribution, great job by Matěj Cepl! - Expanded the whitespace characters recognized by the White class to include all unicode defined spaces. Suggested in Issue #51 by rtkjbillo. - Added optional postParse argument to ParserElement.runTests() to add a custom callback to be called for test strings that parse successfully. Useful for running tests that do additional validation or processing on the parsed results. See updated chemicalFormulas.py example. - Removed distutils fallback in setup.py. If installing the package fails, please update to the latest version of setuptools. 
Plus overall project code cleanup (CRLFs, whitespace, imports, etc.), thanks Jon Dufresne! - Fix bug in CaselessKeyword, to make its behavior consistent with Keyword(caseless=True). Fixes Issue #65 reported by telesphore. Version 2.3.0 - October, 2018 ----------------------------- - NEW SUPPORT FOR UNICODE CHARACTER RANGES This release introduces the pyparsing_unicode namespace class, defining a series of language character sets to simplify the definition of alphas, nums, alphanums, and printables in the following language sets: . Arabic . Chinese . Cyrillic . Devanagari . Greek . Hebrew . Japanese (including Kanji, Katakana, and Hirigana subsets) . Korean . Latin1 (includes 7 and 8-bit Latin characters) . Thai . CJK (combination of Chinese, Japanese, and Korean sets) For example, your code can define words using: korean_word = Word(pyparsing_unicode.Korean.alphas) See their use in the updated examples greetingInGreek.py and greetingInKorean.py. This namespace class also offers access to these sets using their unicode identifiers. - POSSIBLE API CHANGE: Fixed bug where a parse action that explicitly returned the input ParseResults could add another nesting level in the results if the current expression had a results name. vals = pp.OneOrMore(pp.pyparsing_common.integer)("int_values") def add_total(tokens): tokens['total'] = sum(tokens) return tokens # this line can be removed vals.addParseAction(add_total) print(vals.parseString("244 23 13 2343").dump()) Before the fix, this code would print (note the extra nesting level): [244, 23, 13, 2343] - int_values: [244, 23, 13, 2343] - int_values: [244, 23, 13, 2343] - total: 2623 - total: 2623 With the fix, this code now prints: [244, 23, 13, 2343] - int_values: [244, 23, 13, 2343] - total: 2623 This fix will change the structure of ParseResults returned if a program defines a parse action that returns the tokens that were sent in. 
This is not necessary, and statements like "return tokens" in the example above can be safely deleted prior to upgrading to this release, in order to avoid the bug and get the new behavior. Reported by seron in Issue #22, nice catch! - POSSIBLE API CHANGE: Fixed a related bug where a results name erroneously created a second level of hierarchy in the returned ParseResults. The intent for accumulating results names into ParseResults is that, in the absence of Group'ing, all names get merged into a common namespace. This allows us to write: key_value_expr = (Word(alphas)("key") + '=' + Word(nums)("value")) result = key_value_expr.parseString("a = 100") and have result structured as {"key": "a", "value": "100"} instead of [{"key": "a"}, {"value": "100"}]. However, if a named expression is used in a higher-level non-Group expression that *also* has a name, a false sub-level would be created in the namespace: num = pp.Word(pp.nums) num_pair = ("[" + (num("A") + num("B"))("values") + "]") U = num_pair.parseString("[ 10 20 ]") print(U.dump()) Since there is no grouping, "A", "B", and "values" should all appear at the same level in the results, as: ['[', '10', '20', ']'] - A: '10' - B: '20' - values: ['10', '20'] Instead, an extra level of "A" and "B" show up under "values": ['[', '10', '20', ']'] - A: '10' - B: '20' - values: ['10', '20'] - A: '10' - B: '20' This bug has been fixed. Now, if this hierarchy is desired, then a Group should be added: num_pair = ("[" + pp.Group(num("A") + num("B"))("values") + "]") Giving: ['[', ['10', '20'], ']'] - values: ['10', '20'] - A: '10' - B: '20' But in no case should "A" and "B" appear in multiple levels. This bug-fix fixes that. If you have current code which relies on this behavior, then add or remove Groups as necessary to get your intended results structure. Reported by Athanasios Anastasiou. - IndexError's raised in parse actions will get explicitly reraised as ParseExceptions that wrap the original IndexError. 
Since IndexError sometimes occurs as part of pyparsing's normal parsing logic, IndexErrors that are raised during a parse action may have gotten silently reinterpreted as parsing errors. To retain the information from the IndexError, these exceptions will now be raised as ParseExceptions that reference the original IndexError. This wrapping will only be visible when run under Python3, since it emulates "raise ... from ..." syntax. Addresses Issue #4, reported by guswns0528. - Added Char class to simplify defining expressions of a single character. (Char("abc") is equivalent to Word("abc", exact=1)) - Added class PrecededBy to perform lookbehind tests. PrecededBy is used in the same way as FollowedBy, passing in an expression that must occur just prior to the current parse location. For fixed-length expressions like a Literal, Keyword, Char, or a Word with an `exact` or `maxLen` length given, `PrecededBy(expr)` is sufficient. For varying length expressions like a Word with no given maximum length, `PrecededBy` must be constructed with an integer `retreat` argument, as in `PrecededBy(Word(alphas, nums), retreat=10)`, to specify the maximum number of characters pyparsing must look backward to make a match. pyparsing will check all the values from 1 up to retreat characters back from the current parse location. When stepping backwards through the input string, PrecededBy does *not* skip over whitespace. PrecededBy can be created with a results name so that, even though it always returns an empty parse result, the result *can* include named results. Idea first suggested in Issue #30 by Freakwill. - Updated FollowedBy to accept expressions that contain named results, so that results names defined in the lookahead expression will be returned, even though FollowedBy always returns an empty list. Inspired by the same feature implemented in PrecededBy. 
Version 2.2.2 - September, 2018 ------------------------------- - Fixed bug in SkipTo, if a SkipTo expression that was skipping to an expression that returned a list (such as an And), and the SkipTo was saved as a named result, the named result could be saved as a ParseResults - should always be saved as a string. Issue #28, reported by seron. - Added simple_unit_tests.py, as a collection of easy-to-follow unit tests for various classes and features of the pyparsing library. Primary intent is more to be instructional than actually rigorous testing. Complex tests can still be added in the unitTests.py file. - New features added to the Regex class: - optional asGroupList parameter, returns all the capture groups as a list - optional asMatch parameter, returns the raw re.match result - new sub(repl) method, which adds a parse action calling re.sub(pattern, repl, parsed_result). Simplifies creating Regex expressions to be used with transformString. Like re.sub, repl may be an ordinary string (similar to using pyparsing's replaceWith), or may contain references to capture groups by group number, or may be a callable that takes an re match group and returns a string. For instance: expr = pp.Regex(r"([Hh]\d):\s*(.*)").sub(r"<\1>\2") expr.transformString("h1: This is the title") will return

<h1>This is the title

- Fixed omission of LICENSE file in source tarball, also added CODE_OF_CONDUCT.md per GitHub community standards. Version 2.2.1 - September, 2018 ------------------------------- - Applied changes necessary to migrate hosting of pyparsing source over to GitHub. Many thanks for help and contributions from hugovk, jdufresne, and cngkaygusuz among others through this transition, sorry it took me so long! - Fixed import of collections.abc to address DeprecationWarnings in Python 3.7. - Updated oc.py example to support function calls in arithmetic expressions; fixed regex for '==' operator; and added packrat parsing. Raised on the pyparsing wiki by Boris Marin, thanks! - Fixed bug in select_parser.py example, group_by_terms was not reported. Reported on SF bugs by Adam Groszer, thanks Adam! - Added "Getting Started" section to the module docstring, to guide new users to the most common starting points in pyparsing's API. - Fixed bug in Literal and Keyword classes, which erroneously raised IndexError instead of ParseException. Version 2.2.0 - March, 2017 --------------------------- - Bumped minor version number to reflect compatibility issues with OneOrMore and ZeroOrMore bugfixes in 2.1.10. (2.1.10 fixed a bug that was introduced in 2.1.4, but the fix could break code written against 2.1.4 - 2.1.9.) - Updated setup.py to address recursive import problems now that pyparsing is part of 'packaging' (used by setuptools). Patch submitted by Joshua Root, much thanks! - Fixed KeyError issue reported by Yann Bizeul when using packrat parsing in the Graphite time series database, thanks Yann! - Fixed incorrect usages of '\' in literals, as described in https://docs.python.org/3/whatsnew/3.6.html#deprecated-python-behavior Patch submitted by Ville Skyttä - thanks! - Minor internal change when using '-' operator, to be compatible with ParserElement.streamline() method. - Expanded infixNotation to accept a list or tuple of parse actions to attach to an operation. 
- New unit test added for dill support for storing pyparsing parsers. Ordinary Python pickle can be used to pickle pyparsing parsers as long as they do not use any parse actions. The 'dill' module is an extension to pickle which *does* support pickling of attached parse actions. Version 2.1.10 - October, 2016 ------------------------------- - Fixed bug in reporting named parse results for ZeroOrMore expressions, thanks Ethan Nash for reporting this! - Fixed behavior of LineStart to be much more predictable. LineStart can now be used to detect if the next parse position is col 1, factoring in potential leading whitespace (which would cause LineStart to fail). Also fixed a bug in col, which is used in LineStart, where '\n's were erroneously considered to be column 1. - Added support for multiline test strings in runTests. - Fixed bug in ParseResults.dump when keys were not strings. Also changed display of string values to show them in quotes, to help distinguish parsed numeric strings from parsed integers that have been converted to Python ints. Version 2.1.9 - September, 2016 ------------------------------- - Added class CloseMatch, a variation on Literal which matches "close" matches, that is, strings with at most 'n' mismatching characters. - Fixed bug in Keyword.setDefaultKeywordChars(), reported by Kobayashi Shinji - nice catch, thanks! - Minor API change in pyparsing_common. Renamed some of the common expressions to PEP8 format (to be consistent with the other pyparsing_common expressions): . signedInteger -> signed_integer . sciReal -> sci_real Also, in trying to stem the API bloat of pyparsing, I've copied some of the global expressions and helper parse actions into pyparsing_common, with the originals to be deprecated and removed in a future release: . commaSeparatedList -> pyparsing_common.comma_separated_list . upcaseTokens -> pyparsing_common.upcaseTokens . 
downcaseTokens -> pyparsing_common.downcaseTokens (I don't expect any other expressions, like the comment expressions, quotedString, or the Word-helping strings like alphas, nums, etc. to migrate to pyparsing_common - they are just too pervasive. As for the PEP8 vs camelCase naming, all the expressions are PEP8, while the parse actions in pyparsing_common are still camelCase. It's a small step - when pyparsing 3.0 comes around, everything will change to PEP8 snake case.) - Fixed Python3 compatibility bug when using dict keys() and values() in ParseResults.getName(). - After some prodding, I've reworked the unitTests.py file for pyparsing over the past few releases. It uses some variations on unittest to handle my testing style. The test now: . auto-discovers its test classes (while maintining their order of definition) . suppresses voluminous 'print' output for tests that pass Version 2.1.8 - August, 2016 ---------------------------- - Fixed issue in the optimization to _trim_arity, when the full stacktrace is retrieved to determine if a TypeError is raised in pyparsing or in the caller's parse action. Code was traversing the full stacktrace, and potentially encountering UnicodeDecodeError. - Fixed bug in ParserElement.inlineLiteralsUsing, causing infinite loop with Suppress. - Fixed bug in Each, when merging named results from multiple expressions in a ZeroOrMore or OneOrMore. Also fixed bug when ZeroOrMore expressions were erroneously treated as required expressions in an Each expression. - Added a few more inline doc examples. - Improved use of runTests in several example scripts. Version 2.1.7 - August, 2016 ---------------------------- - Fixed regression reported by Andrea Censi (surfaced in PyContracts tests) when using ParseSyntaxExceptions (raised when using operator '-') with packrat parsing. - Minor fix to oneOf, to accept all iterables, not just space-delimited strings and lists. 
(If you have a list or set of strings, it is not necessary to concat them using ' '.join to pass them to oneOf, oneOf will accept the list or set or generator directly.) Version 2.1.6 - August, 2016 ---------------------------- - *Major packrat upgrade*, inspired by patch provided by Tal Einat - many, many, thanks to Tal for working on this! Tal's tests show faster parsing performance (2X in some tests), *and* memory reduction from 3GB down to ~100MB! Requires no changes to existing code using packratting. (Uses OrderedDict, available in Python 2.7 and later. For Python 2.6 users, will attempt to import from ordereddict backport. If not present, will implement pure-Python Fifo dict.) - Minor API change - to better distinguish between the flexible numeric types defined in pyparsing_common, I've changed "numeric" (which parsed numbers of different types and returned int for ints, float for floats, etc.) and "number" (which parsed numbers of int or float type, and returned all floats) to "number" and "fnumber" respectively. I hope the "f" prefix of "fnumber" will be a better indicator of its internal conversion of parsed values to floats, while the generic "number" is similar to the flexible number syntax in other languages. Also fixed a bug in pyparsing_common.numeric (now renamed to pyparsing_common.number), integers were parsed and returned as floats instead of being retained as ints. - Fixed bug in upcaseTokens and downcaseTokens introduced in 2.1.5, when the parse action was used in conjunction with results names. Reported by Steven Arcangeli from the dql project, thanks for your patience, Steven! - Major change to docs! After seeing some comments on reddit about general issue with docs of Python modules, and thinking that I'm a little overdue in doing some doc tuneup on pyparsing, I decided to following the suggestions of the redditor and add more inline examples to the pyparsing reference documentation. 
I hope this addition will clarify some of the more common questions people have, especially when first starting with pyparsing/Python. - Deprecated ParseResults.asXML. I've never been too happy with this method, and it usually forces some unnatural code in the parsers in order to get decent tag names. The amount of guesswork that asXML has to do to try to match names with values should have been a red flag from day one. If you are using asXML, you will need to implement your own ParseResults->XML serialization. Or consider migrating to a more current format such as JSON (which is very easy to do: results_as_json = json.dumps(parse_result.asDict()) Hopefully, when I remove this code in a future version, I'll also be able to simplify some of the craziness in ParseResults, which IIRC was only there to try to make asXML work. - Updated traceParseAction parse action decorator to show the repr of the input and output tokens, instead of the str format, since str has been simplified to just show the token list content. (The change to ParseResults.__str__ occurred in pyparsing 2.0.4, but it seems that didn't make it into the release notes - sorry! Too many users, especially beginners, were confused by the "([token_list], {names_dict})" str format for ParseResults, thinking they were getting a tuple containing a list and a dict. The full form can be seen if using repr().) For tracing tokens in and out of parse actions, the more complete repr form provides important information when debugging parse actions. Version 2.1.5 - June, 2016 ------------------------------ - Added ParserElement.split() generator method, similar to re.split(). Includes optional arguments maxsplit (to limit the number of splits), and includeSeparators (to include the separating matched text in the returned output, default=False). - Added a new parse action construction helper tokenMap, which will apply a function and optional arguments to each element in a ParseResults. 
So this parse action: def lowercase_all(tokens): return [str(t).lower() for t in tokens] OneOrMore(Word(alphas)).setParseAction(lowercase_all) can now be written: OneOrMore(Word(alphas)).setParseAction(tokenMap(str.lower)) Also simplifies writing conversion parse actions like: integer = Word(nums).setParseAction(lambda t: int(t[0])) to just: integer = Word(nums).setParseAction(tokenMap(int)) If additional arguments are necessary, they can be included in the call to tokenMap, as in: hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16)) - Added more expressions to pyparsing_common: . IPv4 and IPv6 addresses (including long, short, and mixed forms of IPv6) . MAC address . ISO8601 date and date time strings (with named fields for year, month, etc.) . UUID (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) . hex integer (returned as int) . fraction (integer '/' integer, returned as float) . mixed integer (integer '-' fraction, or just fraction, returned as float) . stripHTMLTags (parse action to remove tags from HTML source) . parse action helpers convertToDate and convertToDatetime to do custom parse time conversions of parsed ISO8601 strings - runTests now returns a two-tuple: success if all tests succeed, and an output list of each test and its output lines. - Added failureTests argument (default=False) to runTests, so that tests can be run that are expected failures, and runTests' success value will return True only if all tests *fail* as expected. Also, parseAll now defaults to True. - New example numerics.py, shows samples of parsing integer and real numbers using locale-dependent formats: 4.294.967.295,000 4 294 967 295,000 4,294,967,295.000 Version 2.1.4 - May, 2016 ------------------------------ - Split out the '==' behavior in ParserElement, now implemented as the ParserElement.matches() method. Using '==' for string test purposes will be removed in a future release. - Expanded capabilities of runTests(). 
Will now accept embedded comments (default is Python style, leading '#' character, but customizable). Comments will be emitted along with the tests and test output. Useful during test development, to create a test string consisting only of test case description comments separated by blank lines, and then fill in the test cases. Will also highlight ParseFatalExceptions with "(FATAL)". - Added a 'pyparsing_common' class containing common/helpful little expressions such as integer, float, identifier, etc. I used this class as a sort of embedded namespace, to contain these helpers without further adding to pyparsing's namespace bloat. - Minor enhancement to traceParseAction decorator, to retain the parse action's name for the trace output. - Added optional 'fatal' keyword arg to addCondition, to indicate that a condition failure should halt parsing immediately. Version 2.1.3 - May, 2016 ------------------------------ - _trim_arity fix in 2.1.2 was very version-dependent on Py 3.5.0. Now works for Python 2.x, 3.3, 3.4, 3.5.0, and 3.5.1 (and hopefully beyond). Version 2.1.2 - May, 2016 ------------------------------ - Fixed bug in _trim_arity when pyparsing code is included in a PyInstaller, reported by maluwa. - Fixed catastrophic regex backtracking in implementation of the quoted string expressions (dblQuotedString, sglQuotedString, and quotedString). Reported on the pyparsing wiki by webpentest, good catch! (Also tuned up some other expressions susceptible to the same backtracking problem, such as cStyleComment, cppStyleComment, etc.) Version 2.1.1 - March, 2016 --------------------------- - Added support for assigning to ParseResults using slices. - Fixed bug in ParseResults.toDict(), in which dict values were always converted to dicts, even if they were just unkeyed lists of tokens. Reported on SO by Gerald Thibault, thanks Gerald! - Fixed bug in SkipTo when using failOn, reported by robyschek, thanks! 
- Fixed bug in Each introduced in 2.1.0, reported by AND patch and unit test submitted by robyschek, well done! - Removed use of functools.partial in replaceWith, as this creates an ambiguous signature for the generated parse action, which fails in PyPy. Reported by Evan Hubinger, thanks Evan! - Added default behavior to QuotedString to convert embedded '\t', '\n', etc. characters to their whitespace counterparts. Found during Q&A exchange on SO with Maxim. Version 2.1.0 - February, 2016 ------------------------------ - Modified the internal _trim_arity method to distinguish between TypeError's raised while trying to determine parse action arity and those raised within the parse action itself. This will clear up those confusing "() takes exactly 1 argument (0 given)" error messages when there is an actual TypeError in the body of the parse action. Thanks to all who have raised this issue in the past, and most recently to Michael Cohen, who sent in a proposed patch, and got me to finally tackle this problem. - Added compatibility for pickle protocols 2-4 when pickling ParseResults. In Python 2.x, protocol 0 was the default, and protocol 2 did not work. In Python 3.x, protocol 3 is the default, so explicitly naming protocol 0 or 1 was required to pickle ParseResults. With this release, all protocols 0-4 are supported. Thanks for reporting this on StackOverflow, Arne Wolframm, and for providing a nice simple test case! - Added optional 'stopOn' argument to ZeroOrMore and OneOrMore, to simplify breaking on stop tokens that would match the repetition expression. It is a common problem to fail to look ahead when matching repetitive tokens if the sentinel at the end also matches the repetition expression, as when parsing "BEGIN aaa bbb ccc END" with: "BEGIN" + OneOrMore(Word(alphas)) + "END" Since "END" matches the repetition expression "Word(alphas)", it will never get parsed as the terminating sentinel. 
Up until now, this has to be resolved by the user inserting their own negative lookahead: "BEGIN" + OneOrMore(~Literal("END") + Word(alphas)) + "END" Using stopOn, they can more easily write: "BEGIN" + OneOrMore(Word(alphas), stopOn="END") + "END" The stopOn argument can be a literal string or a pyparsing expression. Inspired by a question by Lamakaha on StackOverflow (and many previous questions with the same negative-lookahead resolution). - Added expression names for many internal and builtin expressions, to reduce name and error message overhead during parsing. - Converted helper lambdas to functions to refactor and add docstring support. - Fixed ParseResults.asDict() to correctly convert nested ParseResults values to dicts. - Cleaned up some examples, fixed typo in fourFn.py identified by aristotle2600 on reddit. - Removed keepOriginalText helper method, which was deprecated ages ago. Superceded by originalTextFor. - Same for the Upcase class, which was long ago deprecated and replaced with the upcaseTokens method. Version 2.0.7 - December, 2015 ------------------------------ - Simplified string representation of Forward class, to avoid memory and performance errors while building ParseException messages. Thanks, Will McGugan, Andrea Censi, and Martijn Vermaat for the bug reports and test code. - Cleaned up additional issues from enhancing the error messages for Or and MatchFirst, handling Unicode values in expressions. Fixes Unicode encoding issues in Python 2, thanks to Evan Hubinger for the bug report. - Fixed implementation of dir() for ParseResults - was leaving out all the defined methods and just adding the custom results names. - Fixed bug in ignore() that was introduced in pyparsing 1.5.3, that would not accept a string literal as the ignore expression. - Added new example parseTabularData.py to illustrate parsing of data formatted in columns, with detection of empty cells. - Updated a number of examples to more current Python and pyparsing forms. 
Version 2.0.6 - November, 2015 ------------------------------ - Fixed a bug in Each when multiple Optional elements are present. Thanks for reporting this, whereswalden on SO. - Fixed another bug in Each, when Optional elements have results names or parse actions, reported by Max Rothman - thank you, Max! - Added optional parseAll argument to runTests, whether tests should require the entire input string to be parsed or not (similar to parseAll argument to parseString). Plus a little neaten-up of the output on Python 2 (no stray ()'s). - Modified exception messages from MatchFirst and Or expressions. These were formerly misleading as they would only give the first or longest exception mismatch error message. Now the error message includes all the alternatives that were possible matches. Originally proposed by a pyparsing user, but I've lost the email thread - finally figured out a fairly clean way to do this. - Fixed a bug in Or, when a parse action on an alternative raises an exception, other potentially matching alternatives were not always tried. Reported by TheVeryOmni on the pyparsing wiki, thanks! - Fixed a bug to dump() introduced in 2.0.4, where list values were shown in duplicate. Version 2.0.5 - October, 2015 ----------------------------- - (&$(@#&$(@!!!! Some "print" statements snuck into pyparsing v2.0.4, breaking Python 3 compatibility! Fixed. Reported by jenshn, thanks! Version 2.0.4 - October, 2015 ----------------------------- - Added ParserElement.addCondition, to simplify adding parse actions that act primarily as filters. If the given condition evaluates False, pyparsing will raise a ParseException. The condition should be a method with the same method signature as a parse action, but should return a boolean. Suggested by Victor Porton, nice idea Victor, thanks! - Slight mod to srange to accept unicode literals for the input string, such as "[а-яА-Я]" instead of "[\u0430-\u044f\u0410-\u042f]". Thanks to Alexandr Suchkov for the patch! 
- Enhanced implementation of replaceWith. - Fixed enhanced ParseResults.dump() method when the results consists only of an unnamed array of sub-structure results. Reported by Robin Siebler, thanks for your patience and persistence, Robin! - Fixed bug in fourFn.py example code, where pi and e were defined using CaselessLiteral instead of CaselessKeyword. This was not a problem until adding a new function 'exp', and the leading 'e' of 'exp' was accidentally parsed as the mathematical constant 'e'. Nice catch, Tom Grydeland - thanks! - Adopt new-fangled Python features, like decorators and ternary expressions, per suggestions from Williamzjc - thanks William! (Oh yeah, I'm not supporting Python 2.3 with this code any more...) Plus, some additional code fixes/cleanup - thanks again! - Added ParserElement.runTests, a little test bench for quickly running an expression against a list of sample input strings. Basically, I got tired of writing the same test code over and over, and finally added it as a test point method on ParserElement. - Added withClass helper method, a simplified version of withAttribute for the common but annoying case when defining a filter on a div's class - made difficult because 'class' is a Python reserved word. Version 2.0.3 - October, 2014 ----------------------------- - Fixed escaping behavior in QuotedString. Formerly, only quotation marks (or characters designated as quotation marks in the QuotedString constructor) would be escaped. Now all escaped characters will be escaped, and the escaping backslashes will be removed. - Fixed regression in ParseResults.pop() - pop() was pretty much broken after I added *improvements* in 2.0.2. Reported by Iain Shelvington, thanks Iain! - Fixed bug in And class when initializing using a generator. - Enhanced ParseResults.dump() method to list out nested ParseResults that are unnamed arrays of sub-structures. 
- Fixed UnboundLocalError under Python 3.4 in oneOf method, reported on Sourceforge by aldanor, thanks! - Fixed bug in ParseResults __init__ method, when returning non-ParseResults types from parse actions that implement __eq__. Raised during discussion on the pyparsing wiki with cyrfer. Version 2.0.2 - April, 2014 --------------------------- - Extended "expr(name)" shortcut (same as "expr.setResultsName(name)") to accept "expr()" as a shortcut for "expr.copy()". - Added "locatedExpr(expr)" helper, to decorate any returned tokens with their location within the input string. Adds the results names locn_start and locn_end to the output parse results. - Added "pprint()" method to ParseResults, to simplify troubleshooting and prettified output. Now instead of importing the pprint module and then writing "pprint.pprint(result)", you can just write "result.pprint()". This method also accepts additional positional and keyword arguments (such as indent, width, etc.), which get passed through directly to the pprint method (see https://docs.python.org/2/library/pprint.html#pprint.pprint). - Removed deprecation warnings when using '<<' for Forward expression assignment. '<<=' is still preferred, but '<<' will be retained for cases where '<<=' operator is not suitable (such as in defining lambda expressions). - Expanded argument compatibility for classes and functions that take list arguments, to now accept generators as well. - Extended list-like behavior of ParseResults, adding support for append and extend. NOTE: if you have existing applications using these names as results names, you will have to access them using dict-style syntax: res["append"] and res["extend"] - ParseResults emulates the change in list vs. iterator semantics for methods like keys(), values(), and items(). Under Python 2.x, these methods will return lists, under Python 3.x, these methods will return iterators. 
- ParseResults now has a method haskeys() which returns True or False depending on whether any results names have been defined. This simplifies testing for the existence of results names under Python 3.x, which returns keys() as an iterator, not a list. - ParseResults now supports both list and dict semantics for pop(). If passed no argument or an integer argument, it will use list semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use dict semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in dict.pop(). - Fixed bug in markInputline, thanks for reporting this, Matt Grant! - Cleaned up my unit test environment, now runs with Python 2.6 and 3.3. Version 2.0.1 - July, 2013 -------------------------- - Removed use of "nonlocal" that prevented using this version of pyparsing with Python 2.6 and 2.7. This will make it easier to install for packages that depend on pyparsing, under Python versions 2.6 and later. Those using older versions of Python will have to manually install pyparsing 1.5.7. - Fixed implementation of <<= operator to return self; reported by Luc J. Bourhis, with patch fix by Mathias Mamsch - thanks, Luc and Mathias! Version 2.0.0 - November, 2012 ------------------------------ - Rather than release another combined Python 2.x/3.x release I've decided to start a new major version that is only compatible with Python 3.x (and consequently Python 2.7 as well due to backporting of key features). This version will be the main development path from now on, with little follow-on development on the 1.5.x path. - Operator '<<' is now deprecated, in favor of operator '<<=' for attaching parsing expressions to Forward() expressions. This is being done to address precedence of operations problems with '<<'. Operator '<<' will be removed in a future version of pyparsing. 
Version 1.5.7 - November, 2012 ----------------------------- - NOTE: This is the last release of pyparsing that will try to maintain compatibility with Python versions < 2.6. The next release of pyparsing will be version 2.0.0, using new Python syntax that will not be compatible for Python version 2.5 or older. - An awesome new example is included in this release, submitted by Luca DellOlio, for parsing ANTLR grammar definitions, nice work Luca! - Fixed implementation of ParseResults.__str__ to use Pythonic ''.join() instead of repeated string concatenation. This purportedly has been a performance issue under PyPy. - Fixed bug in ParseResults.__dir__ under Python 3, reported by Thomas Kluyver, thank you Thomas! - Added ParserElement.inlineLiteralsUsing static method, to override pyparsing's default behavior of converting string literals to Literal instances, to use other classes (such as Suppress or CaselessLiteral). - Added new operator '<<=', which will eventually replace '<<' for storing the contents of a Forward(). '<<=' does not have the same operator precedence problems that '<<' does. - 'operatorPrecedence' is being renamed 'infixNotation' as a better description of what this helper function creates. 'operatorPrecedence' is deprecated, and will be dropped entirely in a future release. - Added optional arguments lpar and rpar to operatorPrecedence, so that expressions that use it can override the default suppression of the grouping characters. - Added support for using single argument builtin functions as parse actions. Now you can write 'expr.setParseAction(len)' and get back the length of the list of matched tokens. Supported builtins are: sum, len, sorted, reversed, list, tuple, set, any, all, min, and max. A script demonstrating this feature is included in the examples directory. - Improved linking in generated docs, proposed on the pyparsing wiki by techtonik, thanks! 
- Fixed a bug in the definition of 'alphas', which was based on the string.uppercase and string.lowercase "constants", which in fact *aren't* constant, but vary with locale settings. This could make parsers locale-sensitive in a subtle way. Thanks to Kef Schecter for his diligence in following through on reporting and monitoring this bugfix! - Fixed a bug in the Py3 version of pyparsing, during exception handling with packrat parsing enabled, reported by Catherine Devlin - thanks Catherine! - Fixed typo in ParseBaseException.__dir__, reported anonymously on the SourceForge bug tracker, thank you Pyparsing User With No Name. - Fixed bug in srange when using '\x###' hex character codes. - Added optional 'intExpr' argument to countedArray, so that you can define your own expression that will evaluate to an integer, to be used as the count for the following elements. Allows you to define a countedArray with the count given in hex, for example, by defining intExpr as "Word(hexnums).setParseAction(int(t[0],16))". Version 1.5.6 - June, 2011 ---------------------------- - Cleanup of parse action normalizing code, to be more version-tolerant, and robust in the face of future Python versions - much thanks to Raymond Hettinger for this rewrite! - Removal of exception cacheing, addressing a memory leak condition in Python 3. Thanks to Michael Droettboom and the Cape Town PUG for their analysis and work on this problem! - Fixed bug when using packrat parsing, where a previously parsed expression would duplicate subsequent tokens - reported by Frankie Ribery on stackoverflow, thanks! - Added 'ungroup' helper method, to address token grouping done implicitly by And expressions, even if only one expression in the And actually returns any text - also inspired by stackoverflow discussion with Frankie Ribery! - Fixed bug in srange, which accepted escaped hex characters of the form '\0x##', but should be '\x##'. Both forms will be supported for backwards compatibility. 
- Enhancement to countedArray, accepting an optional expression to be used for matching the leading integer count - proposed by Mathias on the pyparsing mailing list, good idea! - Added the Verilog parser to the provided set of examples, under the MIT license. While this frees up this parser for any use, if you find yourself using it in a commercial purpose, please consider making a charitable donation as described in the parser's header. - Added the excludeChars argument to the Word class, to simplify defining a word composed of all characters in a large range except for one or two. Suggested by JesterEE on the pyparsing wiki. - Added optional overlap parameter to scanString, to return overlapping matches found in the source text. - Updated oneOf internal regular expression generation, with improved parse time performance. - Slight performance improvement in transformString, removing empty strings from the list of string fragments built while scanning the source text, before calling ''.join. Especially useful when using transformString to strip out selected text. - Enhanced form of using the "expr('name')" style of results naming, in lieu of calling setResultsName. If name ends with an '*', then this is equivalent to expr.setResultsName('name',listAllMatches=True). - Fixed up internal list flattener to use iteration instead of recursion, to avoid stack overflow when transforming large files. - Added other new examples: . protobuf parser - parses Google's protobuf language . btpyparse - a BibTex parser contributed by Matthew Brett, with test suite test_bibparse.py (thanks, Matthew!) . groupUsingListAllMatches.py - demo using trailing '*' for results names Version 1.5.5 - August, 2010 ---------------------------- - Typo in Python3 version of pyparsing, "builtin" should be "builtins". 
(sigh) Version 1.5.4 - August, 2010 ---------------------------- - Fixed __builtins__ and file references in Python 3 code, thanks to Greg Watson, saulspatz, sminos, and Mark Summerfield for reporting their Python 3 experiences. - Added new example, apicheck.py, as a sample of scanning a Tcl-like language for functions with incorrect number of arguments (difficult to track down in Tcl languages). This example uses some interesting methods for capturing exceptions while scanning through source code. - Added new example deltaTime.py, that takes everyday time references like "an hour from now", "2 days ago", "next Sunday at 2pm". Version 1.5.3 - June, 2010 -------------------------- - ======= NOTE: API CHANGE!!!!!!! =============== With this release, and henceforward, the pyparsing module is imported as "pyparsing" on both Python 2.x and Python 3.x versions. - Fixed up setup.py to auto-detect Python version and install the correct version of pyparsing - suggested by Alex Martelli, thanks, Alex! (and my apologies to all those who struggled with those spurious installation errors caused by my earlier fumblings!) - Fixed bug on Python3 when using parseFile, getting bytes instead of a str from the input file. - Fixed subtle bug in originalTextFor, if followed by significant whitespace (like a newline) - discovered by Francis Vidal, thanks! - Fixed very sneaky bug in Each, in which Optional elements were not completely recognized as optional - found by Tal Weiss, thanks for your patience. - Fixed off-by-1 bug in line() method when the first line of the input text was an empty line. Thanks to John Krukoff for submitting a patch! - Fixed bug in transformString if grammar contains Group expressions, thanks to patch submitted by barnabas79, nice work! - Fixed bug in originalTextFor in which trailing comments or otherwised ignored text got slurped in with the matched expression. 
Thanks to michael_ramirez44 on the pyparsing wiki for reporting this just in time to get into this release! - Added better support for summing ParseResults, see the new example, parseResultsSumExample.py. - Added support for composing a Regex using a compiled RE object; thanks to my new colleague, Mike Thornton! - In version 1.5.2, I changed the way exceptions are raised in order to simplify the stacktraces reported during parsing. An anonymous user posted a bug report on SF that this behavior makes it difficult to debug some complex parsers, or parsers nested within parsers. In this release I've added a class attribute ParserElement.verbose_stacktrace, with a default value of False. If you set this to True, pyparsing will report stacktraces using the pre-1.5.2 behavior. - New examples: . pymicko.py, a MicroC compiler submitted by Zarko Zivanov. (Note: this example is separately licensed under the GPLv3, and requires Python 2.6 or higher.) Thank you, Zarko! . oc.py, a subset C parser, using the BNF from the 1996 Obfuscated C Contest. . stateMachine2.py, a modified version of stateMachine.py submitted by Matt Anderson, that is compatible with Python versions 2.7 and above - thanks so much, Matt! . select_parser.py, a parser for reading SQLite SELECT statements, as specified at https://www.sqlite.org/lang_select.html this goes into much more detail than the simple SQL parser included in pyparsing's source code . excelExpr.py, a *simplistic* first-cut at a parser for Excel expressions, which I originally posted on comp.lang.python in January, 2010; beware, this parser omits many common Excel cases (addition of numbers represented as strings, references to named ranges) . cpp_enum_parser.py, a nice little parser posted my Mark Tolonen on comp.lang.python in August, 2009 (redistributed here with Mark's permission). Thanks a bunch, Mark! . 
partial_gene_match.py, a sample I posted to Stackoverflow.com, implementing a special variation on Literal that does "close" matching, up to a given number of allowed mismatches. The application was to find matching gene sequences, with allowance for one or two mismatches. . tagCapture.py, a sample showing how to use a Forward placeholder to enforce matching of text parsed in a previous expression. . matchPreviousDemo.py, simple demo showing how the matchPreviousLiteral helper method is used to match a previously parsed token. Version 1.5.2 - April, 2009 ------------------------------ - Added pyparsing_py3.py module, so that Python 3 users can use pyparsing by changing their pyparsing import statement to: import pyparsing_py3 Thanks for help from Patrick Laban and his friend Geremy Condra on the pyparsing wiki. - Removed __slots__ declaration on ParseBaseException, for compatibility with IronPython 2.0.1. Raised by David Lawler on the pyparsing wiki, thanks David! - Fixed bug in SkipTo/failOn handling - caught by eagle eye cpennington on the pyparsing wiki! - Fixed second bug in SkipTo when using the ignore constructor argument, reported by Catherine Devlin, thanks! - Fixed obscure bug reported by Eike Welk when using a class as a ParseAction with an errant __getitem__ method. - Simplified exception stack traces when reporting parse exceptions back to caller of parseString or parseFile - thanks to a tip from Peter Otten on comp.lang.python. - Changed behavior of scanString to avoid infinitely looping on expressions that match zero-length strings. Prompted by a question posted by ellisonbg on the wiki. - Enhanced classes that take a list of expressions (And, Or, MatchFirst, and Each) to accept generator expressions also. This can be useful when generating lists of alternative expressions, as in this case, where the user wanted to match any repetitions of '+', '*', '#', or '.', but not mixtures of them (that is, match '+++', but not '+-+'): codes = "+*#." 
format = MatchFirst(Word(c) for c in codes) Based on a problem posed by Denis Spir on the Python tutor list. - Added new example eval_arith.py, which extends the example simpleArith.py to actually evaluate the parsed expressions. Version 1.5.1 - October, 2008 ------------------------------- - Added new helper method originalTextFor, to replace the use of the current keepOriginalText parse action. Now instead of using the parse action, as in: fullName = Word(alphas) + Word(alphas) fullName.setParseAction(keepOriginalText) (in this example, we used keepOriginalText to restore any white space that may have been skipped between the first and last names) You can now write: fullName = originalTextFor(Word(alphas) + Word(alphas)) The implementation of originalTextFor is simpler and faster than keepOriginalText, and does not depend on using the inspect or imp modules. - Added optional parseAll argument to parseFile, to be consistent with parseAll argument to parseString. Posted by pboucher on the pyparsing wiki, thanks! - Added failOn argument to SkipTo, so that grammars can define literal strings or pyparsing expressions which, if found in the skipped text, will cause SkipTo to fail. Useful to prevent SkipTo from reading past terminating expression. Instigated by question posed by Aki Niimura on the pyparsing wiki. - Fixed bug in nestedExpr if multi-character expressions are given for nesting delimiters. Patch provided by new pyparsing user, Hans-Martin Gaudecker - thanks, H-M! - Removed dependency on xml.sax.saxutils.escape, and included internal implementation instead - proposed by Mike Droettboom on the pyparsing mailing list, thanks Mike! Also fixed erroneous mapping in replaceHTMLEntity of " to ', now correctly maps to ". (Also added support for mapping ' to '.) - Fixed typo in ParseResults.insert, found by Alejandro Dubrovsky, good catch! - Added __dir__() methods to ParseBaseException and ParseResults, to support new dir() behavior in Py2.6 and Py3.0. 
If dir() is called on a ParseResults object, the returned list will include the base set of attribute names, plus any results names that are defined. - Fixed bug in ParseResults.asXML(), in which the first named item within a ParseResults gets reported with an tag instead of with the correct results name. - Fixed bug in '-' error stop, when '-' operator is used inside a Combine expression. - Reverted generator expression to use list comprehension, for better compatibility with old versions of Python. Reported by jester/artixdesign on the SourceForge pyparsing discussion list. - Fixed bug in parseString(parseAll=True), when the input string ends with a comment or whitespace. - Fixed bug in LineStart and LineEnd that did not recognize any special whitespace chars defined using ParserElement.setDefault- WhitespaceChars, found while debugging an issue for Marek Kubica, thanks for the new test case, Marek! - Made Forward class more tolerant of subclassing. Version 1.5.0 - June, 2008 -------------------------- This version of pyparsing includes work on two long-standing FAQ's: support for forcing parsing of the complete input string (without having to explicitly append StringEnd() to the grammar), and a method to improve the mechanism of detecting where syntax errors occur in an input string with various optional and alternative paths. This release also includes a helper method to simplify definition of indentation-based grammars. With these changes (and the past few minor updates), I thought it was finally time to bump the minor rev number on pyparsing - so 1.5.0 is now available! Read on... - AT LAST!!! You can now call parseString and have it raise an exception if the expression does not parse the entire input string. This has been an FAQ for a LONG time. The parseString method now includes an optional parseAll argument (default=False). If parseAll is set to True, then the given parse expression must parse the entire input string. 
(This is equivalent to adding StringEnd() to the end of the expression.) The default value is False to retain backward compatibility. Inspired by MANY requests over the years, most recently by ecir-hana on the pyparsing wiki! - Added new operator '-' for composing grammar sequences. '-' behaves just like '+' in creating And expressions, but '-' is used to mark grammar structures that should stop parsing immediately and report a syntax error, rather than just backtracking to the last successful parse and trying another alternative. For instance, running the following code: port_definition = Keyword("port") + '=' + Word(nums) entity_definition = Keyword("entity") + "{" + Optional(port_definition) + "}" entity_definition.parseString("entity { port 100 }") pyparsing fails to detect the missing '=' in the port definition. But, since this expression is optional, pyparsing then proceeds to try to match the closing '}' of the entity_definition. Not finding it, pyparsing reports that there was no '}' after the '{' character. Instead, we would like pyparsing to parse the 'port' keyword, and if not followed by an equals sign and an integer, to signal this as a syntax error. This can now be done simply by changing the port_definition to: port_definition = Keyword("port") - '=' + Word(nums) Now after successfully parsing 'port', pyparsing must also find an equals sign and an integer, or it will raise a fatal syntax exception. By judicious insertion of '-' operators, a pyparsing developer can have their grammar report much more informative syntax error messages. Patches and suggestions proposed by several contributors on the pyparsing mailing list and wiki - special thanks to Eike Welk and Thomas/Poldy on the pyparsing wiki! - Added indentedBlock helper method, to encapsulate the parse actions and indentation stack management needed to keep track of indentation levels. Use indentedBlock to define grammars for indentation-based grouping grammars, like Python's. 
indentedBlock takes up to 3 parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple indentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the current level; set to False for block of left-most statements (default=True) A valid block must contain at least one indented statement. - Fixed bug in nestedExpr in which ignored expressions needed to be set off with whitespace. Reported by Stefaan Himpe, nice catch! - Expanded multiplication of an expression by a tuple, to accept tuple values of None: . expr*(n,None) or expr*(n,) is equivalent to expr*n + ZeroOrMore(expr) (read as "at least n instances of expr") . expr*(None,n) is equivalent to expr*(0,n) (read as "0 to n instances of expr") . expr*(None,None) is equivalent to ZeroOrMore(expr) . expr*(1,None) is equivalent to OneOrMore(expr) Note that expr*(None,n) does not raise an exception if more than n exprs exist in the input stream; that is, expr*(None,n) does not enforce a maximum number of expr occurrences. If this behavior is desired, then write expr*(None,n) + ~expr - Added None as a possible operator for operatorPrecedence. None signifies "no operator", as in multiplying m times x in "y=mx+b". - Fixed bug in Each, reported by Michael Ramirez, in which the order of terms in the Each affected the parsing of the results. Problem was due to premature grouping of the expressions in the overall Each during grammar construction, before the complete Each was defined. Thanks, Michael! - Also fixed bug in Each in which Optional's with default values were not getting the defaults added to the results of the overall Each expression. - Fixed a bug in Optional in which results names were not assigned if a default value was supplied. 
- Cleaned up Py3K compatibility statements, including exception construction statements, and better equivalence between _ustr and basestring, and __nonzero__ and __bool__. Version 1.4.11 - February, 2008 ------------------------------- - With help from Robert A. Clark, this version of pyparsing is compatible with Python 3.0a3. Thanks for the help, Robert! - Added WordStart and WordEnd positional classes, to support expressions that must occur at the start or end of a word. Proposed by piranha on the pyparsing wiki, good idea! - Added matchOnlyAtCol helper parser action, to simplify parsing log or data files that have optional fields that are column dependent. Inspired by a discussion thread with hubritic on comp.lang.python. - Added withAttribute.ANY_VALUE as a match-all value when using withAttribute. Used to ensure that an attribute is present, without having to match on the actual attribute value. - Added get() method to ParseResults, similar to dict.get(). Suggested by new pyparsing user, Alejandro Dubrovksy, thanks! - Added '==' short-cut to see if a given string matches a pyparsing expression. For instance, you can now write: integer = Word(nums) if "123" == integer: # do something print [ x for x in "123 234 asld".split() if x==integer ] # prints ['123', '234'] - Simplified the use of nestedExpr when using an expression for the opening or closing delimiters. Now the content expression will not have to explicitly negate closing delimiters. Found while working with dfinnie on GHOP Task #277, thanks! - Fixed bug when defining ignorable expressions that are later enclosed in a wrapper expression (such as ZeroOrMore, OneOrMore, etc.) - found while working with Prabhu Gurumurthy, thanks Prahbu! - Fixed bug in withAttribute in which keys were automatically converted to lowercase, making it impossible to match XML attributes with uppercase characters in them. 
Using withAttribute requires
- Fixed internal bug in operatorPrecedence - when the results of a right-associative term were sent to a parse action, the wrong tokens were sent. Reported by Torsten Marek, nice job! - Added pop() method to ParseResults. If pop is called with an integer or with no arguments, it will use list semantics and update the ParseResults' list of tokens. If pop is called with a non-integer (a string, for instance), then it will use dict semantics and update the ParseResults' internal dict. Suggested by Donn Ingle, thanks Donn! - Fixed quoted string built-ins to accept '\xHH' hex characters within the string. Version 1.4.8 - October, 2007 ----------------------------- - Added new helper method nestedExpr to easily create expressions that parse lists of data in nested parentheses, braces, brackets, etc. - Added withAttribute parse action helper, to simplify creating filtering parse actions to attach to expressions returned by makeHTMLTags and makeXMLTags. Use withAttribute to qualify a starting tag with one or more required attribute values, to avoid false matches on common tags such as or
. - Added new examples nested.py and withAttribute.py to demonstrate the new features. - Added performance speedup to grammars using operatorPrecedence, instigated by Stefan Reichör - thanks for the feedback, Stefan! - Fixed bug/typo when deleting an element from a ParseResults by using the element's results name. - Fixed whitespace-skipping bug in wrapper classes (such as Group, Suppress, Combine, etc.) and when using setDebug(), reported by new pyparsing user dazzawazza on SourceForge, nice job! - Added restriction to prevent defining Word or CharsNotIn expressions with minimum length of 0 (should use Optional if this is desired), and enhanced docstrings to reflect this limitation. Issue was raised by Joey Tallieu, who submitted a patch with a slightly different solution. Thanks for taking the initiative, Joey, and please keep submitting your ideas! - Fixed bug in makeHTMLTags that did not detect HTML tag attributes with no '= value' portion (such as ""), reported by hamidh on the pyparsing wiki - thanks! - Fixed minor bug in makeHTMLTags and makeXMLTags, which did not accept whitespace in closing tags. Version 1.4.7 - July, 2007 -------------------------- - NEW NOTATION SHORTCUT: ParserElement now accepts results names using a notational shortcut, following the expression with the results name in parentheses. So this: stats = "AVE:" + realNum.setResultsName("average") + \ "MIN:" + realNum.setResultsName("min") + \ "MAX:" + realNum.setResultsName("max") can now be written as this: stats = "AVE:" + realNum("average") + \ "MIN:" + realNum("min") + \ "MAX:" + realNum("max") The intent behind this change is to make it simpler to define results names for significant fields within the expression, while keeping the grammar syntax clean and uncluttered. - Fixed bug when packrat parsing is enabled, with cached ParseResults being updated by subsequent parsing. Reported on the pyparsing wiki by Kambiz, thanks! 
- Fixed bug in operatorPrecedence for unary operators with left associativity, if multiple operators were given for the same term. - Fixed bug in example simpleBool.py, corrected precedence of "and" vs. "or" operations. - Fixed bug in Dict class, in which keys were converted to strings whether they needed to be or not. Have narrowed this logic to convert keys to strings only if the keys are ints (which would confuse __getitem__ behavior for list indexing vs. key lookup). - Added ParserElement method setBreak(), which will invoke the pdb module's set_trace() function when this expression is about to be parsed. - Fixed bug in StringEnd in which reading off the end of the input string raises an exception - should match. Resolved while answering a question for Shawn on the pyparsing wiki. Version 1.4.6 - April, 2007 --------------------------- - Simplified constructor for ParseFatalException, to support common exception construction idiom: raise ParseFatalException, "unexpected text: 'Spanish Inquisition'" - Added method getTokensEndLoc(), to be called from within a parse action, for those parse actions that need both the starting *and* ending location of the parsed tokens within the input text. - Enhanced behavior of keepOriginalText so that named parse fields are preserved, even though tokens are replaced with the original input text matched by the current expression. Also, cleaned up the stack traversal to be more robust. Suggested by Tim Arnold - thanks, Tim! - Fixed subtle bug in which countedArray (and similar dynamic expressions configured in parse actions) failed to match within Or, Each, FollowedBy, or NotAny. Reported by Ralf Vosseler, thanks for your patience, Ralf! - Fixed Unicode bug in upcaseTokens and downcaseTokens parse actions, scanString, and default debugging actions; reported (and patch submitted) by Nikolai Zamkovoi, spasibo! - Fixed bug when saving a tuple as a named result. 
The returned token list gave the proper tuple value, but accessing the result by name only gave the first element of the tuple. Reported by Poromenos, nice catch! - Fixed bug in makeHTMLTags/makeXMLTags, which failed to match tag attributes with namespaces. - Fixed bug in SkipTo when setting include=True, to have the skipped-to tokens correctly included in the returned data. Reported by gunars on the pyparsing wiki, thanks! - Fixed typo bug in OnlyOnce.reset method, omitted self argument. Submitted by eike welk, thanks for the lint-picking! - Added performance enhancement to Forward class, suggested by akkartik on the pyparsing Wiki discussion, nice work! - Added optional asKeyword to Word constructor, to indicate that the given word pattern should be matched only as a keyword, that is, it should only match if it is within word boundaries. - Added S-expression parser to examples directory. - Added macro substitution example to examples directory. - Added holaMundo.py example, excerpted from Marco Alfonso's blog - muchas gracias, Marco! - Modified internal cyclic references in ParseResults to use weakrefs; this should help reduce the memory footprint of large parsing programs, at some cost to performance (3-5%). Suggested by bca48150 on the pyparsing wiki, thanks! - Enhanced the documentation describing the vagaries and idiosyncrasies of parsing strings with embedded tabs, and the impact on: . parse actions . scanString . col and line helper functions (Suggested by eike welk in response to some unexplained inconsistencies between parsed location and offsets in the input string.) - Cleaned up internal decorators to preserve function names, docstrings, etc. Version 1.4.5 - December, 2006 ------------------------------ - Removed debugging print statement from QuotedString class. Sorry for not stripping this out before the 1.4.4 release! - A significant performance improvement, the first one in a while! 
For my Verilog parser, this version of pyparsing is about double the speed - YMMV. - Added support for pickling of ParseResults objects. (Reported by Jeff Poole, thanks Jeff!) - Fixed minor bug in makeHTMLTags that did not recognize tag attributes with embedded '-' or '_' characters. Also, added support for passing expressions to makeHTMLTags and makeXMLTags, and used this feature to define the globals anyOpenTag and anyCloseTag. - Fixed error in alphas8bit, I had omitted the y-with-umlaut character. - Added punc8bit string to complement alphas8bit - it contains all the non-alphabetic, non-blank 8-bit characters. - Added commonHTMLEntity expression, to match common HTML "ampersand" codes, such as "<", ">", "&", " ", and """. This expression also defines a results name 'entity', which can be used to extract the entity field (that is, "lt", "gt", etc.). Also added built-in parse action replaceHTMLEntity, which can be attached to commonHTMLEntity to translate "<", ">", "&", " ", and """ to "<", ">", "&", " ", and "'". - Added example, htmlStripper.py, that strips HTML tags and scripts from HTML pages. It also translates common HTML entities to their respective characters. Version 1.4.4 - October, 2006 ------------------------------- - Fixed traceParseAction decorator to also trap and record exception returns from parse actions, and to handle parse actions with 0, 1, 2, or 3 arguments. - Enhanced parse action normalization to support using classes as parse actions; that is, the class constructor is called at parse time and the __init__ function is called with 0, 1, 2, or 3 arguments. If passing a class as a parse action, the __init__ method must use one of the valid parse action parameter list formats. (This technique is useful when using pyparsing to compile parsed text into a series of application objects - see the new example simpleBool.py.) - Fixed bug in ParseResults when setting an item using an integer index. (Reported by Christopher Lambacher, thanks!) 
- Fixed whitespace-skipping bug, patch submitted by Paolo Losi - grazie, Paolo! - Fixed bug when a Combine contained an embedded Forward expression, reported by cie on the pyparsing wiki - good catch! - Fixed listAllMatches bug, when a listAllMatches result was nested within another result. (Reported by don pasquale on comp.lang.python, well done!) - Fixed bug in ParseResults items() method, when returning an item marked as listAllMatches=True - Fixed bug in definition of cppStyleComment (and javaStyleComment) in which '//' line comments were not continued to the next line if the line ends with a '\'. (Reported by eagle-eyed Ralph Corderoy!) - Optimized re's for cppStyleComment and quotedString for better re performance - also provided by Ralph Corderoy, thanks! - Added new example, indentedGrammarExample.py, showing how to define a grammar using indentation to show grouping (as Python does for defining statement nesting). Instigated by an e-mail discussion with Andrew Dalke, thanks Andrew! - Added new helper operatorPrecedence (based on e-mail list discussion with Ralph Corderoy and Paolo Losi), to facilitate definition of grammars for expressions with unary and binary operators. For instance, this grammar defines a 6-function arithmetic expression grammar, with unary plus and minus, proper operator precedence, and right- and left-associativity: expr = operatorPrecedence( operand, [("!", 1, opAssoc.LEFT), ("^", 2, opAssoc.RIGHT), (oneOf("+ -"), 1, opAssoc.RIGHT), (oneOf("* /"), 2, opAssoc.LEFT), (oneOf("+ -"), 2, opAssoc.LEFT),] ) Also added example simpleArith.py and simpleBool.py to provide more detailed code samples using this new helper method. - Added new helpers matchPreviousLiteral and matchPreviousExpr, for creating adaptive parsing expressions that match the same content as was parsed in a previous parse expression. For instance: first = Word(nums) matchExpr = first + ":" + matchPreviousLiteral(first) will match "1:1", but not "1:2". 
Since this matches at the literal level, this will also match the leading "1:1" in "1:10". In contrast: first = Word(nums) matchExpr = first + ":" + matchPreviousExpr(first) will *not* match the leading "1:1" in "1:10"; the expressions are evaluated first, and then compared, so "1" is compared with "10". - Added keepOriginalText parse action. Sometimes pyparsing's whitespace-skipping leaves out too much whitespace. Adding this parse action will restore any internal whitespace for a parse expression. This is especially useful when defining expressions for scanString or transformString applications. - Added __add__ method for ParseResults class, to better support using Python sum built-in for summing ParseResults objects returned from scanString. - Added reset method for the new OnlyOnce class wrapper for parse actions (to allow a grammar to be used multiple times). - Added optional maxMatches argument to scanString and searchString, to short-circuit scanning after 'n' expression matches are found. Version 1.4.3 - July, 2006 ------------------------------ - Fixed implementation of multiple parse actions for an expression (added in 1.4.2). . setParseAction() reverts to its previous behavior, setting one (or more) actions for an expression, overwriting any action or actions previously defined . new method addParseAction() appends one or more parse actions to the list of parse actions attached to an expression Now it is harder to accidentally append parse actions to an expression, when what you wanted to do was overwrite whatever had been defined before. (Thanks, Jean-Paul Calderone!) - Simplified interface to parse actions that do not require all 3 parse action arguments. Very rarely do parse actions require more than just the parsed tokens, yet parse actions still require all 3 arguments including the string being parsed and the location within the string where the parse expression was matched. With this release, parse actions may now be defined to be called as: . 
fn(string,locn,tokens) (the current form) . fn(locn,tokens) . fn(tokens) . fn() The setParseAction and addParseAction methods will internally decorate the provided parse actions with compatible wrappers to conform to the full (string,locn,tokens) argument sequence. - REMOVED SUPPORT FOR RETURNING PARSE LOCATION FROM A PARSE ACTION. I announced this in March, 2004, and gave a final warning in the last release. Now you can return a tuple from a parse action, and it will be treated like any other return value (i.e., the tuple will be substituted for the incoming tokens passed to the parse action, which is useful when trying to parse strings into tuples). - Added setFailAction method, taking a callable function fn that takes the arguments fn(s,loc,expr,err) where: . s - string being parsed . loc - location where expression match was attempted and failed . expr - the parse expression that failed . err - the exception thrown The function returns no values. It may throw ParseFatalException if it is desired to stop parsing immediately. (Suggested by peter21081944 on wikispaces.com) - Added class OnlyOnce as helper wrapper for parse actions. OnlyOnce only permits a parse action to be called one time, after which all subsequent calls throw a ParseException. - Added traceParseAction decorator to help debug parse actions. Simply insert "@traceParseAction" ahead of the definition of your parse action, and each invocation will be displayed, along with incoming arguments, and returned value. - Fixed bug when copying ParserElements using copy() or setResultsName(). (Reported by Dan Thill, great catch!) - Fixed bug in asXML() where token text contains <, >, and & characters - generated XML now escapes these as <, > and &. (Reported by Jacek Sieka, thanks!) - Fixed bug in SkipTo() when searching for a StringEnd(). (Reported by Pete McEvoy, thanks Pete!) - Fixed "except Exception" statements, the most critical added as part of the packrat parsing enhancement. 
(Thanks, Erick Tryzelaar!) - Fixed end-of-string infinite looping on LineEnd and StringEnd expressions. (Thanks again to Erick Tryzelaar.) - Modified setWhitespaceChars to return self, to be consistent with other ParserElement modifiers. (Suggested by Erick Tryzelaar.) - Fixed bug/typo in new ParseResults.dump() method. - Fixed bug in searchString() method, in which only the first token of an expression was returned. searchString() now returns a ParseResults collection of all search matches. - Added example program removeLineBreaks.py, a string transformer that converts text files with hard line-breaks into one with line breaks only between paragraphs. - Added example program listAllMatches.py, to illustrate using the listAllMatches option when specifying results names (also shows new support for passing lists to oneOf). - Added example program linenoExample.py, to illustrate using the helper methods lineno, line, and col, and returning objects from a parse action. - Added example program parseListString.py, which can parse the string representation of a Python list back into a true list. Taken mostly from my PyCon presentation examples, but now with support for tuple elements, too! Version 1.4.2 - April 1, 2006 (No foolin'!) ------------------------------------------- - Significant speedup from memoizing nested expressions (a technique known as "packrat parsing"), thanks to Chris Lesniewski-Laas! Your mileage may vary, but my Verilog parser almost doubled in speed to over 600 lines/sec! This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method ParserElement.enablePackrat(). If your program uses psyco to "compile as you go", you must call enablePackrat before calling psyco.full(). If you do not do this, Python will crash. 
For best results, call enablePackrat() immediately after importing pyparsing. - Added new helper method countedArray(expr), for defining patterns that start with a leading integer to indicate the number of array elements, followed by that many elements, matching the given expr parse expression. For instance, this two-liner: wordArray = countedArray(Word(alphas)) print wordArray.parseString("3 Practicality beats purity")[0] returns the parsed array of words: ['Practicality', 'beats', 'purity'] The leading token '3' is suppressed, although it is easily obtained from the length of the returned array. (Inspired by e-mail discussion with Ralf Vosseler.) - Added support for attaching multiple parse actions to a single ParserElement. (Suggested by Dan "Dang" Griffith - nice idea, Dan!) - Added support for asymmetric quoting characters in the recently-added QuotedString class. Now you can define your own quoted string syntax like "<>". To define this custom form of QuotedString, your code would define: dblAngleQuotedString = QuotedString('<<',endQuoteChar='>>') QuotedString also supports escaped quotes, escape character other than '\', and multiline. - Changed the default value returned internally by Optional, so that None can be used as a default value. (Suggested by Steven Bethard - I finally saw the light!) - Added dump() method to ParseResults, to make it easier to list out and diagnose values returned from calling parseString. - A new example, a search query string parser, submitted by Steven Mooij and Rudolph Froger - a very interesting application, thanks! - Added an example that parses the BNF in Python's Grammar file, in support of generating Python grammar documentation. (Suggested by J H Stovall.) - A new example, submitted by Tim Cera, of a flexible parser module, using a simple config variable to adjust parsing for input formats that have slight variations - thanks, Tim! 
- Added an example for parsing Roman numerals, showing the capability of parse actions to "compile" Roman numerals into their integer values during parsing. - Added a new docs directory, for additional documentation or help. Currently, this includes the text and examples from my recent presentation at PyCon. - Fixed another typo in CaselessKeyword, thanks Stefan Behnel. - Expanded oneOf to also accept tuples, not just lists. This really should be sufficient... - Added deprecation warnings when tuple is returned from a parse action. Looking back, I see that I originally deprecated this feature in March, 2004, so I'm guessing people really shouldn't have been using this feature - I'll drop it altogether in the next release, which will allow users to return a tuple from a parse action (which is really handy when trying to reconstruct tuples from a tuple string representation!). Version 1.4.1 - February, 2006 ------------------------------ - Converted generator expression in QuotedString class to list comprehension, to retain compatibility with Python 2.3. (Thanks, Titus Brown for the heads-up!) - Added searchString() method to ParserElement, as an alternative to using "scanString(instring).next()[0][0]" to search through a string looking for a substring matching a given parse expression. (Inspired by e-mail conversation with Dave Feustel.) - Modified oneOf to accept lists of strings as well as a single string of space-delimited literals. (Suggested by Jacek Sieka - thanks!) - Removed deprecated use of Upcase in pyparsing test code. (Also caught by Titus Brown.) - Removed lstrip() call from Literal - too aggressive in stripping whitespace which may be valid for some grammars. (Point raised by Jacek Sieka). Also, made Literal more robust in the event of passing an empty string. - Fixed bug in replaceWith when returning None. 
- Added cautionary documentation for Forward class when assigning a MatchFirst expression, as in: fwdExpr << a | b | c Precedence of operators causes this to be evaluated as: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. Users must explicitly group the values inserted into the Forward: fwdExpr << (a | b | c) (Suggested by Scot Wilcoxon - thanks, Scot!) Version 1.4 - January 18, 2006 ------------------------------ - Added Regex class, to permit definition of complex embedded expressions using regular expressions. (Enhancement provided by John Beisley, great job!) - Converted implementations of Word, oneOf, quoted string, and comment helpers to utilize regular expression matching. Performance improvements in the 20-40% range. - Added QuotedString class, to support definition of non-standard quoted strings (Suggested by Guillaume Proulx, thanks!) - Added CaselessKeyword class, to streamline grammars with, well, caseless keywords (Proposed by Stefan Behnel, thanks!) - Fixed bug in SkipTo, when using an ignoreable expression. (Patch provided by Anonymous, thanks, whoever-you-are!) - Fixed typo in NoMatch class. (Good catch, Stefan Behnel!) - Fixed minor bug in _makeTags(), using string.printables instead of pyparsing.printables. - Cleaned up some of the expressions created by makeXXXTags helpers, to suppress extraneous <> characters. - Added some grammar definition-time checking to verify that a grammar is being built using proper ParserElements. - Added examples: . LAparser.py - linear algebra C preprocessor (submitted by Mike Ellis, thanks Mike!) . wordsToNum.py - converts word description of a number back to the original number (such as 'one hundred and twenty three' -> 123) . updated fourFn.py to support unary minus, added BNF comments Version 1.3.3 - September 12, 2005 ---------------------------------- - Improved support for Unicode strings that would be returned using srange. 
Added greetingInKorean.py example, for a Korean version of "Hello, World!" using Unicode. (Thanks, June Kim!) - Added 'hexnums' string constant (nums+"ABCDEFabcdef") for defining hexadecimal value expressions. - NOTE: ===THIS CHANGE MAY BREAK EXISTING CODE=== Modified tag and results definitions returned by makeHTMLTags(), to better support the looseness of HTML parsing. Tags to be parsed are now caseless, and keys generated for tag attributes are now converted to lower case. Formerly, makeXMLTags("XYZ") would return a tag with results name of "startXYZ", this has been changed to "startXyz". If this tag is matched against '', the matched keys formerly would be "Abc", "DEF", and "ghi"; keys are now converted to lower case, giving keys of "abc", "def", and "ghi". These changes were made to try to address the lax case sensitivity agreement between start and end tags in many HTML pages. No changes were made to makeXMLTags(), which assumes more rigorous parsing rules. Also, cleaned up case-sensitivity bugs in closing tags, and switched to using Keyword instead of Literal class for tags. (Thanks, Steve Young, for getting me to look at these in more detail!) - Added two helper parse actions, upcaseTokens and downcaseTokens, which will convert matched text to all uppercase or lowercase, respectively. - Deprecated Upcase class, to be replaced by upcaseTokens parse action. - Converted messages sent to stderr to use warnings module, such as when constructing a Literal with an empty string, one should use the Empty() class or the empty helper instead. - Added ' ' (space) as an escapable character within a quoted string. - Added helper expressions for common comment types, in addition to the existing cStyleComment (/*...*/) and htmlStyleComment () . dblSlashComment = // ... (to end of line) . cppStyleComment = cStyleComment or dblSlashComment . javaStyleComment = cppStyleComment . pythonStyleComment = # ... 
(to end of line) Version 1.3.2 - July 24, 2005 ----------------------------- - Added Each class as an enhanced version of And. 'Each' requires that all given expressions be present, but may occur in any order. Special handling is provided to group ZeroOrMore and OneOrMore elements that occur out-of-order in the input string. You can also construct 'Each' objects by joining expressions with the '&' operator. When using the Each class, results names are strongly recommended for accessing the matched tokens. (Suggested by Pradam Amini - thanks, Pradam!) - Stricter interpretation of 'max' qualifier on Word elements. If the 'max' attribute is specified, matching will fail if an input field contains more than 'max' consecutive body characters. For example, previously, Word(nums,max=3) would match the first three characters of '0123456', returning '012' and continuing parsing at '3'. Now, when constructed using the max attribute, Word will raise an exception with this string. - Cleaner handling of nested dictionaries returned by Dict. No longer necessary to dereference sub-dictionaries as element [0] of their parents. === NOTE: THIS CHANGE MAY BREAK SOME EXISTING CODE, BUT ONLY IF PARSING NESTED DICTIONARIES USING THE LITTLE-USED DICT CLASS === (Prompted by discussion thread on the Python Tutor list, with contributions from Danny Yoo, Kent Johnson, and original post by Liam Clarke - thanks all!) Version 1.3.1 - June, 2005 ---------------------------------- - Added markInputline() method to ParseException, to display the input text line location of the parsing exception. (Thanks, Stefan Behnel!) - Added setDefaultKeywordChars(), so that Keyword definitions using a custom keyword character set do not all need to add the keywordChars constructor argument (similar to setDefaultWhitespaceChars()). (suggested by rzhanka on the SourceForge pyparsing forum.) - Simplified passing debug actions to setDebugAction(). 
You can now pass 'None' for a debug action if you want to take the default debug behavior. To suppress a particular debug action, you can pass the pyparsing method nullDebugAction. - Refactored parse exception classes, moved all behavior to ParseBaseException, and the former ParseException is now a subclass of ParseBaseException. Added a second subclass, ParseFatalException, as a subclass of ParseBaseException. User-defined parse actions can raise ParseFatalException if a data inconsistency is detected (such as a begin-tag/end-tag mismatch), and this will stop all parsing immediately. (Inspired by e-mail thread with Michele Petrazzo - thanks, Michele!) - Added helper methods makeXMLTags and makeHTMLTags, that simplify the definition of XML or HTML tag parse expressions for a given tagname. Both functions return a pair of parse expressions, one for the opening tag (that is, '') and one for the closing tag (''). The opening tag name also recognizes any attribute definitions that have been included in the opening tag, as well as an empty tag (one with a trailing '/', as in '' which is equivalent to ''). makeXMLTags uses stricter XML syntax for attributes, requiring that they be enclosed in double quote characters - makeHTMLTags is more lenient, and accepts single-quoted strings or any contiguous string of characters up to the next whitespace character or '>' character. Attributes can be retrieved as dictionary or attribute values of the returned results from the opening tag. - Added example minimath2.py, a refinement on fourFn.py that adds an interactive session and support for variables. (Thanks, Steven Siew!) - Added performance improvement, up to 20% reduction! (Found while working with Wolfgang Borgert on performance tuning of his TTCN3 parser.) - And another performance improvement, up to 25%, when using scanString! (Found while working with Henrik Westlund on his C header file scanner.) - Updated UML diagrams to reflect latest class/method changes. 
Version 1.3 - March, 2005 ---------------------------------- - Added new Keyword class, as a special form of Literal. Keywords must be followed by whitespace or other non-keyword characters, to distinguish them from variables or other identifiers that just happen to start with the same characters as a keyword. For instance, the input string containing "ifOnlyIfOnly" will match a Literal("if") at the beginning and in the middle, but will fail to match a Keyword("if"). Keyword("if") will match only strings such as "if only" or "if(only)". (Proposed by Wolfgang Borgert, and Berteun Damman separately requested this on comp.lang.python - great idea!) - Added setWhitespaceChars() method to override the characters to be skipped as whitespace before matching a particular ParseElement. Also added the class-level method setDefaultWhitespaceChars(), to allow users to override the default set of whitespace characters (space, tab, newline, and return) for all subsequently defined ParseElements. (Inspired by Klaas Hofstra's inquiry on the Sourceforge pyparsing forum.) - Added helper parse actions to support some very common parse action use cases: . replaceWith(replStr) - replaces the matching tokens with the provided replStr replacement string; especially useful with transformString() . removeQuotes - removes first and last character from string enclosed in quotes (note - NOT the same as the string strip() method, as only a single character is removed at each end) - Added copy() method to ParseElement, to make it easier to define different parse actions for the same basic parse expression. (Note, copy is implicitly called when using setResultsName().) (The following changes were posted to CVS as Version 1.2.3 - October-December, 2004) - Added support for Unicode strings in creating grammar definitions. (Big thanks to Gavin Panella!) 
- Added constant alphas8bit to include the following 8-bit characters: ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþ - Added srange() function to simplify definition of Word elements, using regexp-like '[A-Za-z0-9]' syntax. This also simplifies referencing common 8-bit characters. - Fixed bug in Dict when a single element Dict was embedded within another Dict. (Thanks Andy Yates for catching this one!) - Added 'formatted' argument to ParseResults.asXML(). If set to False, suppresses insertion of whitespace for pretty-print formatting. Default equals True for backward compatibility. - Added setDebugActions() function to ParserElement, to allow user-defined debugging actions. - Added support for escaped quotes (either in \', \", or doubled quote form) to the predefined expressions for quoted strings. (Thanks, Ero Carrera!) - Minor performance improvement (~5%) converting "char in string" tests to "char in dict". (Suggested by Gavin Panella, cool idea!) Version 1.2.2 - September 27, 2004 ---------------------------------- - Modified delimitedList to accept an expression as the delimiter, instead of only accepting strings. - Modified ParseResults, to convert integer field keys to strings (to avoid confusion with list access). - Modified Combine, to convert all embedded tokens to strings before combining. - Fixed bug in MatchFirst in which parse actions would be called for expressions that only partially match. (Thanks, John Hunter!) - Fixed bug in fourFn.py example that fixes right-associativity of ^ operator. (Thanks, Andrea Griffini!) - Added class FollowedBy(expression), to look ahead in the input string without consuming tokens. - Added class NoMatch that never matches any input. Can be useful in debugging, and in very specialized grammars. - Added example pgn.py, for parsing chess game files stored in Portable Game Notation. (Thanks, Alberto Santini!) 
Version 1.2.1 - August 19, 2004 ------------------------------- - Added SkipTo(expression) token type, simplifying grammars that only want to specify delimiting expressions, and want to match any characters between them. - Added helper method dictOf(key,value), making it easier to work with the Dict class. (Inspired by Pavel Volkovitskiy, thanks!). - Added optional argument listAllMatches (default=False) to setResultsName(). Setting listAllMatches to True overrides the default modal setting of tokens to results names; instead, the results name acts as an accumulator for all matching tokens within the local repetition group. (Suggested by Amaury Le Leyzour - thanks!) - Fixed bug in ParseResults, throwing exception when trying to extract slice, or make a copy using [:]. (Thanks, Wilson Fowlie!) - Fixed bug in transformString() when the input string contains 's (Thanks, Rick Walia!). - Fixed bug in returning tokens from un-Grouped And's, Or's and MatchFirst's, where too many tokens would be included in the results, confounding parse actions and returned results. - Fixed bug in naming ParseResults returned by And's, Or's, and Match First's. - Fixed bug in LineEnd() - matching this token now correctly consumes and returns the end of line "\n". - Added a beautiful example for parsing Mozilla calendar files (Thanks, Petri Savolainen!). - Added support for dynamically modifying Forward expressions during parsing. Version 1.2 - 20 June 2004 -------------------------- - Added definition for htmlComment to help support HTML scanning and parsing. - Fixed bug in generating XML for Dict classes, in which trailing item was duplicated in the output XML. - Fixed release bug in which scanExamples.py was omitted from release files. - Fixed bug in transformString() when parse actions are not defined on the outermost parser element. - Added example urlExtractor.py, as another example of using scanString and parse actions. 
Version 1.2beta3 - 4 June 2004 ------------------------------ - Added White() token type, analogous to Word, to match on whitespace characters. Use White in parsers with significant whitespace (such as configuration file parsers that use indentation to indicate grouping). Construct White with a string containing the whitespace characters to be matched. Similar to Word, White also takes optional min, max, and exact parameters. - As part of supporting whitespace-significant parsing, added parseWithTabs() method to ParserElement, to override the default behavior in parseString of automatically expanding tabs to spaces. To retain tabs during parsing, call parseWithTabs() before calling parseString(), parseFile() or scanString(). (Thanks, Jean-Guillaume Paradis for catching this, and for your suggestions on whitespace-significant parsing.) - Added transformString() method to ParseElement, as a complement to scanString(). To use transformString, define a grammar and attach a parse action to the overall grammar that modifies the returned token list. Invoking transformString() on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. transformString() returns the resulting transformed string. (Note: transformString() does *not* automatically expand tabs to spaces.) Also added scanExamples.py to the examples directory to show sample uses of scanString() and transformString(). - Removed group() method that was introduced in beta2. This turns out NOT to be equivalent to nesting within a Group() object, and I'd prefer not to sow more seeds of confusion. - Fixed behavior of asXML() where tags for groups were incorrectly duplicated. (Thanks, Brad Clements!) - Changed beta version message to display to stderr instead of stdout, to make asXML() easier to use. (Thanks again, Brad.) 
Hopefully the implementation of methods such as keys(), items(), len(), etc. on ParseResults will make such direct attribute accesses unnecessary.
- Another performance speedup, 5-10%, inspired by Dang! Plus about a 20% speedup, by pre-constructing and caching exception objects instead of constructing them on the fly.
- Cleaned up default listing of some parse element types, to more closely match ordinary BNF. Instead of the form <classname>:[contents-list], some changes are:
- **MINOR API CHANGE** - Added joinString argument to the __init__ method of Combine (Thanks, Thomas Kalka). joinString defaults to "", but some applications might choose some other string to use instead, such as a blank or newline. joinString was inserted as the second argument to __init__, so if you have code that specifies an adjacent value, without using 'adjacent=', this code will break. - Modified LineStart to recognize the start of an empty line. - Added optional caseless flag to oneOf(), to create a list of CaselessLiteral tokens instead of Literal tokens. - Added some enhancements to the SQL example: . Oracle-style comments (Thanks to Harald Armin Massa) . simple WHERE clause - Minor performance speedup - 5-15% Version 1.0.5 - 19 Jan 2004 ---------------------------- - Added scanString() generator method to ParseElement, to support regex-like pattern-searching - Added items() list to ParseResults, to return named results as a list of (key,value) pairs - Fixed memory overflow in asList() for deeply nested ParseResults (Thanks, Sverrir Valgeirsson) - Minor performance speedup - 10-15% Version 1.0.4 - 8 Jan 2004 --------------------------- - Added positional tokens StringStart, StringEnd, LineStart, and LineEnd - Added commaSeparatedList to pre-defined global token definitions; also added commasep.py to the examples directory, to demonstrate the differences between parsing comma-separated data and simple line-splitting at commas - Minor API change: delimitedList does not automatically enclose the list elements in a Group, but makes this the responsibility of the caller; also, if invoked using 'combine=True', the list delimiters are also included in the returned text (good for scoped variables, such as a.b.c or a::b::c, or for directory paths such as a/b/c) - Performance speed-up again, 30-40% - Added httpServerLogParser.py to examples directory, as this is a common parsing task Version 1.0.3 - 23 Dec 2003 --------------------------- - Performance speed-up 
again, 20-40% - Added Python distutils installation setup.py, etc. (thanks, Dave Kuhlman) Version 1.0.2 - 18 Dec 2003 --------------------------- - **NOTE: Changed API again!!!** (for the last time, I hope) + Renamed module from parsing to pyparsing, to better reflect Python linkage. - Also added dictExample.py to examples directory, to illustrate usage of the Dict class. Version 1.0.1 - 17 Dec 2003 --------------------------- - **NOTE: Changed API!** + Renamed 'len' argument on Word.__init__() to 'exact' - Performance speed-up, 10-30% Version 1.0.0 - 15 Dec 2003 --------------------------- - Initial public release Version 0.1.1 thru 0.1.17 - October-November, 2003 -------------------------------------------------- - initial development iterations: - added Dict, Group - added helper methods oneOf, delimitedList - added helpers quotedString (and double and single), restOfLine, cStyleComment - added MatchFirst as an alternative to the slower Or - added UML class diagram - fixed various logic bugs ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/CODE_OF_CONDUCT.rst0000644000000000000000000000641514412577542013506 0ustar00Contributor Covenant Code of Conduct ==================================== Our Pledge ---------- In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
Our Standards ------------- Examples of behavior that contributes to creating a positive environment include: - Using welcoming and inclusive language - Being respectful of differing viewpoints and experiences - Gracefully accepting constructive criticism - Focusing on what is best for the community - Showing empathy towards other community members Examples of unacceptable behavior by participants include: - The use of sexualized language or imagery and unwelcome sexual attention or advances - Trolling, insulting/derogatory comments, and personal or political attacks - Public or private harassment - Publishing others’ private information, such as a physical or electronic address, without explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting Our Responsibilities -------------------- Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. Scope ----- This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
This Code of Conduct is adapted from the `Contributor Covenant <https://www.contributor-covenant.org>`__, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
- [pyparsing sub-reddit](https://www.reddit.com/r/pyparsing/) - still very lightly attended, but open to anyone wishing to post questions or links related to pyparsing. An alternative channel to StackOverflow for asking questions. - [online docs](https://pyparsing-docs.readthedocs.io/en/latest/index.html) and a separately maintained set of class library docs [here](https://pyparsing-doc.neocities.org/) - These docs are auto-generated from the docstrings embedded in the pyparsing classes, so they can also be viewed in the interactive Python console's and Jupyter Notebook's `help` commands. - [the pyparsing Wikispaces archive](https://github.com/pyparsing/wikispaces_archive) - Before hosting on GitHub, pyparsing had a separate wiki on the wikispaces.com website. In 2018 this page was discontinued. The discussion content archive has been reformatted into Markdown and can be viewed by year at the GitHub repository. Just as with some of the older questions on StackOverflow, some of these older posts may reflect out-of-date pyparsing and Python features. - [submit an issue](https://github.com/pyparsing/pyparsing/issues) - If you have a problem with pyparsing that looks like an actual bug, or have an idea for a feature to add to pyparsing please submit an issue on GitHub. Some pyparsing behavior may be counter-intuitive, so try to review some of the other resources first, or some of the other open and closed issues. Or post your question on SO or reddit. But don't wait until you are desperate and frustrated - just ask! :) ## Submitting examples If you have an example you wish to submit, please follow these guidelines. - **License - Submitted example code must be available for distribution with the rest of pyparsing under the MIT open source license.** - Please follow PEP8 name and coding guidelines, and use the black formatter to auto-format code. 
any_keyword = pp.MatchFirst(pp.Keyword(kw) for kw in python_keywords)
- New external dependencies will require substantial justification, and if included, will need to be guarded for `ImportError`s raised if the external module is not installed. ## Some coding points These coding styles are encouraged whether submitting code for core pyparsing or for submitting an example. - PEP8 - pyparsing has historically been very non-compliant with many PEP8 guidelines, especially those regarding name casing. I had just finished several years of Java and Smalltalk development, and camel case seemed to be the future trend in coding styles. As of version 3.0.0, pyparsing is moving over to PEP8 naming, while maintaining compatibility with existing parser code by defining synonyms using the legacy names. These names will be retained until a future release (probably 4.0), to provide a migration path for current pyparsing-dependent applications - DO NOT MODIFY OR REMOVE THESE NAMES. See more information at the [PEP8 wiki page](https://github.com/pyparsing/pyparsing/wiki/PEP-8-planning). - No backslashes for line continuations. Continuation lines for expressions in ()'s should start with the continuing operator: really_long_line = (something + some_other_long_thing + even_another_long_thing) - Maximum line length is 120 characters. (Black will override this.) - Changes to core pyparsing must be compatible back to Py3.6 without conditionalizing. Later Py3 features may be used in examples by way of illustration. - str.format() statements should use named format arguments (unless this proves to be a slowdown at parse time). - List, tuple, and dict literals should include a trailing comma after the last element, which reduces changeset clutter when another element gets added to the end. - New features should be accompanied by updates to unitTests.py and a bullet in the CHANGES file. - Do not modify pyparsing_archive.py. This file is kept as a reference artifact from when pyparsing was distributed as a single source file. 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/LICENSE0000644000000000000000000000177714412577542011512 0ustar00Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1687654186.082798 pyparsing-3.1.1/README.rst0000644000000000000000000000705214445707452012165 0ustar00PyParsing -- A Python Parsing Module ==================================== |Version| |Build Status| |Coverage| |License| |Python Versions| |Snyk Score| Introduction ============ The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. The pyparsing module provides a library of classes that client code uses to construct the grammar directly in Python code. 
*[Since first writing this description of pyparsing in late 2003, this technique for developing parsers has become more widespread, under the name Parsing Expression Grammars - PEGs. See more information on PEGs* `here `__ *.]* Here is a program to parse ``"Hello, World!"`` (or any greeting of the form ``"salutation, addressee!"``): .. code:: python from pyparsing import Word, alphas greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print(hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! -> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operator definitions. The parsed results returned from ``parseString()`` is a collection of type ``ParseResults``, which can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.) - quoted strings - embedded comments The examples directory includes a simple SQL parser, simple CORBA IDL parser, a config file parser, a chemical formula parser, and a four- function algebraic notation parser, among many others. Documentation ============= There are many examples in the online docstrings of the classes and methods in pyparsing. You can find them compiled into `online docs `__. Additional documentation resources and project info are listed in the online `GitHub wiki `__. An entire directory of examples can be found `here `__. License ======= MIT License. See header of the `pyparsing __init__.py `__ file. History ======= See `CHANGES `__ file. .. |Build Status| image:: https://github.com/pyparsing/pyparsing/actions/workflows/ci.yml/badge.svg :target: https://github.com/pyparsing/pyparsing/actions/workflows/ci.yml .. 
|Coverage| image:: https://codecov.io/gh/pyparsing/pyparsing/branch/master/graph/badge.svg :target: https://codecov.io/gh/pyparsing/pyparsing .. |Version| image:: https://img.shields.io/pypi/v/pyparsing?style=flat-square :target: https://pypi.org/project/pyparsing/ :alt: Version .. |License| image:: https://img.shields.io/pypi/l/pyparsing.svg?style=flat-square :target: https://pypi.org/project/pyparsing/ :alt: License .. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pyparsing.svg?style=flat-square :target: https://pypi.org/project/python-liquid/ :alt: Python versions .. |Snyk Score| image:: https://snyk.io//advisor/python/pyparsing/badge.svg :target: https://snyk.io//advisor/python/pyparsing :alt: pyparsing ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/CODE_OF_CONDUCT.rst0000644000000000000000000000004414412577542014426 0ustar00.. include:: ../CODE_OF_CONDUCT.rst ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/docs/HowToUsePyparsing.rst0000644000000000000000000017555014443622312015570 0ustar00========================== Using the pyparsing module ========================== :author: Paul McGuire :address: ptmcg.pm+pyparsing@gmail.com :revision: 3.1.0 :date: April, 2023 :copyright: Copyright |copy| 2003-2023 Paul McGuire. .. |copy| unicode:: 0xA9 :abstract: This document provides how-to instructions for the pyparsing library, an easy-to-use Python module for constructing and executing basic text parsers. The pyparsing module is useful for evaluating user-definable expressions, processing custom application language commands, or extracting data from formatted reports. .. sectnum:: :depth: 4 .. 
The following complete Python program will parse the greeting ``"Hello, World!"``, or any other greeting of the form "<salutation>, <addressee>!"::
Matching results may also be accessed as named attributes of the returned results, if names are defined in the definition of the token pattern, using ``set_results_name()``. Hello, World! ------------- The following complete Python program will parse the greeting ``"Hello, World!"``, or any other greeting of the form ", !":: import pyparsing as pp greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + "!" for greeting_str in [ "Hello, World!", "Bonjour, Monde!", "Hola, Mundo!", "Hallo, Welt!", ]: greeting = greet.parse_string(greeting_str) print(greeting) The parsed tokens are returned in the following form:: ['Hello', ',', 'World', '!'] ['Bonjour', ',', 'Monde', '!'] ['Hola', ',', 'Mundo', '!'] ['Hallo', ',', 'Welt', '!'] Usage notes ----------- - The pyparsing module can be used to interpret simple command strings or algebraic expressions, or can be used to extract data from text reports with complicated format and structure ("screen or report scraping"). However, it is possible that your defined matching patterns may accept invalid inputs. Use pyparsing to extract data from strings assumed to be well-formatted. - To keep up the readability of your code, use operators_ such as ``+``, ``|``, ``^``, and ``~`` to combine expressions. You can also combine string literals with ``ParseExpressions`` - they will be automatically converted to Literal_ objects. For example:: integer = Word(nums) # simple unsigned integer variable = Char(alphas) # single letter variable, such as x, z, m, etc. arith_op = one_of("+ - * /") # arithmetic operators equation = variable + "=" + integer + arith_op + integer # will match "x=2+2", etc. In the definition of ``equation``, the string ``"="`` will get added as a ``Literal("=")``, but in a more readable way. - The pyparsing module's default behavior is to ignore whitespace. This is the case for 99% of all parsers ever written. 
This allows you to write simple, clean, grammars, such as the above ``equation``, without having to clutter it up with extraneous ``ws`` markers. The ``equation`` grammar will successfully parse all of the following statements:: x=2+2 x = 2+2 a = 10 * 4 r= 1234/ 100000 Of course, it is quite simple to extend this example to support more elaborate expressions, with nesting with parentheses, floating point numbers, scientific notation, and named constants (such as ``e`` or ``pi``). See `fourFn.py `_, and `simpleArith.py `_ included in the examples directory. - To modify pyparsing's default whitespace skipping, you can use one or more of the following methods: - use the static method ``ParserElement.set_default_whitespace_chars`` to override the normal set of whitespace chars (``' \t\n'``). For instance when defining a grammar in which newlines are significant, you should call ``ParserElement.set_default_whitespace_chars(' \t')`` to remove newline from the set of skippable whitespace characters. Calling this method will affect all pyparsing expressions defined afterward. - call ``leave_whitespace()`` on individual expressions, to suppress the skipping of whitespace before trying to match the expression - use ``Combine`` to require that successive expressions must be adjacent in the input string. For instance, this expression:: real = Word(nums) + '.' + Word(nums) will match "3.14159", but will also match "3 . 12". It will also return the matched results as ['3', '.', '14159']. By changing this expression to:: real = Combine(Word(nums) + '.' + Word(nums)) it will not match numbers with embedded spaces, and it will return a single concatenated string '3.14159' as the parsed token. - Repetition of expressions can be indicated using ``*`` or ``[]`` notation. 
An expression may be multiplied by an integer value (to indicate an exact repetition count), or indexed with a tuple, representing min and max repetitions (with ``...`` representing no min or no max, depending whether it is the first or second tuple element). See the following examples, where n is used to indicate an integer value: - ``expr*3`` is equivalent to ``expr + expr + expr`` - ``expr[2, 3]`` is equivalent to ``expr + expr + Opt(expr)`` - ``expr[n, ...]`` or ``expr[n,]`` is equivalent to ``expr*n + ZeroOrMore(expr)`` (read as "at least n instances of expr") - ``expr[... ,n]`` is equivalent to ``expr*(0, n)`` (read as "0 to n instances of expr") - ``expr[...]``, ``expr[0, ...]`` and ``expr * ...`` are equivalent to ``ZeroOrMore(expr)`` - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` Note that ``expr[..., n]`` does not raise an exception if more than n exprs exist in the input stream; that is, ``expr[..., n]`` does not enforce a maximum number of expr occurrences. If this behavior is desired, then write ``expr[..., n] + ~expr``. - ``[]`` notation will also accept a stop expression using ':' slice notation: - ``expr[...:end_expr]`` is equivalent to ``ZeroOrMore(expr, stop_on=end_expr)`` - MatchFirst_ expressions are matched left-to-right, and the first match found will skip all later expressions within, so be sure to define less-specific patterns after more-specific patterns. If you are not sure which expressions are most specific, use Or_ expressions (defined using the ``^`` operator) - they will always match the longest expression, although they are more compute-intensive. - Or_ expressions will evaluate all of the specified subexpressions to determine which is the "best" match, that is, which matches the longest string in the input data. In case of a tie, the left-most expression in the Or_ list will win. 
- If parsing the contents of an entire file, pass it to the ``parse_file`` method using:: expr.parse_file(source_file) - ``ParseExceptions`` will report the location where an expected token or expression failed to match. For example, if we tried to use our "Hello, World!" parser to parse "Hello World!" (leaving out the separating comma), we would get an exception, with the message:: pyparsing.ParseException: Expected "," (6), (1,7) In the case of complex expressions, the reported location may not be exactly where you would expect. See more information under ParseException_ . - Use the ``Group`` class to enclose logical groups of tokens within a sublist. This will help organize your results into more hierarchical form (the default behavior is to return matching tokens as a flat list of matching input strings). - Punctuation may be significant for matching, but is rarely of much interest in the parsed results. Use the ``suppress()`` method to keep these tokens from cluttering up your returned lists of tokens. For example, DelimitedList_ matches a succession of one or more expressions, separated by delimiters (commas by default), but only returns a list of the actual expressions - the delimiters are used for parsing, but are suppressed from the returned output. - Parse actions can be used to convert values from strings to other data types (ints, floats, booleans, etc.). - Results names are recommended for retrieving tokens from complex expressions. It is much easier to access a token using its field name than using a positional index, especially if the expression contains optional elements. 
You can also shortcut the ``set_results_name`` call:: stats = ("AVE:" + real_num.set_results_name("average") + "MIN:" + real_num.set_results_name("min") + "MAX:" + real_num.set_results_name("max")) can more simply and cleanly be written as this:: stats = ("AVE:" + real_num("average") + "MIN:" + real_num("min") + "MAX:" + real_num("max")) - Be careful when defining parse actions that modify global variables or data structures (as in fourFn.py_), especially for low level tokens or expressions that may occur within an And_ expression; an early element of an And_ may match, but the overall expression may fail. Classes ======= All the pyparsing classes can be found in this `UML class diagram <_static/pyparsingClassDiagram_3.0.9.jpg>`_. Classes in the pyparsing module ------------------------------- ``ParserElement`` - abstract base class for all pyparsing classes; methods for code to use are: - ``parse_string(source_string, parse_all=False)`` - only called once, on the overall matching pattern; returns a ParseResults_ object that makes the matched tokens available as a list, and optionally as a dictionary, or as an object with named attributes; if ``parse_all`` is set to True, then ``parse_string`` will raise a ParseException_ if the grammar does not process the complete input string. - ``parse_file(source_file)`` - a convenience function, that accepts an input file object or filename. The file contents are passed as a string to ``parse_string()``. ``parse_file`` also supports the ``parse_all`` argument. 
- ``scan_string(source_string)`` - generator function, used to find and extract matching text in the given source string; for each matched text, returns a tuple of: - matched tokens (packaged as a ParseResults_ object) - start location of the matched text in the given source string - end location in the given source string ``scan_string`` allows you to scan through the input source string for random matches, instead of exhaustively defining the grammar for the entire source text (as would be required with ``parse_string``). - ``transform_string(source_string)`` - convenience wrapper function for ``scan_string``, to process the input source string, and replace matching text with the tokens returned from parse actions defined in the grammar (see set_parse_action_). - ``search_string(source_string)`` - another convenience wrapper function for ``scan_string``, returns a list of the matching tokens returned from each call to ``scan_string``. - ``set_name(name)`` - associate a short descriptive name for this element, useful in displaying exceptions and trace information - ``run_tests(tests_string)`` - useful development and testing method on expressions, to pass a multiline string of sample strings to test against the expression. Comment lines (beginning with ``#``) can be inserted and they will be included in the test output:: digits = Word(nums).set_name("numeric digits") real_num = Combine(digits + '.' + digits) real_num.run_tests("""\ # valid number 3.14159 # no integer part .00001 # no decimal 101 # no decimal value 101. """) will print:: # valid number 3.14159 ['3.14159'] # no integer part .00001 ^ FAIL: Expected numeric digits, found '.' (at char 0), (line:1, col:1) # no decimal 101 ^ FAIL: Expected ".", found end of text (at char 3), (line:1, col:4) # no decimal value 101. ^ FAIL: Expected numeric digits, found end of text (at char 4), (line:1, col:5) .. 
_set_results_name: - ``set_results_name(string, list_all_matches=False)`` - name to be given to tokens matching the element; if multiple tokens within a repetition group (such as ZeroOrMore_ or DelimitedList_) the default is to return only the last matching token - if ``list_all_matches`` is set to True, then a list of all the matching tokens is returned. ``expr.set_results_name("key")`` can also be written ``expr("key")`` (a results name with a trailing '*' character will be interpreted as setting ``list_all_matches`` to ``True``). Note: ``set_results_name`` returns a *copy* of the element so that a single basic element can be referenced multiple times and given different names within a complex grammar. .. _set_parse_action: - ``set_parse_action(*fn)`` - specify one or more functions to call after successful matching of the element; each function is defined as ``fn(s, loc, toks)``, where: - ``s`` is the original parse string - ``loc`` is the location in the string where matching started - ``toks`` is the list of the matched tokens, packaged as a ParseResults_ object Parse actions can have any of the following signatures:: fn(s: str, loc: int, tokens: ParseResults) fn(loc: int, tokens: ParseResults) fn(tokens: ParseResults) fn() Multiple functions can be attached to a ``ParserElement`` by specifying multiple arguments to ``set_parse_action``, or by calling ``add_parse_action``. Calls to ``set_parse_action`` will replace any previously defined parse actions. ``set_parse_action(None)`` will clear all previously defined parse actions. Each parse action function can return a modified ``toks`` list, to perform conversion, or string modifications. 
For brevity, ``fn`` may also be a lambda - here is an example of using a parse action to convert matched integer tokens from strings to integers:: int_number = Word(nums).set_parse_action(lambda s, l, t: [int(t[0])]) If ``fn`` modifies the ``toks`` list in-place, it does not need to return and pyparsing will use the modified ``toks`` list. If ``set_parse_action`` is called with an argument of ``None``, then this clears all parse actions attached to that expression. A nice short-cut for calling ``set_parse_action`` is to use it as a decorator:: identifier = Word(alphas, alphanums+"_") @identifier.set_parse_action def resolve_identifier(results: ParseResults): return variable_values.get(results[0]) (Posted by @MisterMiyagi in this SO answer: https://stackoverflow.com/a/63031959/165216) - ``add_parse_action`` - similar to ``set_parse_action``, but instead of replacing any previously defined parse actions, will append the given action or actions to the existing defined parse actions. - ``add_condition`` - a simplified form of ``add_parse_action`` if the purpose of the parse action is to simply do some validation, and raise an exception if the validation fails. Takes a method that takes the same arguments, but simply returns ``True`` or ``False``. If ``False`` is returned, an exception will be raised. 
- ``set_break(break_flag=True)`` - if ``break_flag`` is ``True``, calls ``pdb.set_trace()`` as this expression is about to be parsed - ``copy()`` - returns a copy of a ``ParserElement``; can be used to use the same parse expression in different places in a grammar, with different parse actions attached to each; a short-form ``expr()`` is equivalent to ``expr.copy()`` - ``leave_whitespace()`` - change default behavior of skipping whitespace before starting matching (mostly used internally to the pyparsing module, rarely used by client code) - ``set_whitespace_chars(chars)`` - define the set of chars to be ignored as whitespace before trying to match a specific ``ParserElement``, in place of the default set of whitespace (space, tab, newline, and return) - ``set_default_whitespace_chars(chars)`` - class-level method to override the default set of whitespace chars for all subsequently created ParserElements (including copies); useful when defining grammars that treat one or more of the default whitespace characters as significant (such as a line-sensitive grammar, to omit newline from the list of ignorable whitespace) - ``suppress()`` - convenience function to suppress the output of the given element, instead of wrapping it with a ``Suppress`` object. - ``ignore(expr)`` - function to specify parse expression to be ignored while matching defined patterns; can be called repeatedly to specify multiple expressions; useful to specify patterns of comment syntax, for example - ``set_debug(flag=True)`` - function to enable/disable tracing output when trying to match this element - ``validate()`` - function to verify that the defined grammar does not contain infinitely recursive constructs (``validate()`` is deprecated, and will be removed in a future pyparsing release. Pyparsing now supports left-recursive parsers, which this function attempted to catch.) .. 
_parse_with_tabs: - ``parse_with_tabs()`` - function to override default behavior of converting tabs to spaces before parsing the input string; rarely used, except when specifying whitespace-significant grammars using the White_ class. - ``enable_packrat()`` - a class-level static method to enable a memoizing performance enhancement, known as "packrat parsing". packrat parsing is disabled by default, since it may conflict with some user programs that use parse actions. To activate the packrat feature, your program must call the class method ``ParserElement.enable_packrat()``. For best results, call ``enable_packrat()`` immediately after importing pyparsing. - ``enable_left_recursion()`` - a class-level static method to enable pyparsing with left-recursive (LR) parsers. Similar to ``ParserElement.enable_packrat()``, your program must call the class method ``ParserElement.enable_left_recursion()`` to enable this feature. ``enable_left_recursion()`` uses a separate packrat cache, and so is incompatible with ``enable_packrat()``. Basic ParserElement subclasses ------------------------------ .. _Literal: - ``Literal`` - construct with a string to be matched exactly .. _CaselessLiteral: - ``CaselessLiteral`` - construct with a string to be matched, but without case checking; results are always returned as the defining literal, NOT as they are found in the input string .. _Keyword: - ``Keyword`` - similar to Literal_, but must be immediately followed by whitespace, punctuation, or other non-keyword characters; prevents accidental matching of a non-keyword that happens to begin with a defined keyword - ``CaselessKeyword`` - similar to Keyword_, but with caseless matching behavior as described in CaselessLiteral_. .. 
_Word: - ``Word`` - one or more contiguous characters; construct with a string containing the set of allowed initial characters, and an optional second string of allowed body characters; for instance, a common ``Word`` construct is to match a code identifier - in C, a valid identifier must start with an alphabetic character or an underscore ('_'), followed by a body that can also include numeric digits. That is, ``a``, ``i``, ``MAX_LENGTH``, ``_a1``, ``b_109_``, and ``plan9FromOuterSpace`` are all valid identifiers; ``9b7z``, ``$a``, ``.section``, and ``0debug`` are not. To define an identifier using a ``Word``, use either of the following:: Word(alphas+"_", alphanums+"_") Word(srange("[a-zA-Z_]"), srange("[a-zA-Z0-9_]")) Pyparsing also provides pre-defined strings ``identchars`` and ``identbodychars`` so that you can also write:: Word(identchars, identbodychars) If only one string given, it specifies that the same character set defined for the initial character is used for the word body; for instance, to define an identifier that can only be composed of capital letters and underscores, use one of:: ``Word("ABCDEFGHIJKLMNOPQRSTUVWXYZ_")`` ``Word(srange("[A-Z_]"))`` A ``Word`` may also be constructed with any of the following optional parameters: - ``min`` - indicating a minimum length of matching characters - ``max`` - indicating a maximum length of matching characters - ``exact`` - indicating an exact length of matching characters; if ``exact`` is specified, it will override any values for ``min`` or ``max`` - ``as_keyword`` - indicating that preceding and following characters must be whitespace or non-keyword characters - ``exclude_chars`` - a string of characters that should be excluded from init_chars and body_chars Sometimes you want to define a word using all characters in a range except for one or two of them; you can do this with the ``exclude_chars`` argument. 
This is helpful if you want to define a word with all ``printables`` except for a single delimiter character, such as '.'. Previously, you would have to create a custom string to pass to Word. With this change, you can just create ``Word(printables, exclude_chars='.')``. - ``Char`` - a convenience form of ``Word`` that will match just a single character from a string of matching characters:: single_digit = Char(nums) - ``CharsNotIn`` - similar to Word_, but matches characters not in the given constructor string (accepts only one string for both initial and body characters); also supports ``min``, ``max``, and ``exact`` optional parameters. - ``Regex`` - a powerful construct, that accepts a regular expression to be matched at the current parse position; accepts an optional ``flags`` parameter, corresponding to the flags parameter in the ``re.compile`` method; if the expression includes named sub-fields, they will be represented in the returned ParseResults_. - ``QuotedString`` - supports the definition of custom quoted string formats, in addition to pyparsing's built-in ``dbl_quoted_string`` and ``sgl_quoted_string``. ``QuotedString`` allows you to specify the following parameters: - ``quote_char`` - string of one or more characters defining the quote delimiting string - ``esc_char`` - character to escape quotes, typically backslash (default=None) - ``esc_quote`` - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - ``multiline`` - boolean indicating whether quotes can span multiple lines (default=False) - ``unquote_results`` - boolean indicating whether the matched text should be unquoted (default=True) - ``end_quote_char`` - string of one or more characters defining the end of the quote delimited string (default=None => same as ``quote_char``) .. 
_SkipTo: - ``SkipTo`` - skips ahead in the input string, accepting any characters up to the specified pattern; may be constructed with the following optional parameters: - ``include`` - if set to true, also consumes the match expression (default is false) - ``ignore`` - allows the user to specify patterns to not be matched, to prevent false matches - ``fail_on`` - if a literal string or expression is given for this argument, it defines an expression that should cause the SkipTo_ expression to fail, and not skip over that expression ``SkipTo`` can also be written using ``...``:: LBRACE, RBRACE = map(Literal, "{}") brace_expr = LBRACE + SkipTo(RBRACE) + RBRACE # can also be written as brace_expr = LBRACE + ... + RBRACE .. _White: - ``White`` - also similar to Word_, but matches whitespace characters. Not usually needed, as whitespace is implicitly ignored by pyparsing. However, some grammars are whitespace-sensitive, such as those that use leading tabs or spaces to indicate grouping or hierarchy. (If matching on tab characters, be sure to call parse_with_tabs_ on the top-level parse element.) - ``Empty`` - a null expression, requiring no characters - will always match; useful for debugging and for specialized grammars - ``NoMatch`` - opposite of ``Empty``, will never match; useful for debugging and for specialized grammars Expression subclasses --------------------- .. _And: - ``And`` - construct with a list of ``ParserElements``, all of which must match for ``And`` to match; can also be created using the '+' operator; multiple expressions can be ``Anded`` together using the '*' operator as in:: ip_address = Word(nums) + ('.' + Word(nums)) * 3 A tuple can be used as the multiplier, indicating a min/max:: us_phone_number = Word(nums) + ('-' + Word(nums)) * (1,2) A special form of ``And`` is created if the '-' operator is used instead of the '+' operator. In the ``ip_address`` example above, if no trailing '.' 
and ``Word(nums)`` are found after matching the initial ``Word(nums)``, then pyparsing will back up in the grammar and try other alternatives to ``ip_address``. However, if ``ip_address`` is defined as:: strict_ip_address = Word(nums) - ('.'+Word(nums))*3 then no backing up is done. If the first ``Word(nums)`` of ``strict_ip_address`` is matched, then any mismatch after that will raise a ``ParseSyntaxException``, which will halt the parsing process immediately. By careful use of the '-' operator, grammars can provide meaningful error messages close to the location where the incoming text does not match the specified grammar. .. _Or: - ``Or`` - construct with a list of ``ParserElements``, any of which must match for ``Or`` to match; if more than one expression matches, the expression that makes the longest match will be used; can also be created using the '^' operator .. _MatchFirst: - ``MatchFirst`` - construct with a list of ``ParserElements``, any of which must match for ``MatchFirst`` to match; matching is done left-to-right, taking the first expression that matches; can also be created using the '|' operator .. _Each: - ``Each`` - similar to And_, in that all of the provided expressions must match; however, ``Each`` permits matching to be done in any order; can also be created using the '&' operator - ``Opt`` - construct with a ``ParserElement``, but this element is not required to match; can be constructed with an optional ``default`` argument, containing a default string or object to be supplied if the given optional parse element is not found in the input string; parse action will only be called if a match is found, or if a default is specified. (``Opt`` was formerly named ``Optional``, but since the standard Python library module ``typing`` now defines ``Optional``, the pyparsing class has been renamed to ``Opt``. A compatibility synonym ``Optional`` is defined, but will be removed in a future release.) .. 
_ZeroOrMore: - ``ZeroOrMore`` - similar to ``Opt``, but can be repeated; ``ZeroOrMore(expr)`` can also be written as ``expr[...]``. .. _OneOrMore: - ``OneOrMore`` - similar to ZeroOrMore_, but at least one match must be present; ``OneOrMore(expr)`` can also be written as ``expr[1, ...]``. .. _DelimitedList: - ``DelimitedList`` - used for matching one or more occurrences of ``expr``, separated by ``delim``. By default, the delimiters are suppressed, so the returned results contain only the separate list elements. Can optionally specify ``combine=True``, indicating that the expressions and delimiters should be returned as one combined value (useful for scoped variables, such as ``"a.b.c"``, or ``"a::b::c"``, or paths such as ``"a/b/c"``). Can also optionally specify ``min`` and ``max`` restrictions on the length of the list, and ``allow_trailing_delim`` to accept a trailing delimiter at the end of the list. .. _FollowedBy: - ``FollowedBy`` - a lookahead expression, requires matching of the given expressions, but does not advance the parsing position within the input string .. _NotAny: - ``NotAny`` - a negative lookahead expression, prevents matching of named expressions, does not advance the parsing position within the input string; can also be created using the unary '~' operator .. 
_operators: Expression operators -------------------- - ``+`` - creates And_ using the expressions before and after the operator - ``|`` - creates MatchFirst_ (first left-to-right match) using the expressions before and after the operator - ``^`` - creates Or_ (longest match) using the expressions before and after the operator - ``&`` - creates Each_ using the expressions before and after the operator - ``*`` - creates And_ by multiplying the expression by the integer operand; if expression is multiplied by a 2-tuple, creates an And_ of ``(min,max)`` expressions (similar to ``{min,max}`` form in regular expressions); if ``min`` is ``None``, interpret as ``(0,max)``; if ``max`` is ``None``, interpret as ``expr*min + ZeroOrMore(expr)`` - ``-`` - like ``+`` but with no backup and retry of alternatives - ``~`` - creates NotAny_ using the expression after the operator - ``==`` - matching expression to string; returns ``True`` if the string matches the given expression - ``<<=`` - inserts the expression following the operator as the body of the ``Forward`` expression before the operator (``<<`` can also be used, but ``<<=`` is preferred to avoid operator precedence misinterpretation of the pyparsing expression) - ``...`` - inserts a SkipTo_ expression leading to the next expression, as in ``Keyword("start") + ... + Keyword("end")``. - ``[min, max]`` - specifies repetition similar to ``*`` with ``min`` and ``max`` specified as the minimum and maximum number of repetitions. ``...`` can be used in place of ``None``. For example ``expr[...]`` is equivalent to ``ZeroOrMore(expr)``, ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``, and ``expr[..., 3]`` is equivalent to "up to 3 instances of ``expr``". 
Positional subclasses --------------------- - ``StringStart`` - matches beginning of the text - ``StringEnd`` - matches the end of the text - ``LineStart`` - matches beginning of a line (lines delimited by ``\n`` characters) - ``LineEnd`` - matches the end of a line - ``WordStart`` - matches a leading word boundary - ``WordEnd`` - matches a trailing word boundary Converter subclasses -------------------- - ``Combine`` - joins all matched tokens into a single string, using specified ``join_string`` (default ``join_string=""``); expects all matching tokens to be adjacent, with no intervening whitespace (can be overridden by specifying ``adjacent=False`` in constructor) - ``Suppress`` - clears matched tokens; useful to keep returned results from being cluttered with required but uninteresting tokens (such as list delimiters) Special subclasses ------------------ - ``Group`` - causes the matched tokens to be enclosed in a list; useful in repeated elements like ZeroOrMore_ and OneOrMore_ to break up matched tokens into groups for each repeated pattern - ``Dict`` - like ``Group``, but also constructs a dictionary, using the ``[0]``'th elements of all enclosed token lists as the keys, and each token list as the value - ``Forward`` - placeholder token used to define recursive token patterns; when defining the actual expression later in the program, insert it into the ``Forward`` object using the ``<<=`` operator (see fourFn.py_ for an example). Other classes ------------- .. _ParseResults: - ``ParseResults`` - class used to contain and manage the lists of tokens created from parsing the input using the user-defined parse expression. 
``ParseResults`` can be accessed in a number of ways: - as a list - total list of elements can be found using ``len()`` - individual elements can be found using ``[0], [1], [-1],`` etc., or retrieved using slices - elements can be deleted using ``del`` - the ``-1``th element can be extracted and removed in a single operation using ``pop()``, or any element can be extracted and removed using ``pop(n)`` - a nested ParseResults_ can be created by using the pyparsing ``Group`` class around elements in an expression:: Word(alphas) + Group(Word(nums)[...]) + Word(alphas) will parse the string "abc 100 200 300 end" as:: ['abc', ['100', '200', '300'], 'end'] If the ``Group`` is constructed using ``aslist=True``, the resulting tokens will be a Python list instead of a ParseResults_. In this case, the returned value will no longer support the extended features or methods of a ParseResults_. - as a dictionary - if ``set_results_name()`` is used to name elements within the overall parse expression, then these fields can be referenced as dictionary elements or as attributes - the ``Dict`` class generates dictionary entries using the data of the input text - in addition to ParseResults_ listed as ``[ [ a1, b1, c1, ...], [ a2, b2, c2, ...] ]`` it also acts as a dictionary with entries defined as ``{ a1 : [ b1, c1, ... ] }, { a2 : [ b2, c2, ... ] }``; this is especially useful when processing tabular data where the first column contains a key value for that line of data; when constructed with ``asdict=True``, will return an actual Python ``dict`` instead of a ParseResults_. In this case, the returned value will no longer support the extended features or methods of a ParseResults_. - list elements that are deleted using ``del`` will still be accessible by their dictionary keys - supports ``get()``, ``items()`` and ``keys()`` methods, similar to a dictionary - a keyed item can be extracted and removed using ``pop(key)``. 
Here ``key`` must be non-numeric (such as a string), in order to use dict extraction instead of list extraction. - new named elements can be added (in a parse action, for instance), using the same syntax as adding an item to a dict (``parse_results["X"] = "new item"``); named elements can be removed using ``del parse_results["X"]`` - as a nested list - results returned from the Group class are encapsulated within their own list structure, so that the tokens can be handled as a hierarchical tree - as an object - named elements can be accessed as if they were attributes of an object: if an element is referenced that does not exist, it will return ``""``. ParseResults_ can also be converted to an ordinary list of strings by calling ``as_list()``. Note that this will strip the results of any field names that have been defined for any embedded parse elements. (The ``pprint`` module is especially good at printing out the nested contents given by ``as_list()``.) If a ParseResults_ is built with expressions that use results names (see _set_results_name) or using the ``Dict`` class, then those names and values can be extracted as a Python dict using ``as_dict()``. Finally, ParseResults_ can be viewed by calling ``dump()``. ``dump()`` will first show the ``as_list()`` output, followed by an indented structure listing parsed tokens that have been assigned results names. 
Here is sample code illustrating some of these methods:: >>> number = Word(nums) >>> name = Combine(Word(alphas)[...], adjacent=False, join_string=" ") >>> parser = number("house_number") + name("street_name") >>> result = parser.parse_string("123 Main St") >>> print(result) ['123', 'Main St'] >>> print(type(result)) >>> print(repr(result)) (['123', 'Main St'], {'house_number': ['123'], 'street_name': ['Main St']}) >>> result.house_number '123' >>> result["street_name"] 'Main St' >>> result.as_list() ['123', 'Main St'] >>> result.as_dict() {'house_number': '123', 'street_name': 'Main St'} >>> print(result.dump()) ['123', 'Main St'] - house_number: '123' - street_name: 'Main St' Exception classes and Troubleshooting ------------------------------------- .. _ParseException: - ``ParseException`` - exception returned when a grammar parse fails; ``ParseExceptions`` have attributes ``loc``, ``msg``, ``line``, ``lineno``, and ``column``; to view the text line and location where the reported ParseException occurs, use:: except ParseException as err: print(err.line) print(" " * (err.column - 1) + "^") print(err) ``ParseExceptions`` also have an ``explain()`` method that gives this same information:: except ParseException as err: print(err.explain()) - ``RecursiveGrammarException`` - exception returned by ``validate()`` if the grammar contains a recursive infinite loop, such as:: bad_grammar = Forward() good_token = Literal("A") bad_grammar <<= Opt(good_token) + bad_grammar - ``ParseFatalException`` - exception that parse actions can raise to stop parsing immediately. Should be used when a semantic error is found in the input text, such as a mismatched XML tag. - ``ParseSyntaxException`` - subclass of ``ParseFatalException`` raised when a syntax error is found, based on the use of the '-' operator when defining a sequence of expressions in an And_ expression. 
- You can also get some insights into the parsing logic using diagnostic parse actions, and ``set_debug()``, or test the matching of expression fragments by testing them using ``search_string()`` or ``scan_string()``. - Use ``with_line_numbers`` from ``pyparsing.testing`` to display the input string being parsed, with line and column numbers that correspond to the values reported in set_debug() output:: import pyparsing as pp ppt = pp.testing data = """\ A 100""" expr = pp.Word(pp.alphanums).set_name("word").set_debug() print(ppt.with_line_numbers(data)) expr[...].parse_string(data) prints:: . 1 1234567890 1: A| 2: 100| Match word at loc 3(1,4) A ^ Matched word -> ['A'] Match word at loc 11(2,7) 100 ^ Matched word -> ['100'] `with_line_numbers` has several options for displaying control characters, end-of-line and space markers, Unicode symbols for control characters - these are documented in the function's docstring. - Diagnostics can be enabled using ``pyparsing.enable_diag`` and passing one of the following enum values defined in ``pyparsing.Diagnostics`` - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results name is defined on a MatchFirst_ or Or_ expression with one or more And_ subexpressions - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results name is defined on a containing expression with ungrouped subexpressions that also have results names - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a ``Forward`` is defined with a results name, but has no contents defined - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a ``Forward`` is defined in a grammar but has never had an expression attached to it - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a ``Forward`` is defined but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when ``one_of`` is 
incorrectly called with multiple str arguments - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent calls to ``ParserElement.set_name`` All warnings can be enabled by calling ``pyparsing.enable_all_warnings()``. Sample:: import pyparsing as pp pp.enable_all_warnings() fwd = pp.Forward().set_results_name("recursive_expr") >>> UserWarning: warn_name_set_on_empty_Forward: setting results name 'recursive_expr' on Forward expression that has no contained expression Warnings can also be enabled using the Python ``-W`` switch (using ``-Wd`` or ``-Wd:::pyparsing``) or setting a non-empty value to the environment variable ``PYPARSINGENABLEALLWARNINGS``. (If using ``-Wd`` for testing, but wishing to disable pyparsing warnings, add ``-Wi:::pyparsing``.) Miscellaneous attributes and methods ==================================== Helper methods -------------- - ``counted_array(expr)`` - convenience function for a pattern where a list of instances of the given expression are preceded by an integer giving the count of elements in the list. Returns an expression that parses the leading integer, reads exactly that many expressions, and returns the array of expressions in the parse results - the leading integer is suppressed from the results (although it is easily reconstructed by using len on the returned array). - ``one_of(choices, caseless=False, as_keyword=False)`` - convenience function for quickly declaring an alternative set of Literal_ expressions. ``choices`` can be passed as a list of strings or as a single string of values separated by spaces. The values are sorted so that longer matches are attempted first; this ensures that a short value does not mask a longer one that starts with the same characters. If ``caseless=True``, will create an alternative set of CaselessLiteral_ tokens. If ``as_keyword=True``, ``one_of`` will declare Keyword_ expressions instead of Literal_ expressions. 
- ``dict_of(key, value)`` - convenience function for quickly declaring a dictionary pattern of ``Dict(ZeroOrMore(Group(key + value)))``. - ``make_html_tags(tag_str)`` and ``make_xml_tags(tag_str)`` - convenience functions to create definitions of opening and closing tag expressions. Returns a pair of expressions, for the corresponding ```` and ```` strings. Includes support for attributes in the opening tag, such as ```` - attributes are returned as named results in the returned ParseResults_. ``make_html_tags`` is less restrictive than ``make_xml_tags``, especially with respect to case sensitivity. - ``infix_notation(base_operand, operator_list)`` - convenience function to define a grammar for parsing infix notation expressions with a hierarchical precedence of operators. To use the ``infix_notation`` helper: 1. Define the base "atom" operand term of the grammar. For this simple grammar, the smallest operand is either an integer or a variable. This will be the first argument to the ``infix_notation`` method. 2. Define a list of tuples for each level of operator precedence. Each tuple is of the form ``(operand_expr, num_operands, right_left_assoc, parse_action)``, where: - ``operand_expr`` - the pyparsing expression for the operator; may also be a string, which will be converted to a Literal_; if ``None``, indicates an empty operator, such as the implied multiplication operation between 'm' and 'x' in "y = mx + b". - ``num_operands`` - the number of terms for this operator (must be 1, 2, or 3) - ``right_left_assoc`` is the indicator whether the operator is right or left associative, using the pyparsing-defined constants ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - ``parse_action`` is the parse action to be associated with expressions matching this operator expression (the ``parse_action`` tuple member may be omitted) 3. 
Call ``infix_notation`` passing the operand expression and the operator precedence list, and save the returned value as the generated pyparsing expression. You can then use this expression to parse input strings, or incorporate it into a larger, more complex grammar. ``infix_notation`` also supports optional arguments ``lpar`` and ``rpar``, to parse groups with symbols other than "(" and ")". They may be passed as strings (in which case they will be converted to ``Suppress`` objects, and suppressed from the parsed results), or passed as pyparsing expressions, in which case they will be kept as-is, and grouped with their contents. For instance, to use "<" and ">" for grouping symbols, you could write:: expr = infix_notation(int_expr, [ (one_of("+ -"), 2, opAssoc.LEFT), ], lpar="<", rpar=">" ) expr.parse_string("3 - <2 + 11>") returning:: [3, '-', [2, '+', 11]] If the grouping symbols are to be retained, then pass them as pyparsing ``Literals``:: expr = infix_notation(int_expr, [ (one_of("+ -"), 2, opAssoc.LEFT), ], lpar=Literal("<"), rpar=Literal(">") ) expr.parse_string("3 - <2 + 11>") returning:: [3, '-', ['<', [2, '+', 11], '>']] - ``match_previous_literal`` and ``match_previous_expr`` - function to define an expression that matches the same content as was parsed in a previous parse expression. For instance:: first = Word(nums) match_expr = first + ":" + match_previous_literal(first) will match "1:1", but not "1:2". Since this matches at the literal level, this will also match the leading "1:1" in "1:10". In contrast:: first = Word(nums) match_expr = first + ":" + match_previous_expr(first) will *not* match the leading "1:1" in "1:10"; the expressions are evaluated first, and then compared, so "1" is compared with "10". - ``nested_expr(opener, closer, content=None, ignore_expr=quoted_string)`` - method for defining nested lists enclosed in opening and closing delimiters. 
- ``opener`` - opening character for a nested list (default="("); can also be a pyparsing expression - ``closer`` - closing character for a nested list (default=")"); can also be a pyparsing expression - ``content`` - expression for items within the nested lists (default=None) - ``ignore_expr`` - expression for ignoring opening and closing delimiters (default=``quoted_string``) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the ``ignore_expr`` argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as ``quoted_string`` or a comment expression. Specify multiple expressions using an Or_ or MatchFirst_. The default is ``quoted_string``, but if no expressions are to be ignored, then pass ``None`` for this argument. - ``IndentedBlock(statement_expr, recursive=False, grouped=True)`` - function to define an indented block of statements, similar to indentation-based blocking in Python source code: - ``statement_expr`` - the expression defining a statement that will be found in the indented block; a valid ``IndentedBlock`` must contain at least 1 matching ``statement_expr`` - ``recursive`` - flag indicating whether the IndentedBlock can itself contain nested sub-blocks of the same type of expression (default=False) - ``grouped`` - flag indicating whether the tokens returned from parsing the IndentedBlock should be grouped (default=True) .. _originalTextFor: - ``original_text_for(expr)`` - helper function to preserve the originally parsed text, regardless of any token processing or conversion done by the contained expression. For instance, the following expression:: full_name = Word(alphas) + Word(alphas) will return the parse of "John Smith" as ['John', 'Smith']. 
In some applications, the actual name as it was given in the input string is what is desired. To do this, use ``original_text_for``:: full_name = original_text_for(Word(alphas) + Word(alphas)) - ``ungroup(expr)`` - function to "ungroup" returned tokens; useful to undo the default behavior of And_ to always group the returned tokens, even if there is only one in the list. - ``lineno(loc, string)`` - function to give the line number of the location within the string; the first line is line 1, newlines start new rows - ``col(loc, string)`` - function to give the column number of the location within the string; the first column is column 1, newlines reset the column number to 1 - ``line(loc, string)`` - function to retrieve the line of text representing ``lineno(loc, string)``; useful when printing out diagnostic messages for exceptions - ``srange(range_spec)`` - function to define a string of characters, given a string of the form used by regexp string ranges, such as ``"[0-9]"`` for all numeric digits, ``"[A-Z_]"`` for uppercase characters plus underscore, and so on (note that ``range_spec`` does not include support for generic regular expressions, just string range specs) - ``trace_parse_action(fn)`` - decorator function to debug parse actions. Lists each call, called arguments, and return value or exception Helper parse actions -------------------- - ``remove_quotes`` - removes the first and last characters of a quoted string; useful to remove the delimiting quotes from quoted strings - ``replace_with(repl_string)`` - returns a parse action that simply returns the ``repl_string``; useful when using ``transform_string``, or converting HTML entities, as in:: nbsp = Literal(" ").set_parse_action(replace_with("")) - ``original_text_for``- restores any internal whitespace or suppressed text within the tokens for a matched parse expression. This is especially useful when defining expressions for ``scan_string`` or ``transform_string`` applications. 
- ``with_attribute(*args, **kwargs)`` - helper to create a validating parse action to be used with start tags created with ``make_xml_tags`` or ``make_html_tags``. Use ``with_attribute`` to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as ```` or ``
``. ``with_attribute`` can be called with: - keyword arguments, as in ``(class="Customer", align="right")``, or - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` An attribute can be specified to have the special value ``with_attribute.ANY_VALUE``, which will match any value - use this to ensure that an attribute is present but any attribute value is acceptable. - ``match_only_at_col(column_number)`` - a parse action that verifies that an expression was matched at a particular column, raising a ``ParseException`` if matching at a different column number; useful when parsing tabular data - ``common.convert_to_integer()`` - converts all matched tokens to int - ``common.convert_to_float()`` - converts all matched tokens to float - ``common.convert_to_date()`` - converts matched token to a datetime.date - ``common.convert_to_datetime()`` - converts matched token to a datetime.datetime - ``common.strip_html_tags()`` - removes HTML tags from matched token - ``common.downcase_tokens()`` - converts all matched tokens to lowercase - ``common.upcase_tokens()`` - converts all matched tokens to uppercase Common string and token constants --------------------------------- - ``alphas`` - same as ``string.letters`` - ``nums`` - same as ``string.digits`` - ``alphanums`` - a string containing ``alphas + nums`` - ``alphas8bit`` - a string containing alphabetic 8-bit characters:: ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþ .. 
_identchars: - ``identchars`` - a string containing characters that are valid as initial identifier characters:: ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyzª µºÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ - ``identbodychars`` - a string containing characters that are valid as identifier body characters (those following a valid leading identifier character as given in identchars_):: 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyzª µ·ºÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ - ``printables`` - same as ``string.printable``, minus the space (``' '``) character - ``empty`` - a global ``Empty()``; will always match - ``sgl_quoted_string`` - a string of characters enclosed in 's; may include whitespace, but not newlines - ``dbl_quoted_string`` - a string of characters enclosed in "s; may include whitespace, but not newlines - ``quoted_string`` - ``sgl_quoted_string | dbl_quoted_string`` - ``python_quoted_string`` - ``quoted_string | multiline quoted string`` - ``c_style_comment`` - a comment block delimited by ``'/*'`` and ``'*/'`` sequences; can span multiple lines, but does not support nesting of comments - ``html_comment`` - a comment block delimited by ``''`` sequences; can span multiple lines, but does not support nesting of comments - ``comma_separated_list`` - similar to DelimitedList_, except that the list expressions can be any text value, or a quoted string; quoted strings can safely include commas without incorrectly breaking the string into two tokens - ``rest_of_line`` - all remaining printable characters up to but not including the next newline - ``common.integer`` - an integer with no leading sign; parsed token is converted to int - ``common.hex_integer`` - a hexadecimal integer; parsed token is converted to int - ``common.signed_integer`` - an integer with optional leading sign; parsed token is converted to int - ``common.fraction`` - signed_integer '/' signed_integer; parsed tokens are 
converted to float - ``common.mixed_integer`` - signed_integer '-' fraction; parsed tokens are converted to float - ``common.real`` - real number; parsed tokens are converted to float - ``common.sci_real`` - real number with optional scientific notation; parsed tokens are convert to float - ``common.number`` - any numeric expression; parsed tokens are returned as converted by the matched expression - ``common.fnumber`` - any numeric expression; parsed tokens are converted to float - ``common.identifier`` - a programming identifier (follows Python's syntax convention of leading alpha or "_", followed by 0 or more alpha, num, or "_") - ``common.ipv4_address`` - IPv4 address - ``common.ipv6_address`` - IPv6 address - ``common.mac_address`` - MAC address (with ":", "-", or "." delimiters) - ``common.iso8601_date`` - date in ``YYYY-MM-DD`` format - ``common.iso8601_datetime`` - datetime in ``YYYY-MM-DDThh:mm:ss.s(Z|+-00:00)`` format; trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '`` - ``common.url`` - matches URL strings and returns a ParseResults with named fields like those returned by ``urllib.parse.urlparse()`` Unicode character sets for international parsing ------------------------------------------------ Pyparsing includes the ``unicode`` namespace that contains definitions for ``alphas``, ``nums``, ``alphanums``, ``identchars``, ``identbodychars``, and ``printables`` for character ranges besides 7- or 8-bit ASCII. You can access them using code like the following:: import pyparsing as pp ppu = pp.unicode greek_word = pp.Word(ppu.Greek.alphas) greek_word[...].parse_string("Καλημέρα κόσμε") The following language ranges are defined. 
========================== ================= ================================================ Unicode set Alternate names Description -------------------------- ----------------- ------------------------------------------------ Arabic العربية Chinese 中文 CJK Union of Chinese, Japanese, and Korean sets Cyrillic кириллица Devanagari देवनागरी Greek Ελληνικά Hangul Korean, 한국어 Hebrew עִברִית Japanese 日本語 Union of Kanji, Katakana, and Hiragana sets Japanese.Hiragana ひらがな Japanese.Kanji 漢字 Japanese.Katakana カタカナ Latin1 All Unicode characters up to code point 255 LatinA LatinB Thai ไทย BasicMultilingualPlane BMP All Unicode characters up to code point 65535 ========================== ================= ================================================ The base ``unicode`` class also includes definitions based on all Unicode code points up to ``sys.maxunicode``. This set will include emojis, wingdings, and many other specialized and typographical variant characters. Generating Railroad Diagrams ============================ Grammars are conventionally represented in what are called "railroad diagrams", which allow you to visually follow the sequence of tokens in a grammar along lines which are a bit like train tracks. You might want to generate a railroad diagram for your grammar in order to better understand it yourself, or maybe to communicate it to others. Usage ----- To generate a railroad diagram in pyparsing, you first have to install pyparsing with the ``diagrams`` extra. To do this, just run ``pip install pyparsing[diagrams]``, and make sure you add ``pyparsing[diagrams]`` to any ``setup.py`` or ``requirements.txt`` that specifies pyparsing as a dependency. Create your parser as you normally would. 
Then call ``create_diagram()``, passing the name of an output HTML file.:: street_address = Word(nums).set_name("house_number") + Word(alphas)[1, ...].set_name("street_name") street_address.set_name("street_address") street_address.create_diagram("street_address_diagram.html") This will result in the railroad diagram being written to ``street_address_diagram.html``. `create_diagrams` takes the following arguments: - ``output_html`` (str or file-like object) - output target for generated diagram HTML - ``vertical`` (int) - threshold for formatting multiple alternatives vertically instead of horizontally (default=3) - ``show_results_names`` - bool flag whether diagram should show annotations for defined results names - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box - ``embed`` - bool flag whether generated HTML should omit , , and tags to embed the resulting HTML in an enclosing HTML source (such as PyScript HTML) - ``head`` - str containing additional HTML to insert into the section of the generated code; can be used to insert custom CSS styling - ``body`` - str containing additional HTML to insert at the beginning of the section of the generated code Example ------- You can view an example railroad diagram generated from `a pyparsing grammar for SQL SELECT statements <_static/sql_railroad.html>`_ (generated from `examples/select_parser.py <../examples/select_parser.py>`_). Naming tip ---------- Parser elements that are separately named will be broken out as their own sub-diagrams. As a short-cut alternative to going through and adding ``.set_name()`` calls on all your sub-expressions, you can use ``autoname_elements()`` after defining your complete grammar. For example:: a = pp.Literal("a") b = pp.Literal("b").set_name("bbb") pp.autoname_elements() `a` will get named "a", while `b` will keep its name "bbb". Customization ------------- You can customize the resulting diagram in a few ways. 
To do so, run ``pyparsing.diagrams.to_railroad`` to convert your grammar into a form understood by the `railroad-diagrams `_ module, and then ``pyparsing.diagrams.railroad_to_html`` to convert that into an HTML document. For example:: from pyparsing.diagram import to_railroad, railroad_to_html with open('output.html', 'w') as fp: railroad = to_railroad(my_grammar) fp.write(railroad_to_html(railroad)) This will result in the railroad diagram being written to ``output.html`` You can then pass in additional keyword arguments to ``pyparsing.diagrams.to_railroad``, which will be passed into the ``Diagram()`` constructor of the underlying library, `as explained here `_. In addition, you can edit global options in the underlying library, by editing constants:: from pyparsing.diagram import to_railroad, railroad_to_html import railroad railroad.DIAGRAM_CLASS = "my-custom-class" my_railroad = to_railroad(my_grammar) These options `are documented here `_. Finally, you can edit the HTML produced by ``pyparsing.diagrams.railroad_to_html`` by passing in certain keyword arguments that will be used in the HTML template. Currently, these are: - ``head``: A string containing HTML to use in the ```` tag. This might be a stylesheet or other metadata - ``body``: A string containing HTML to use in the ```` tag, above the actual diagram. This might consist of a heading, description, or JavaScript. 
If you want to provide a custom stylesheet using the ``head`` keyword, you can make use of the following CSS classes: - ``railroad-group``: A group containing everything relating to a given element group (ie something with a heading) - ``railroad-heading``: The title for each group - ``railroad-svg``: A div containing only the diagram SVG for each group - ``railroad-description``: A div containing the group description (unused) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/Makefile0000644000000000000000000000113614412577542013062 0ustar00# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = PyParsing SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/_static/pyparsingClassDiagram_1.5.2.jpg0000644000000000000000000071556214412577542020603 0ustar00JFIFC    $.' ",#(7),01444'9=82<.342C  2!!22222222222222222222222222222222222222222222222222{" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?Aloou;ޱwKQ@T(T :U|-@$چ"W.' 8viaeVm|sPhM n OS[U>NBa<8Qn?Zoqvuro G"UgcAa$YSN4K}'ɎjV860WLkVw!_k=ֹ=Y8ռbz=v`y[CNXt+ =~[t^G[!}|j.ya?Ǿ,x眚Mֹ=G!_k=6)7閡ƬLHcjw@#RT>-A5?]y-] ui+ni %`z~'un?ZoW}\gXytf`2c jxv^$;I~Q6ҩ66U2 -mu@z!_k=Տ? 
YX ;qn-!E3<D%cc#u(t.uȎ 7;1 wn2\64rR!LFI2A Dn?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAEsQn?ZotP?n?ZoWAE|K>#N=r'ˋRwۺc9'O&%{]'w>$O[ +Jj'2lD 9#Jg?oEWA@@%{]'>6^I㢀>o+lW?WR٫((((((((((((((((((((((((KMxWW-6k@JD,+*E5CZ <kC<[TV<ӕ_?5oJ?UE9QZ <kC<[TQÕ_?5oJ?UE9QZ <kC<[TQÕ_?5oJ?UE9QZ <kC<[TQÕ_?5oJ?UE9QZ <kC{ v.)bO ΋Z\P̕!Cs@@/G4?Ty~0ևy**1|kC@%{]'>6^I㢀>o+lW?WR٫((j(ItѾ>A'woVnCŵFv*Pڮ/7=VI}oo5{UO>Ě*MiѴ,({ O5Ο0$BLC"4_$$1_ đ$a"9Ö{WD[IJ4 HiPw_9|Ҹvg8YX%HWC ׃ &l%p~pa:~QDVVv{2p=k*I $9$,@KZ^e;$'NUpڒMoIi5K$E\ i9' úcBc+Xm㶅a[rYZL'n8jSnVHa-=̖acTK둃].$߽^_f] #6ڶ>} 9QQ}Qu1H.cbR1Gl_\j$s,ڪL+XKpElQQRG Hδ4 =E) S2v9ٜOj:UΗu#ٽe*VC?'7> }N3I|J#n fwN4_^By6tmEn-+`_Roe+PVkqHPXUOc[_[/yL[ιtNݼcORѵ+Bk=Nt{FL)|08s[g8Ry&tXfbi5Lwq0A.\! uK$Ce= V.VYjKjf[‘m ?&rpH țzՄ֚UbNqj'tFǒ- e8IIqJV`uB7˭iѯ` `A^i:e<]jKyG-+"A<G>NUF$bYč*ףnS+jr8gc$l{"77a^>ةk߿F3Eq M $R(dt`UGQOm~yHv1­V.aER(((((((((OzL]owjL4G{ƨV&g#TaϮ;5Eޢ0?[ 3}u, >iQ`2JY^}d(FLw2s~- ޡ`:Zl>t;bPG0tiQ >;t sN:PI-FBgc|1r6VmÍZDӬ`w1 0|l|T)OL]owjL4G{ƩޢI-WPssuj Ρqqlb,JʻULVuGNi-DE"2:qoL4G{ƨL]owj #c-Ž!2·gYUB\ oy1֟nm2YiA{L&a 򍨋bH溏L4G{ƨL]owjX}nq_9B'n'iRXe o䳂I$ֿ|>Y]m&+4 |Ĕ36w`1laϮ;5G&g#Ts;²oQX?iQ >MoQX?iQ >zL]owjL4G{ƨV&g#TaϮ;5Eޢ0?[ 3}u, >iQ`7L4G{ƨL]owjE`aϮ;5G&g#TX J? 
Ewuƚ+:,çqҽ [Š( (/]]hnyol3"sl@p˝z[x@0SNh/=[-o=zZzܢ?=[-o=zZzܢ0oea9krտr͗#ߛ/G(V[6_GC~l( ?=[-o=zZzܢ0oea9krտr͗#ߛ/G(V[6_GC~l( ?=[-o=zZzܢ0oea9krտr͗#ߛ/G(_Wa4vS\FAfT!`-Gohz^X_pq&K6@%{]'>6^I㢀>o+lW?WR٫((o]ꩬs$i$M4Z%Z0 7c=9WSPj H1P}%Gե)O¢=ZoC֗G-h˾#6's""ּc{"z#?47Sϔf qѾjQ/c\EI6 #pǮiBҬE럳VOAWBA4 zWЛ;Y3okEeq qnJːwیTsx Ih-}V*ŽOs[9]M#߉/ngDd!ыƀީNxcX-U4Mf3u\5Z1`k_ izrZsZ[3xڴN`&؝/1/G9qӣOz(FWQo|Os I,bm>)N2l [qG{h$f)q08ꆑ Iu!Z}Sb 0;f,P\7D͞>oƅ:]cuNֹ$٤M5: D|a۪sӁ]=St9#K>b #X!T`q>r(/%- ((((((((((((((Oa;B͞*<86qt&%A*N:t..GuEq_5MJ8O iFƍcɝG9qbAl-c rcRn}уckEq뷧WMAmnƙm+HPd)kzxHѦTݢs5 UwHwiQA:GsEq#?,,TwoCRb5ˌ1>ˎ4ډp&&wA8ƕ\:Z+MJPPFѤ t< J1sn_j<\Rs0HZEce A܏AMŧa)~'WEq:!85-6TvpǻRp2i%8'&g"O6Hny a#hS]էH#6Iqۼй22mCu[+[mIZpm<ϒ`>NHRuirM3VIt!a,Qۈ;1l/zV\zic=Ν V'Da; TX%5΢Ov$6\q7/C#9a&QEHŠ(((((((((CA/A]pA u(b ( i ?)l5=OaM;KauI5t*[Ky M4B.*I9'wF_ZIot-fLӼLYxFQ~lb=WβE.[{ A78sIE j b'2GbOy8"F,;xYkXs@(!u?< %,;Cd AГ8gLYn-˅d~Vg.RHJdU It=l.mG_M>}D%ꌶI`X3t'tUј&E4 "trwwR旫 aЎq"$BȊd^O?[9I6^fh0@qK_kvN6y%Xah[͑C?zEtWv[_=m-*O$~|Uc=#@wAc*$KXK{ZX"(W{,lKm.\l # >P 4[ky}"!-w32*F.J<`0[: }_AmLLo(@>^v#9:׉-4=-Nx]Ānչ89: 6^I㢏Wwx ; _[5t7@w?Կj(((]~ ,c$X#SIֵoTmٿF~mvsqҗ7:WԴ]~YL$s2szwbumKJ?&>|w3D_<a|_e%,~ȱHHC$OGS<7qM-YY AМ\kWIFYnfǧxCZinxA(Xf_0y>}M"}FS0 |KCWn;ZO=.;f/Wۻofs9Y;mR.- 'P@RHn=3 j~ptGm͌fF>.>kŕ-~cY#'!'y'+Nӵ{|ɥI"ŔEpJ'jfF%bkvj槖=:x^ &WiL"9Ä߂]3U`ڒMOe Flmǘ逤qtՙeMi-aYF#HG_j>0Ƌ^$q낳*(%<m=jg++moīkzEw*z<{췑N<ͻ)`NGo○SVNxkj"yAȶ+zfUGIEfkV1B&7&N0'j95f7rܕ)'lضi*sz6ETQEQEQEQEQEQEQEQEQEQEQEQEQEW?'%Uj+*E5iI"CI#"c֝Y֏OAC3ǃx%AFbIMskrZ+?ZQ5BYX1"9@`J1@zW3ǙBbl')O^<Zi'&W;ju}mdg’vz!k:uuuebt#s8g%C&nG~4yn-&ivX?^KgPFGJ̽4M*6sLN@,QH_JH,t%XZF<Ty=: 籣uد˩'ĩܰrw`\d^N7[($k"0d` 9.)}7M+e+FVye6M}ܚO]$dAcw=ٶbʂ$ JsEryܐI95˘t7ftMd̀==HtpyPU6+*(9zaV NTbS\λ)jt |[R:xb"vģQAڥ5)~|Nڏn#Yvg}9<> #1ىcc#$#;msJ{]G$#V ?pN? 
O\T^ Qw3GZM71F?-CxmOX"[n(̠)uYiKQof8Wjg<Ēy&((((((((((((x PWw\&%_"lf (( i[ t5x@0SNh>^,QZϹ-4DZ,78<꾽D$ѭ:4P2(%ƫVIO24Sza!z8)Z0."ICwK1d"E ֎mYeDhc8sۜl3$Wֲ$k#K*\FV-wLdehP~$wvqjoUn-:}Cz Mu,O,5#1ˉV.p>xɩc|Cq j3N]9Fx8.1Z2@ %`F"س3<2\ZоN6E*p1׭GiM"Pk op|>\nۙH5upq"myB3%h0Eei+g/crتέH%t|\uw'մP4B%,[z6 A˭鱩@<)`:*?35}cJ+[QKxdxc(geRRNJ[k޷qwoikm<piraF8UBHqI *C) zѳl k[A F*gҬ\Eik5Ͷ(Pd+8^83 NĎ{$ mH F-(tx\Zěuerr*&qǭv:fy +4r6V8`AU{Č-!u39tn맮oN.FII-(EPEPEPEPEPEPkG+E0\(GUI[c 1 Z`d۷~vnv܌--1BGDog'`sbT FPT#T DIx[qn pqkivB >Aq=6 bힼSEh^qYz^k$I݅Vp,e9IUW𕃇['F5Ӫqkz^Wax'Mڅ{tLv@8rrt&VԯQxcY1 I8/sE?k>05֣w7ni0KC*+}sV:Eiz,涷#2Dk45}Š(QEQEQEQEQEQEQEQEQEQEQEQEQEIu w$Ŀ M@ֵ]=yp^Lrvs\.xĊ2mG)FFȊXdW9(kb0ܫA!GԐ+-g}B,IjmzyqǠ`?O?lx&7̄I&!zkNTkbh;Bv}W?bǠ1@dU._L(V1e@##T>#P<gQ*#Xfˠ)K3Jʓ1Ce[J+[֐!E -7.䏼1io#; 'K˜ґ568дLӆZ9!ÂF8I.vXK)hh5vuےr 0y9&i$в$~[ 2b0y#H=ANz=C7m,x X%ٶBܷb~r0*_j3-Eg\Mpp~r:rZw+<1Y?q4jk%-pY͞E#Z+XbmqJ:l$*0\hڏxn//c)ǘfR yq;ӧdUk-HbH!Iyc1׍J( ];J`w]%szw kv6JB ( ( ( ( ( ( (>@%{]'>6^I㢀>o+lW?WR٫((((((((((((((((((((((((sӉ33VA.%! X#HEIN,gw$P p7mBxM.9mR93 `͜. pp;qi-Kn'UP=W<~8ﰚW}KSd 4N*2U2 lw:ZvW1Gk*$*!fEqN:0h[_;;V!wȻ"*)_sb3\hmc ~9R2ݬ@#]ֶȱ]jLcYO<*6vpp'ۥG7dYduVKeeYsk`>lO8۾H.|Is ~so4ΤlV!C E'Ԯq;${uϔS\8=JTVB4em)/&iJ`p0ݧ=Ĭx|u,pp0H>X]glbX! 
r9!մ 1jnyn&_1\4A$\YiniEQEQEQEQEQEQEQEQErA x PWw[ESQEW=OaM;KacC#ii1~y!YA# AV A#:cC*UrpS@>NDoo㾺짼mKmѤQβOk{<wlŦ iN6"nP7 dG{bi$0]+ŪE~,/ !T0Gq=sM uik}:0Ҵ 2rHCxbѴ !Ml'"A{wɒF;aϠZZzݽ&>UL wP|< Y.B$'ˍ/?h`s`8#`QwaԴ[ mNnFCp͸cto}z[ >V1 }v\·Gi}=M~C>F/1zsj> .O ۴"#Cm?'dݴt>(,63MAe~%t{"FyjI_ޛio;xDPc%C̀>\1.ybxZEP #ʯY7m#XwO}Ev 鏩i6h6}]78l?E-6?%eQCC#8<`z?+hn$k# ;!߸nB9@OqcURd.Z[dGonbH 6Nԥ]sMGPPJKفYnV3c'>/hsD:>j3zT}5ͥƬ#y#W*aHָ)4ˡG;p 8B~V7y<ۥm/{"?g)}}.#NMuF[++i$!A>OӢYڤ 0D@Bp)c\#>+m ;xoZ2 4`29@r1f,>Ǥ0KsnX1A(gzuߵb\[߸O\U7YDZtuHQBZ7z %Ym.g(2HVx$t&,5[i%W QYE DU @r>QOgo':MG=*sɷDˎp:[آI ,7`=wrH\N@] Jͼ"-D#:C 2Tr9tbO[Mqm|.s7D_ FHANُ҅þX&cѴ +f֎:$9KW(\)Ӏp}?>tJ;-ZAH,w6G\Rx&w7&k";',L@ `u7;*(w kv6J(?mt ( ( ( ( ( ( (>@%{]'>6^I㢀>o+lW?WR٫(((((((((((((((((((((((( >[.Q5+%[ "UT1}0kP֡C$o'n8d@%RuԂk{Vɽ#H.@ &v{'Mr$j-8XP#Oآzޝ,hCF7'ctN#G|T6!nu'K`X=G+͒8N:I<#lc/V56cr\-ē*У:Mnh$tbF,!R' ׽ Ucu$Rj(f,dfU@3+qzpiL̻A'\[>) 36dOR91Yx6ndV/݄bά@A] n? ē*>%[lO'4?K:iLZgۻhnۜkV5E∦[]+r.c+<'$ "F$FeP 2 E ((((((((ӿk_ ѷuW7@ֿan뤤HQEQEQEQEQEQEQE+]jr*T;(beB9J% -Kk0$J̞V2Pq=˷'}ѿo݆r!a#hr73{f Es*r޸e*Os?$9gJ{ҢX+lYںúĂdGÎ[AY:}w0Ikm,s6n#hՄs{M!kmhm&8gܕi㎆Rx!Ido-ȁF߻ 1S5gkuPm39N˿`Ro[_tZnjikkco"bL`v,u(o![L6&d#[ sntd{E>m܍>ǒ8Mucm鳬ڥ7,s"1WEo'vv871ʸu6Qciye@r ҕ9I=;J}鐉m"-yv:מ$ƎU+0Odd/Uy'F3pcM)p1S[h/#xgQO|}Kg4[Bc*J ( ( ( ( ( ( ( ( (9 J? 
Ewuh?P] #wGOUw6xs#UҤʯ0P)q"ɒ[Hn9sYTc,0JF:QEx@0SNk{Ɵšw@QE2(((((((((((( ];J`w]%szw kv6JDQ@Q@Q@Q@Q@Q@Q@ |mۿEk@O7@w?Կj+o+lPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP\xW_)OPn*Dlu ؊H]lixF żRVwmnb8hӃxSԮ-fOKh#d|-Թ }ђHp'ev$HbydpfcM.eإkor"%B3.i0~P37c5];hVKpL2H'5;X3/e%?ʿWUU* $FiC HnHݠqZ)ܓs̰$uHXk:.׷R:rʠ{?[k{]9 PHZKȮ$rO}deq嫗>c)xF6Ws,,ƈIS&7bV(5]3iepʛg#[\EspxA\r~a(_ZuMj[;GUHP=vb`uw]{6߼n{m..u{ mJ}_xT s rY|fpɥHen*63CsOG\ͭi.USex$ 4'ڴUֱ.aN,#ha"nQ\9-SmlfXd8 ^c>U;^Qnl2Cqn,G@rïR=ҳrIբ) (((((((4(  @H[Š( (((((((((((((((((((((((Wwxow:(º]s u/- ((((((((((((((((((((((((+*E5tIu4&ц #Nz RK4 'Hu}:ۢQR}Gk/l0LӪao?Sl3,4AT/̫a=E3ij[%$F#anT>sP2>b$v&rj-|7Z2qN 2K.ݧ*8ӥ/#Yf EEk hMTg'理K8ƣ^_:Mʒ Hb1+_{AmNѺl9(zM7fJqt>x; XnFqp8hZr +B%+ 6B->czds0h%9,LdVq{zE(e41!$1h3ҡݍO',rn.%XdrVڵݮlͨLkt[=أ79<'#Oʶa𱈝xȀQk{k[7t &q`C;x3}+ˉ;XF^LB'$+Ix1%jgLIx )xֽ4Y14"<(dYxz=W!a1 mh@ˆkmqŷ{}PuOާ\M(l@|ÿ~r;`s/}im{e!y)T;Pgk|2MJ-= )5Բ$X]0N P]vC F G5V  u}+kd#NZKq n!v] [ -Kf`R~cFwMy^d{ILar~\tEaʻ1-n rKLd XH9ϽIi]Os$W;sc8ErERQEQEQEQEQEQEQEh?P)WZoݧ8m ~Pӗ.sٱִF2P: Tl<7lz۝]+)X ٌA.Ӛσj6mpjDX0޻ c5CUuxl^w\!@l+H6O9s*ishĜ}tëe FӌR}HGwMI"TYuI"z`W9GZ'RG鯮Eu)YQ `=rzvlǩZWkήdC6FsB'F/;hI5:%ZW ebv3Nj̾f1沤6G$d3Z tH=LOk eRh2="3xЫE6FאOѪ7?TDڭPn￞cT!Piu)#iF䜡MGv+ǯIu /́yxn;;Dϱ&E4ɨ٤ iT*3U{]o2[N6M%IBd+еkkY`kipPz(WZ_ i_$P[Gyx+%rzdwYnһ4gE`L# 6[y~etUGÞNE[ԼyqkOG.aE"xR ĶbBʤS-?1N:Y Z/[km՟s;ʁB#r0TxAähmḗVH'ϕ#\Y0pvYxb;xF(-v )eCFGL;Olqu ̥yUpNz^WX}$3"(ʉ&ThЖ6xS*[J2yL\f<ٰ||-xMFE!oBxQ$rrjlGooppqpe?k 6%:m7VDhn0j&I${إ7A{Cꦷm<9ao{5Ɂ|ǖ{ΝM݋V0A{m!cʡvzơ}wH5[f@ElJʹk1N![x m ˜OڥAYL7L0o>ʅ$``:9qY[t{CH%L)l\de Ѿ{o6n}.? 
<[p ]_O@ݶp/ko&Ƅ~bs Dc^)cHC#`zGQY>#-IYHYیmGh^7%T`b;:cq,xPA6uYH'{O4ە6#zukhM8ϵE&mg_.đx|V$ZE'EZIn>o[@6ʹ>`>qVt ZCΣgk=6ƞd(Mĕlqvݪܛcqm#PıUG{8o{Tk'N0rwM;FB8=^*W6Qi32+yM)IY6"($ ϵT̈o -PJ(I`(c\ճj7Mq..@1Oz'g,-4Z'dvo/l("0}lZz]4{-Epm6oz5$Ɩ ţQ)œ<=|:'}|~_";sQim ¶KuT`3V[I"e!TL0PI۔N[uۼ>|4 ¸IF!4*t6qۏjJ[{\}kcT KƘ zQ۟OX1ʹ\I.P LpGV^yal-H5 e'1q'vZA"g`ێ6#W_ (O|ܠxo'>!彨2jx-7IK,ct=) ѨH$ -ți\¯m\U["]%V|+"8Y1ؚtxN_'%lc?Uk_ ǣr,Z#':'Ir>l:Lۢh±}e*Xc8oJߕI 34WWSħu8xgR΃n<+tv@AS ZTqEmᴎ$]M{=օV'[h,\[bC, #ݎI<8\Q7mdiH ه85)_BdwmۜvN+"zm~odi|)K( |йny{i5,v2@vKX֒̄%ߘZ%fO+l (8bOCgms) #WP1ʊH xEbpUۆ/*0Rp9ϰ8iZQ15_=@d1-E*7J4HbY)98zu^L K]de7ny{7j>*Ů`{d*7[j *'+a^ͼw0ۼӶ-lsN0:W=wg s[2G6Wo!#fNB3玁~,P,HF8?l>m}?,I%/x cWC+[WVZy ]AfwJ ޸Q bW#9}{MJAd#U,Oe9K}ȃ|c>\Q8:D>I0}c( <;`\mm0yѲȇ(pWZtTx}g>nluznZw̟ؒ_`U&gpBVp"xri+C(hD1J4w9^'@.\˹ER([VJ'i At ( ( ( ( ( ( ( ( ( (9 J? Ewuh?Pz`?O~3_XYDNEh&@V wqU}$` Haweq;ԙ"=:Nxk @?}zH*Y6%9}\N*ދ̯x gc >`09z^=tqlۖ "W$H'pc(~ߠ^:/SItIg &GUq ǜhZE8Qvs\_rZGpd`iDqzEBd uTmҏ6լ1'uۆRBpw>6\]G(HfV]%9ڣ\o[fk1E2L(=ANKy与@"J8 @+~R:rxzY-ۿwN]:'r4YI8un* QEQEQEQEQEQEQEQEQEQEh?PQHzӤ^Ěvw3 ke.Gl;Fyvɝ!e32PFۻG"I>ڳN"v0|EcAB-:ⴕٌINwL~s]K4Wx~jo(pK>].uY^Ӛى$򋎪$۰Gj ?Hlm+%ϔnǟvyc'WGmgZX'FBIl-T-#;)|;d$I95?oHLY(JB63Y/0Icv~tգӝe #Bd 8,9=sk's6ևS4I* 0O>ƒxmfjY-Vr][ZE d1,T{vo4̇E*As\$]0ۼ Ǯ6WZخgus##JH,t%XZF<Ty=: 籥}u8KG 's"g`1 9kI;!ڹ$n`d/ʅ'nzx{8$. 
c?W)g{a(-t2w]1ʮ>P|S~c:Olu Uκ.v[N˹*R}(,(((((((((4(  @H[Š( (((((((((((((((((((((((Wwxow:(º]s u/- ((((((((((((((((((((((((+*E5tIukh{K ,0>犹o:][d chVS&ц #NsMnPlxm^E pk&6)yqgu#4 n aAs5yg[1V$d e :?cVP#=,OcM[dm8V By_iUC/7o3f}kmm_RGfbO^ID%E;ytM"ݼvi98!9s7 rvAHdWpdJrĞ8I9'='Š8#7V$mmx/1G2O`k0ŵϙ]E,Is&"D̻r@9kƑ 59ؓjٲ*i]!ȺYP9d9p2~POC^YH⌢줒?3N2`RyW=[[uiMŢ.*ni@띹;\85 =]9'-ɏ|;W^3~leG?ߥ8 Q궊ii'?\7 n ԒpqU%m泗UtndhdD,Tdc>WlqyQOn8nEF%S!q!R"opqq;HlAw,  0pOyTs, kuGgPGLwPiVrN{ܙcL[a$7=9gWjɫ,.z|`f#kyQIF Vث5jҋo"TIEGG?ߥIdT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yPT~p~yߥrA x PWw[ESQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE+Bvl"Ύ?'N?$Q >Bvl"::+Q;\_G$ .s}GsE?'k( 3IeOН/H,Ύ?'N?$Q >Bvl"::+5KSӖV)^F`왈ݺ5eu # -Q@Q@Q@Q@s;y_]覮H~y#P#&3p0Q(ONݣ׮FBygh\L dda8W]7Yp ~[5Rpyl.Ƌ\"Asa;т`,pU[^ V.)켞b#bpqǧ\NM "6 ȃhU2pBQNG O,34xc&yYcrc)Re70,BGqyziH~+B/n$o5]OcS$#E,M6}KK 9|,fR1k[oҝiGEI7T/+=6X7m$YRI2HN#!mYnKg+%/>qR#PQIuipVTXفUV|(:)b 4I* S5X6,r$@3ŭb ˟-eQ9$GoҀ#GoҀ#GoҀ#GoҀ#GoҀ#GoҀ#GoҀ#o~/?ߥH#7@%_"4( c7QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE|JnO|mۿE}?WR٫WA@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@s5ӿk բ+((OǺ4Z冗g&+p WVw <9g]ͶOv5r=Q 8nFcclrq5g`NER Q[$BI.+urO+XlD,QEdQ@Q@Q@s;y_]覮KͬWW5kxBBt* '>+kZNC /%$i?Jghc 8#!GZJG%$i?>X?tO@F]8=T)_ִ5I&m5BxrI<9nz? 
G?OGJCRcV_Ӈöf4q 6A!Iʞ=XJmGQ"$G3 b c;<` G?OGJM)!8Cy˹.5bv yg Fڸ``1Vl|!0kaS"FDFv}ixW z?('4I{ְ!{CPM#Fɍ 2'XIsmxgHlnIiIV[8Ab[yl t4QEQ!EPEPEPEPEP\޶%EoQ_l{eȤ$ezJuz֥RH|טC-vU Z`HWnц e-}@n( ZjІ$01'9B]^O[ޯ5+{sm}%v].|I0)(((((.+}2d(.E'#* 74i2m൫(hHv-bNs1fuz֥RH|טC-vU Z`HWnц k2x|9[[k.f+7$Jjn YY%78T /,)ee9nQ#ԼsizHKH,g杞iaPAp)W+921ƺB8v/֥_IW ɑEkG+p}9*7~,Ai8VvmK{hc8*p+;7vp,dI:L))#p%@ zY+[\;}Vm$p6nx ڟ6[ͨG2Dfhe #%)=|%[8Ăyr@E%P)IVF8a fCa3U|qUYw&_&/X^{M:8Ryg 䴃$N3ZqzTjVx̒9wƸ,#TggQ"%UURr@s׽f뛋۩n5%eL%l 9 )@BF^%΍5&[vSNT;f  9 Ty@Zʲg$`rx =XZ4 S`6#!Cݎ*+O^i3Ar_&-!(%DN34x,zޕ`A}B &Bn~KbmM6f zcg5ol2)`Ĝ9'4e%ii͍Jg<Us 4wv&W%{XGD%K;q}5>e5cdPCP@ r2zGgndI}B/#3[\:6^I㢏Wwx ; _[5t7@w?Կj(((((((((((((oH\K{4w"ȤnV_5Ep>-Uv?fɪ߻3]vp>-Uv?fɪ߻3]Y`MW񚩪YxH/u;k$V%8O\ j~!پcm#h=B/#0?d0+#M3"dgZ~jvYc5D!") ;޺ɪ߻3Ro|E5XK, #iq&EWmIYwG 5_cho 5N.ſ2j5_ck L;tnok sop96+/~? \ \/~(/~? \ \/~(/~? \ \/~(/~? \1隈It4*zbVPEPEPU:O.'gNPKN=*s;y_]覠I}oWjMtE{ƫV˜S+]'}^?5[Z(S+]'}^?5[Z(S+]'}^gu?vzܗWZ}0%1`dֺ(S+]'}^?5[Z(S+]'}^?5[Z(S+]'}^?5[Z(S+]'}^?5[Z(S+]'}^?5[Z(R;Z7K5#͜ HH%+Jak%QLG?'%Uj+/ĺlυu}.ݣYl$(gBqj%;y4GNacv@@(r(<RA\FѬt}Fh,㷍,U(' gPYEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%PAEsct?MT}M7%P'%UjԮ?Ɩ*_5HrdHQ| nHqk+2(((((((/֥_I]mr^K,#+R>6^I㢏Wwx ; _[5t7@w?Կj(((((.6]_TX{+)#Y*YdzԢ >& >& >& >& >& >& nГ<j[ /PyO vg1Oɩ)mF{QDrL۱`0TץJg;}OVmWK)̵R%vg|xWEE7pJERJak_*ׅop)*9[{ PȡԌA8IEs7O*m2[߃6q4Ygc$Ï29 m (F? 
dӧٟcՏ1Ѕmc0eR@`!6]i?o1|k3@,GzonS:ӓz \ʷe.m "nۺ#vSw$z._D?{{hKNsv'4Ơgdۏ9y{c-mDc8yIxO(Gvqr]ӿ\-;5ځyh vs8Eg( Bp k,/D?{{k&[^0RHl.̱^j(YdHna]q(D}w -(LŠ((( k>vg6 p Obox$1S^LdM'ӱQOi_6ğҿm'#}E.D('4IQOi_6뾢D('4IQOi_6뾢D('4IQOi_6뾢D('4IQOi_6뾢D('4IQOi_6뾢D('4IQOi_6뾢D('4IROkj^t\wE?yrC&&X5?ZEz׺z"H`VcMB8bbdxW z6\ '[H\;Hb8+xn⸷9$l]H #ԕI%l(b>@%{]'>6^I㢀>o+lW?WR٫((ֵFX4:kyoE\ $Ga?^-i(oKſ?s/"h9is :Jw$Ŀ MQixZ'eZGujzO>ۭ%mЮ}g2(AfvW7h9hoOȴs :J+oOȴixZ'eZ9Y%ixZ'eZMi n&dRH Cd,>dgWE67YcYXu1殞!IҴ"%x?<(u/ D̿J(oKſ?s/"h9is :Je ;o [ Gh9k?Y-c"wmsZVϓ2Kfvc=zQ̂(oKſ?s/"h9hAftW7h9hoOȴs :J+oOȵ'-}7\Ӭli9RSAOCN酙QEs u/-  ; _[5tQ\T:TzψH=pĐj0"'m#W;"Zwk=.iΦӿ\-;5\9Y\"Zwk=%;G:Vu5Iu%;Q"[{ I+ .#Q΃m%;G"Zwk=sgSEri?o9Y\"Zwk=%;G:Vu4WoMC\yõ6*pe7_ڴW&_)((((+*E5tIu*ַ;G!gk<㵽Es?LovCy{kz.!gk<ַ;[Qp0?[3~^ޢ~?Z/\ Cy{h?Lov`ַ;G!gk<㵽E3~^??[(?~?Z/oQE4[tv.Mܷ :p38u5%Bo5VQE1Q@Q@Q@"?ng$_ Zvr|RpX8pXo^i?'lGVlG~14̤AEG[qo,sA*H2AG9sD)[U5 whi79C ܜ,'bmW3O∵$zvrRA,,Z[y/T)cJ屹{ڛ*y<{R^>DĿG[@M5b˹4~O /섳|v=󎵛?m oѮ&c(AR @aA$j[@M5ko4OKެ gX6]_DUJJLrcKk1ʿiW|;I(@@aVh8_ _Fb"ZU5jkTGy :g:s~hcH=30Ky2ykc[ko4OK}ſ?q/#Qw{Ag:YxXsKVbAhKu'&_:u[\_Aut%s t$]PĂrz:_ko4OK}ſ?q/#Rolt7WW[&Y%e&S+)v`~EQNiumcs,G+2<<q8kW~-&'%j5.\$913b5+;KБך/,X/o/#ў7p76 |'9h8_ _FVEXAG@8Le29KzWQX?ko4OK}ſ?q/#PoWOZ_ _FsVWZO4/췏>[g;LvOLr㸥QElfQEQEQEQEQEQEQEQEQEQEQEcu<-äb {g\=W6ғI; ȯ4ii+Cg?ن `lmo/q:( (Wwxow:(º]s u/- ((/(/mlV>%E]{G`*FWxMYw5֝m=Hn!`26ȩϬAwX>֨FكAbG$c=Z\=R[HQ I `/ݸ+:Յ~׍f-\[&(< IwR¸>FZe\o?r?{-h-(WA\sa]KKf9C+Em[uHn`*FGV Ѵ{p4bH`uAԎ2>zGIhEr4H^`R+;quhMkVO6\1pϽ26I,#ԌZ-bB'Bsұu^݆aud^͑q66֑˳yji֚mPLJ.%lXMȢ}p8dcgA L H4f%A֭_j,fX (u9R>t*o%^[z+S\34̒fݛbHgh%} Z(aEP?w_)K+7OI`եI]O6"FQVHQEQEQEQEW?'%Uj+*E5jQEjQEQEQEQEQEQEQEQEqP,?uw%Bo5VQE1Q@Q@Q@Q@s'WaHںz4O‘L*;tQEbXQEQEQEQEQEQEQEWOZ쫍GZ'eT7;(3 (8χWWC\y_36H3U/O7%T~襭Ǚr?Ty~0ևy**1|kCg伎A溉JG;^B]L N<%CN?8 :' "z@`q/tOEW?_zmkNu&v$`e!I8䀹 Q;uIXIX*\4ozccV^_ڮ-8Fq DŽi?aRG%WU[Gc Ӣ/*N<%CNޢf'D_UxKUE`q/tOEQ DŽi?a[P'D_UxKUE`q/tOEW9kF-'+T3 .m'z q(D}}EVg?WR٫WA@-^'Rvse`c*ch8_ _F,k2dA+ʄ@H#_ _F~-&4aԺYA#4_ _F~-& 7x:mevU$)vϨ+7OI`եI]O6"FQVHQEQEQEQEV !ILHK3I'VǷDb`pAѯ2CMyƫGF1,NƛϿ>QȊg= Q WCϿ>G&>9yj&TȾ 
zd/r(fBSH 2UA"G&>>D<5_^?<5_^4hӿo39O4?MWןjO4?MWןjM;}?4hA{U5U5N}ƏM;}9s3a Miض-!-%.X"8=kz=R,H`w~5z+QE0 ( ( ( ( 4O‘th_#+jTw6袊İ(((((((79hіWOZn)lwQElfQEr襭>oMF1np = L4G{ƫM+ 3}u?0?[`7L4G{ƨL]owjE`aϮ;5G&g#TXxKQ{ç]Ѣ3*8in]?Ku5Ԯcy =´lT9߼L]owjL4G{ƪOĴMo 㿼]X\Zhy7 h.};Z+ akhV$r+LpAPXIFp_ >iQv$8'SJbt;y٦_R>nw ,o P;N9L]owjL4G{ƨm7L4G{ƨL]owj7L4G{ƨL]owjE`aϮ;5G&g#TX |Q#`kZiW9kڷE|VyV3[2[twtq+QKch(ow:(%{]'WA\sa]KKf ( (9KJ`iCjk4Frm6ono 7mw95/(/mlVRJ+ծugֶo#ӆfpKzꪅjs B=Hzd)َIdgI+xiJ,{ #ߒ.3\j%ENɎfcuA!#~b s9 ;x#n]ӍTg΁DES?s^j]oa}i$EoJHvYCG< 6I,w4ݹ*}~[:5ۢDiUV26K`׿0t Y Hڥ,G&]*K 7vzUEDT@T`T[RNחkGZ'evUG-2֜7;(39º]s u/- `[]h TQGQmRMޛ=jvSwOZ%% -Kk0$J̞V2Pq=me*Os?$9gJ{һj*y\5il$5 ij36S8O覭/ :O?'SV'u?V؉QEY!EcLng'ukM klTp@T?5oeHi\cV[6_G&C~l=bj9kտr͗#΃=bj9kտr͗#΃=bj9kտr͗#΃=bj9kտr͗#΃=bj9kտr͗#΃QixK5]V&%K|@r|?]NII&qX⾈$Q `@ؚZz~7lu3 ]~"Bg=$#hNG:jt'R-lu Fe5K,BaFC#<5uy>%$;d*cr'-dl2Wey[Iiu'R߼ vX\s9.O_[zueI4/8<:Պ[@-2-^-dgIEs^-/ D̿G2 3/ D̿G_EQ\_EKſ?s/"̂(oKſ?s/"h9hAftW7h9hoOȴs :J+oOȴixZ'eZ9Y%ixZ'eZ?[@-2-,Β[@-2-^-dgIEs^-/ D̿G2 3cD)[S[@-2-`.xVMk@["fXמnTr:¬GG'se¬GG* /Jtt{0:+Y_.U^auW'  ҿ]<7@+Ø(Og:?VxozW ه1WOZg:eIf3X[Z˰hWʚs* (9_?Rr{h a S\ *jK)^[lV֏-U1tPTr݇'*>ViL[(c9Vc72Z0C$3Ac!y<1 IE]uK{S-Q̸'q1 dMNF^OuzxHбU2 Or:#;+ cP_&?,Ds3.XFIw#u9mUyv1u=z=xPot*IflBev #|3ȌBrpKm78'͔u$-yPJK*09GQ\& WRV?,,H C$s=NΘǣtKd.5 VdA7øQ_ V^ 4ѭXDD^H\vy{+Zk"I"w kwb ;Km*mv)M7duW.o@%{]'>6^I㢀>o+lW?WR٫((oR.FVU׼9xbԴ;>S۬<=+Y_.)X(Og:?VxozW uW'  ҿ]<7@+Ø(Og:?VxozW ه1Q\* /Jtt¬GGcS7aFKOU^k_%\Yv #.FpHf'#urO*G^EQgv c8*9 ѷuXonu6tZXb dC#)lA̍Ў++ſ2jrnY}`MW?|[C&~.FCſ2j5_cha̎?|[C&~>-Uv?fFCſ2j5_cha̎>FZ|[C&~1<+{,0 BBrvsN0i(9º]s u/- m|G'UiS6w7I AAև'D_Uo__.a=#vCJFo'D_UxKUݟG#v.@38 :' "_40/F ?C(c7 Ӣ/*N<%CNVqh\nEIHdDA󟳺`[VU t2n1T )|ixYKi~۰#Nn2dҍ+[ EVӇnn|.1S_^x6VP\OiVkwf>`ɶp9%ĥ{y_j(Š(( J? 
Euh?Pщag.-"*nGW?`=_YH6 wLv$c^-:f]>r{}j&vUKcU#zwEYXm(b ( ( ( ( ( (9_?R`IkG5W *"FԔPEPQp.]>IEQEQEQEQEGVј]YNKIܚ((79hіWOZn)lwQElfQE|JnO|mۿE}?WR٫WA@Q@Q@Q@Q@Q@Q@WWڗKC\ 5,p%䞔ҹW-?oi򳩢oD?{{hKNsvth[-;5?ӿ\+:+KNsvD?{{hAΦӿ\-;59roD?{{hKNsvth[-;5?ӿ\+:+KNsvD?{{hAΦӿ\-;59rσsa]KKf(|[!H&bBS[PY%XOrI?ӿ\+:+KNsvD?{{hAΦӿ\-;59r o xMK"2y\\ek#GwI\MX>7L6t,,R8erd6:LHU5/;YtgQ:1Ƀ(&F2 1Y hhvY,"RUV/B;Euo4MIkj1\Tq)(p5(Q@Q@Q@Q@xW_)$ 䊵'%UjԬ\LH5?eAП/H+2H5?eAП/H( OoYEj'+zS?[_G$ ޢ0 O$Q B~" H5?eAП/H( OoYEq iH"fB>SӂG55_ eGV&GE\-=*:fԣu#bc8<?? GV3ب3 Ӣ/*N<%CNޢ,_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/* N<%CNMr i5h$8 »j4( pܙlwtQEj@QEQEu+;KU7=nɀoA%B_ "ºM[z-A%B? ' jψG+Ky%AVr`;Tg$`Nl^ROkqu%ūH[i"H '#@$ _A%B? ' j1=(3V8]L#Fтe%LV*,/v]n@X 5@I?V?_G [D|_MS?Wشm-<2ڬ{lqiYNW="($(ow:(%{]'WA\sa]KKf ( ( ( ( ( ( |$ K]Ur襬z( ( ( ( (0iE-hbܨW ۗ,2G'qxGA1H.3' Cb=G^mMypꭻ<`K'9\-i{k[U*=2Xc8|+oqso>E=oZG̼QNѵ=zK-O$rTH*q}+xaP$i|5C ( ( ( (8>FZ}\?r?{-k9ºo]s2t- Q@Q@Q@Q@Q@xW_)u5K.Tn"Xe!iMY𮯥۴k=񴄅 TN2} sxİ$M%jc ׵Dvլ:umndI()>I#5K\{IFKȆ?M, u+ğҿm'# G?OG}0koO\buX ;"

ki!Vuh:Vn :} 2Wty8OJWCs־02h+ưyh \K<3@U@=s[&i+mC#wxK0999Tji w-I;um Vs^0M=\̑JN\g*&OS2U&="&2IHkg9ﶵ4i){tc|Y%1xF9a' RRC/>l'{e-͐s#93Q7ilNYE9d C~az[_%mvEcI.c< 䄢RT \R-6IZ"#`? PZڥlYHIc@*zDh?PuMH# 9GW3qKZQ ]E<\m zV<[AMdoO4-6^NRr$Ũ²C"vO>1X!5N$K{/坷*!}` cw& 'h'7_I CR mɘ۔e%y; 9߆:UOơ=K<[sm *A' 7b> 'h'7_Iw ǂ']fƕ7G$B0rcjH (Gsް 'h'7_I{\%_"~ 'k‰w5ſ F7&$?pc YPR*( (uoźkC*HKWR2",Gz]'}^']Zެ25[]'}^բ8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?Q >jG8r_?V}i|CyZUXmiyv8Q]-q(D}Sl($(ow:(%{]'WA\sa]KKf ( ( ( ( ( ( ]x|-mo&*3/%EN=Mz-r襨Q'%j>h8z$o e< ̳~-&'%j7M97)'#TDĿG[@M5m3UI uKd.=0}~-&'%j2Ƴ,FD,O$ d2?1O '%j>h8zPc[@M5ko4OKն%Љʪ?0 =iDĿG[@M5oS"9Iadet9 B(~-&'%jVW#5%GDfp%N8 4DĿG[@M5nR7 d 4̱Jಡ<1b}ſ?q/#QDĿ["GN3dEt`e9z_ _F~-&[@M5ko4OKսEy6/[\i_o}߼wD뜞kGZ'ewմv3?/WMk_AT (((((/:.o-f8ĥ"V zV_E@SNkVRh^-/ D̿[TsQh9hoOȵE9Qh9hoOȵE9Qh9hoOȵE9Qh9hoOȵE9Qh9hoOȵE9Qh9k'᷈eP垹 $r$1U vu_ eGUM%chButx55ԓRF4ATCz+Р}s~lQɍU1A&4fm/ i__ ?>g>߱SȇCiVO?G&Эv~YϤQ ?9s3_ +kݧ}} bD?6mom/ iA~`H?أ39M[[?KB}g>߱G,(ACiVO?G&Эv~YϤQ ?9s3_ +kݧ}} bD?6mom/ iA~`H?أ39M[[?KB}g>߱G,(ACiVO?G&Эv~YϤQ ?9s3_o ϊ;짳[6(hG}n}} b8xFP)&MEU((% ?Y5nͽvԢy08nqI >-Uv?fpw-I}`MW?|[C&~.FCſ2j5_cha̎?|[C&~>-Uv?fFCſ2j5_cha̎ J? 
E>-Uv?fs×^5R\fy[yK5Q"QLM(B(WO7`_X>襭fEPEPEP ڶ;_jF)9r~߅<sFDV3_j]=ۿHmk g-&F-F}6[F"xAb2:4fcXom˛d:9>մu9ԚH ^+!YLr59##sɠi677 ukp%Hz-2R3]Ҵl-L_8rd۷vq*4;$-4m:81C'ڜZ[O#;=ގbx9FA](lx]uvw' eILǸ?/Z#Jӗl-F x'׽vi4e,;hV5-dgϵ-tQE ( ( |Q#`k]q(D}}EVaEPkG+hCw-mbH+ ~W+']Z΢.=voiqw%ԑS#j푟ZQ7mͰ8iceSq* 8c/.pDr5`9{U{}+W&MV2Ћ6!vBQqaF+"]4oϦjkkeY{$sn#o_ۆOӵ; ^H&mdC6m ~g<֊j%% -Kk0$J̞V2Pq=me*Os?$9gJ{һj)s1+m.,rvx+V"2BZk"#e N37rSH3^ Eo%c7ͻ #:`dkK-u 'Dݔ,_54S\^_%y"h9UڅcyA9}Ar #V{kin&P[Z zLc;QK|Y2i5h^eY<*d;2`tn=ao5Ri.ɯw>W*;AE/C-/4EnngiD`$`rᕋcc75mJOT\'ӳa9{s[TQ}nh^M{kgQ=̅!]T62x+[@ 4[rF4c2ĠJ֕^J);)QEq(D}>FZ}[Cc9ns2t- e ;o [ tB ( ( ( ( (9 i[ j7' qq*C ZI#TQy $w8 :' "9z_40 Ӣ/*F DŽi?aG'D_Uz_Y7fqdb)o0lEbO cO ֻKyRhe$l]HG޴2=v( l9hрM R m998?7tM+ X2ʁo`?bsGW3~^??[xM+Cq#G$E?pXqWܳm!@#5o!gk<ַ;[RCy{kմ{m'ZO٥7O1-yzxA^\o?r?{-j;(3 (>@%{]'>6^I㢀>o+lW?WR٫((((((_?RU\m/ \\D*.JFNMEr<7@+  ҿ]O+(Og:?VxozW ه1Q\* /Jtt¬GGc?U^Y_.fYEr<7@+  ҿ]9VxozW g:=se¬GG* /Jtt{0:+Y_.U^auW'  ҿ]<7@+Ø(:}IYt1u{KP   ҿ]9VxozW g:=se¬GG* /Jtt{0!GZ'ewY|=tٌV/##򮾭++_A\ma]7Ka((((({ƿšwZ_AaM;Kak,[LeDfrcu99㷭g=ZE'd٭E5+(Hc#^}S*R];]u2m720voemp:!8l  oc&42 S0>'5\OReEpJ rڇu M&&o.=$e|24HFߏ7}YЬEӠvG*904I3sa4R衵z-c66R|8F2Cuj[-߇ :+\1q4gBpO WUuiWMeq*Mn$eRHO@*;w6qkTu` (# 0OAF SZC<ddc?3vCXj<޳Gm#2nlO86oM:fvC1h#·''֚^aj_0B8\qM:[6OqYG#ha/+<5G0$qVosuVCd3[DҬr NM73wt0EIR "%UEfڊ v`S*c񞄎_AY_hQ@Q@rA x PUreEEPEP-/]WQɫv% ?Y5nsUv8'8|MM*{=@h".~*\c 7\RE,ku AJ_Uu#ibXdn_P{h[zUGg+:wxzLxr<p |==G{/ i2)f5)7y Otm{52//O޶1o Zd 1TF!;;iVϞ X$o*|ѱݖ<`}Zk="/wu!v"qXgT 1f4αugG-ԆK&@D*Luy 9!ktny%aȐȇ Kp:tZzY\o|GmX$[Z&U!I]rt5i5Xot{i P3†##N@h?P6^I㢏Wwx ; _[5t7@w?Կj(((((((ס* |$ KS)Xi\ ߃kcG'~C?UjQS 2; z a51J(ew?Q ߃kcZQ51ס*() ߃kcG'~C?UjQGS/ס*NC^Ԣh_'~C?U?0UE9LNC^?; z aR=r?0Tw?V{@8 ¶=O]':{kO-Щ[?0UE9LNC^?; z aR=r?0Tw?V{@+%u6uBJrHRN2@ϸJ/Uxkc???F fa b>ZGwgkc???F 9o'D_UxKUݟG#v.@38 :' "_40/F ?C(c7 Ӣ/*N<%CNnzO}???Ž@38 :' "⫌U<7Z.-IWeu AGz_F >:|*MOQP }=* }QT AmS=?[3~^ڹJeOx "_`9}A*{?~?Z/+뗊 PK!Fm{pWʨ3Ul V->:'B.T_j^Cr [n8Qz{fqZ?[3~^ڲ͵el.7\rF9cFr/Cy{h?LovnW62*Y??,m NѦ0gs8.tڹ J? 
ET7&[QZQEQEyE%Xj\U6G>$?&XNZ'7_I.+dջX7'7_I-މy&q?doO4}ſ?O/$\ xz' > 'kz.<[AMdoO5E-މy&(q\[j|BbC01G]rA nL;( (|$ K[i1,-4{;6;xmbYUTo羗y"e#7?K<GH̎Š}/Eo羗y"F(?O?$Q{'(a̎Š}/Eo羗y"F(?O?$Q{'(a̎Š}/Eo羗y"F(?O?$Q{'(a̎Š}/Eo羗y"F(?O?$Q{'(a̎¸>FZӼ}/EdN!񦘺fuZ ̶ݝ>1Ŧ&GEEPkG+Xĥ؏ 7?0G皥=.4{k H.h׵Eߛ/GN~_xlt#;ɅM[-o=ؚZzӼ̱F]c ٘*$ MBK/e2JKmP2NI(adfbj9kտr͗#՝W^TkטMgQɭC"]6@''D_UxKTF+a0GTQfm>eh̃aW-Ǔ =G݉/n<J DŽi?aG'D_U{Q>ҭ1t*@ V5qG-кIenҸ$!M*~R B*'D_UxKTF+cOLk:(H2N ,lV Ӣ/*N<%CNƬ4(  DŽi?aXԼi]]Aum&6Mѱg8aUCqKcШԀ((ʹ^NuxS?sZB/&-3<ہ{ſ2j߃sa]KKfſ2j5_ck 8Oɪ߻3Gdݏ,?|[C&~>-Uv?f( o 4`MW.ſ2jտxrKfK9qo)cF1Qһ (Š(EPEro&6U[ϧ3`RK ucMZ 䪗$ftW3Z <kC6^I㢏Wwx ; _[5t7@w?Կj(((((((:vM}DV >nqZ@c_n}IEO,YtbOZΣ*;S[6_G#zۆd9mȡdTWiaa=܈deLn dƣRS[6_G#z km6mY\<2یۜ摵k$w1bp xTѨ]8kS[6_VGFMVѼ:*8JZ@_ClqGʇއ~$ ԥC~lG?5eΗ>f\(xΠʧQ{.g@˲F jFG#z?p͗#3O o=8kz.#z?p͗#\ G?5e?ߛ/G(?8kS[6_[Qp0p͗#C~l`?ߛ/GO o=oQE[ObQߧmRB^$Ɲs=1}6>FZ}[Gc9ns2t- e ;o [ tB ( ( ( ( (9_Ÿ#zĶ9$]U*ג:zG$ {+ OoYEj'*,QoZtڿ=6ݑfi qϥr moVkveV.vRrAO< OoYEj')8hjc|d쉵m)w$ yWlpK ЗZzźAG5ݽBI/qQd@ /J O$Q B~"z_<& ]4j{ZM%/L`8##9=x}^uH")u+̰NT$ֶ O$Q B~"m[:[|bo`y2lrcL~Z'%ۀ~^Mn7EӴmc!G#rOIS?[_G$ .o:ޢ O$Q B~"7̾/ OoYEq iH"fB>SӂG5 QEh@W'i<֑vjVW?x±XnzO}???¶( F ?C+bnzO}???¶( F ?C+bnzO}???¶( F ?C+bnzO}???¶( F ?C+bnzO}???¶( F ?C+bnzO}tKkK7#EQEQEQEp^ +`.gj +R0,䞾V&C~l/]WQɫvrw4IXտr͗#ߛ/G+m2K|xOr*xJ[mbٔqyd G3,&C~lM[-o=mkBKX%S!IKc y fغ2W^Z2JnȪ21{s$ Pe%eA*sBW _3(;lzް&m.&2C*cDWo])QEQEWOZ쫍GZ'eT7;(3 (>@%{]'>6^I㢀>o+lW?WR٫(((((( Zj |=k{m%@>Rf:|$ KYبzngXgcp@='Ԭ]Y3і+ޭQPn줒VG3s?.-7ӣ%F0Fl:G<2s}b/$X.$q=/n+'ǡ˯Vd:;ɗ`$ی*c 'xn]?H%xLe] otR{ʯn.w_aw1cjצ|;6t϶s{֭"8U%_BpGM{fA0zzRʯp)QEQEQEQEQEQEq(D}>FZ}[Cc9ns2t- e ;o [ tB ( ( ( ( (9_Ÿ#z?)aW;MJ[U+5c۳z"tk)RvFW7Ap5?Q жAx(RS~r*ŷesKi_oWwi+ 瘁#")Hwuր73ɨq-##"F//9v+ivWhwWe#:n,R&_1oR =Emks$w~P]3%Gm?۰SrZUܹF~9z?E%y|Qc%_ eGUrdzQZsC乓JH$d6omfw{SwJ ' h_+h/uo.CLm!K+ʹm*F7.O! ڟ%%R㑡|ZTH# v@Olk%f7 Э/&A%B U[7SC/$c Fz՞_+h/ Э/&p0 ' h_+h/(?Km4/O\ A%Bӭ-4z=6-,V$j͹ >»j7xX뚸nL;( ((% ?Y5nb|7^F#Lx295 B~"kSEV/~nne "lA[؟8 eF@3tq B~"H5?e ͤ0DEs %0w\A;5TZè-\:B!1uPN$ ? O$S_!Y_-X?WCWS47AFXqI$Dl7llcL{ ~|S B~"H5?e~97Iߧ숻]`C?+ W$ ? 
O$Soq$\|T& sZ_j'+{߉\iZ{;\Ly09\xN[EVQ@-gP\Cm\.>l9hр6^I㢏Wwx ; _[5t7@w?Կj(((((((WO7`_Wi'֑aɣVUdPP g*&بQOi_6ğҿm'#r2Q\o%$i?JKcN= Q WCϿ>G&>.D>fsh~ ?h~ ?t?ؚw4biȃ'k5G'k5]&>ؚw4r g= S[2&YNT ~?M;}?4hA{U5o(Cbp+ӿoN}ƎD<5_^?<5_^4hӿo39O4?MWןj_fu ֿ{4&X2x9r8 ȯKӿo_֑isNbE5 ҚBnUEU+?? GV k/Tf[Cdyط <3بuV<[AMdoO5fOoD[3}7m3R}ſ?O/$OoDS 'h'7_I}@Oh44&,E$BEX?doO4}ſ?O/$zE`ſ?O/$OoD\ckmt8m/ < .Lg.HtwbGF0;]}{YXn¬PB}F'ooQCj A^$ux"m8M%ɋRT Ȯ+:|^Žtim v>bJgQo*L]\ ʜ | QEAQEWOZ쫍GZ'eT7;(3 (>@%{]'>6^I㢀>o+lW?WR٫_5l1C tojulnS 髖.+d2vCJ?'k(Q;\_ZTsQ >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9Q >Bvl"IeE9QOOϑoesnfZGc6y:Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k)~+Yx"KX$'{rȬW\o?r?{-im/WMk_AZQEQEQEQEQEr!a-Vay3%d In?oK@m+bw.+C -;5?ӿ\*nʲ0ӿ\-;5ݢ0ӿ\-;5ڗV5M>-.PIQ-LCjPę񎙩ئ.T4v7W,' w>REadi?oIxGu9M鰴6̊x?\U NUm71A*GT{KNsvD?{{kv.da%;G"Zwk=㵻Eada%;\nGdG+ Is,IJ/?2K*2GE\Ɖ!R?"}Ck-SQGF?14O%cKY/I|){dz\^bMH-%L, BU{S8%NˢimBm eF$3`t9c؜Qd>g/k=5XRV8Fj ~} -k, g@U8>`餍pI I^U$c Wj$M떕 @IAz2#,.in\< }-+"I s'\G_jXrOĦ ᣍ۰ Yk妇OyݛYE9[{)ڤ ( vp]<аIRIPr=~u ]Mح`hV(JŪYaԱclԚׅ5e{s,J8Q0HI! 6)K; +[ƍ#*vn#+1j-y|RcH ۃ.$e{\|彎[] 9QyF=A4nCm%7HbDe miqenv>Kݙ.~' ezH9o:*((>_*ׅ¸M^:[QZQ@Q@/ 5=E~R1ZIuV^nzO}???¨~0ևy*/O7%Q΃C(nzO}>_?5oJ?TseF ?C* h'aAU9Y{???F Z <kCFZCqKc+c0(Wwxow:(º]s u/- +.+d-/]WQɪ'Qݢ+"Š((((((/zV:՞-g孼ۅ۲3*J!wws_%G%mΡIpau$Ă~c$28b&.T {SM=-K8&8OIZavw|1D< 綷fG6wbpn~iUqs\#m4[iZ9,mD=<[z8Vr0Z={]oM{g}bl/.ɡW-XʤK848#МZ/k=Z}OHZ,UOʸry=Z[LlA&H_a/^n5"[㻞J" fzU42HQDff$31,ĒNI&(@QEQEQEQEQEG-2ֻ*|Q#`kU -ºo]s2t- (((((/(/mlW795qkks7mdh/ie9y㙾h8)\v7'%j>h8,QE`ſ?q/#QDĿEE^nik;L=MSE5Z;AK;[Y!Z7 ͷiŒ8|c+[~-&'%jiks"sisjЋ9}LŤ-h8 6z9=J4I<C"I^so]m.ms!;K!Ŵfr< t:?p{_{-bn&|dI{oRK׶W:<L`sOּ1k:mnHMo4I&d$`C K`]mK:-oM{?-f]êr0xjԮB}wR&ks玠کEM愿@z6z4\4G*lc`YN w;Tx+MYἷ侸#a+y . 8#<W%/ԿP+K_Urdw4QEj@QEu϶]iZk7ɥINb2p+/g:(g:?VxozW VxozW g:(g:?VxozW VxozW g:(g:?VxozW VxozW g:(g:?VxozW VxozW g:(g:?VxozW VxozW ^:bᴴsoj-_ɯH~J`i@Q@Q@ |mۿEk@O7@w?Կj+o+lP^ wR45]C|ؼ <? 
5:[h8j"8?& ?@*'Q [D|_MKm5sE"֭xk #HzӥhK '!^+6ܨ[Km4/O"uxMٲ0Hw X=ʽsnO[H,Q"FH=iًoX?V?_G [D|_MG7"aS0NwFG9넲񕎣6Vy>H ܲ?ϵ{;/O?V?_T'-. i-ĥOp퓕&3@|_HQ{C> 2DѲxR1OYd/ [D|_MKm5NJ3\ZCi%ݴ"Ļ`M|n,hÿ ' h_+h/1dldϘF۳l,Vz7q+s uW`ij42 ' h_+h/TSHt~";d#`L&VC1CA!]y6pq\ Э/&A%B1,`/DYxTGF*K|<:Sq+~3֓WiKm4/OU2 V3C4 J$ :sgn2q'Y%ŴҮ%IU .8n22)۰sGA%B? ' j+oi7w6;B8)$NdErEm5 UC9$wߝBbĿKm4/Oֵ_w+2!*5߼*$xKR ' h_+h/O+!]ᡂH ȼy88{İyLydyQ-6'қMh.h [D|_Msh27O>noĖ3igzw0:儒<> ?\׊?r?{-iڃi{_A\ma]7Ka (((((/(/mlV>%E]jCV/ŷ{񟽜e/ }dVROp'_WVՓS]Y $ݧNYY '1jRJU2FA FYYW$ űGVamm Q9xmMB.xb].#a-Yا pWr9cd ?RԊtr2Du T9>+[ +vų#Y#/O=ǭkmْI+ұSNvQ|,v9┑+z%s>KfX^Nܤw-Ąsa:jRV] (Š(( /?2K*2=f(H =!V?"bO/@Ե:le{pm>dszcZs}GsE?'k)]s}GsE?'k( 3IeOН/H,Ύ?'N?$Q >Bvl"::+Q;\_G$ .s}GsE?'k( 3IeOН/H,Ύ?'N?$Q >Bvl"::+Q;\_G$ .s}GsE?'k( 3Ie>hJNӯ4˸#_.&ES1AEЬnQE((h5F5]C& *d9SױZiRKA_jݬc 3}u?0?[) L]owjL4G{ƫzL]owjL4G{ƫzL]owjL4G{ƫzL]owj>̷ }: %6FyV ޻ZY%idw4QEh@QEICap-Τ𳱆9r`p%cOoOȴC+Em[uLcKſ?s/"h9kbv>TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ>,liw{k$z瞘笮79hіQlM$( (>@%{]'>6^I㢀>o+lW?WR٫o "ºM]MrEu,{͸Hdh' 02NONȳ/ hj }ё- Bn?.1a$mkY)ʄb@#i[S,{cgfF$QynKe9lGJC<ۈX޸WGC|_r8  jErDZҝbUh8Aa :+a =OYGޤ,&$8\(U(Uأa 818o{4{w,$2,qƪ$ ˕*+S2-ȞY (CJtM9XQc 7ݐr (TbZ-ǘ$i?y|z1рzbvô/˟/bszU(^Qp ܬgn_ "2H| ؓEaʎs[ 쁄a;Üuѷ/5ē'3$p\h\)~Ee{2tu+3۝[-WgF14E7"/|ۏVRsw[Fh.Hscn ~Gb+jM>f2O $gVU{?gvܓyqݍwo3R47"*'Uœ kV.dӦP6OXXb͖FI8(ӅNeݿR7mݎ3}kNW*R]$k #Z7 F^{v>f$K6Rq$YS pr8 )݅gain`Y۷wj8<;#"C)Rc Iq#.;TсJբc,wNc>Vd |7]jRZh6NL+渒V-Iv$ (Uحmc~5*rwr}S>.A!w 0ql7yo JP #h')վ6|Mm|mG2j,'կJwGQ\o h6ג~ƁĬ@yizl.-n~τa4[Dsda[va̭4z%&vj6pi_*ׅ¸M^:[QZQ@Q@ua]GK&4hz֛=zIlGr/&<0>UU^7 uW'  ҿ]<7@+uW'  ҿ]<7@+Ø(Og:?VxozW ه1Q\* /Jtt¬GGcK_R¬G[-%8 0#UuUMڢ* (9C+Em[uCO}OZӬmJ7Xn6+;aϱN<%CNKSEE`q/tOEQ DŽi?aSaV'D_UxKTX + Ӣ/*N<%CN,`xk@%{]'>6^I㢀>o+lW?WR٫o "ºM]Mrx>KJa4I%$jd4o\* /Jtt¬GS:+Y_.U^auW'  ҿ]<7@+Ø(Og:?VxozW ه1Q\* /Jtt¬GGc?U^Y_.fYEr<7@+  ҿ]9VxozW g:=se¬GG* /Jtt{0:+Y_.U^auW'  ҿ]<7@+Ø(Og:?VxozW ه1Q\* /Jtt¬GGc?U^Y_.fYEr<7@+  ҿ]97,%MQN׺q[?<7@+Ø(Og:?VxozW 
ه1Q\* /Jtt¬GGc79hі7* /Jtuffax,5 ;ºo]s2t- B(((KMxWW-6k@JD,+*E5CZ <kC<[TV<ӕ_?5oJ?UE9QZ <kC<[TQÕ_?5oJ?UlzHՆuK!tBy7}=:Ut6FTE:haʌ/O7%QZ ՆH'V$CЊi*1|kCSaʌ_/O7%QzRU滴ֶlY܃#a[Uh?PU=sQ_.=Rk8gtǴpA'x^=zMN֊nIs\AIy/0&TA|ݺ~T׮PyL+U S<0'o8'9_>u{-]xʀi&[yf#mZ8XUtkײ_yq鱋c0I%Y)͑iYޢt=N?m1v q3 syvL(C ( |Q#`k]q(D}}EVaEPkG+˻kҶ|]0Ҵ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;TcixZ'eZ?[@-2-lQG;Ts׾%7:\$YV=M#u0O+,M=W t:'F"Jma]7Kaºo]P(((iQ`7L4G{ƨL]owjYʗÿLYCx5:?+oo#n2͊:GL3 JvH8BL]owjL4G{ƪs#I"ŇMhe-@Y<ьJ #dz%ʩu`| 68ɮ 3}u?0?[OVGyxP׷Y]Ckwcy[=6-;Ev=kђ$aT|V/&g#TaϮ;5S?+ 3}u?0?[`7L4G{ƨL]owj^eF_uiWe|I: ']D0ʰO QEh@W/0]wŬs!dǕj+?? GVبMj]GQnf .zu޵iFX*ḿ)՛JN^O=sۙz\HG*cէ: :ݗ+lD\ dIgc5tSe-ywBIq%2MW,,I"ViWo,IjwDEeE*ijcDr{yßqUm`֦czXm} zU} ķZ^*79TcV&g#TaϮ;5BZm{ޓ6I1G#j? XRZyĤ%(q#9L]owjL4G{Ʃ_o/8׈ Ɵy&{:tIg6Ҽ  r ms^"jvfV/&0Y čwc׵L]owjL4G{Ʃ;²>g5v-cy"n %$c:L]owjL4G{ƪ]݇\%_"0?[ ^EMrc}@`OpQaӸU ɖǡQEEP1Hn4O‘m2lJ KML8]rrd*FRh\lnָRaM|OJHtm>ګj#X r=~zwb+^:";{%Aj9tm.{so6g$ )S2qWh;"Tkw&d(VfK08]Et{"@fG*jѢعWbFsn3E$*HA>->V(lBv@p8VXԧ,¨Y$f`3ؓ?8 :' "_40}O{V'D_UxKU6z_40 Ӣ/*E`q/tOEQ DŽi?aEޢ8 :' "_40A,?tOЍp/Wu- ;K KP .' O»@#Zb%/WMk_AVHQEQEQEW?'%Uj+*E5jQY+6Mp*GqWvF8#H.1W=_[YCYe?1>dGq.,xF,JYvnA8?6x˥οtW'K-&KٴZq,,*ۤ%gj^"V9>y(LupNhw{Dw4W4R\ ຎ4[2pGOy$GQR))2K t8['R۝xR-\ldY 3ə^uRTt+kHl.&y`YQ:F=[~^c;MPyhu:8H4gIyCZ+׾!͸k+ChH,|4;qv }᳻mSV;|Ę;y $9ٽ59KkhԞ%.l]Bd5ݴZz- y210hgE(if_SWjW2$K$"BqЖ~A#fqUM\:| _Opc6)vd0y*"U )5hOEQC0۴@-Uv?fFCſ2j5_cha̎?|[C&~>-Uv?fFCſ2j5_cha̎?|[C&~>-Uv?fFCſ2j5_cha̎?|[C&~>-Uv?fFGZ'e?ſ2j[LNh]Imyhr: ;QEh@QE+(l/qep27eqa)+^ZD2 e`dӶ2lYyxR3OK$`(>G888n'hDPU( >T>mgwmsKɧ8R(乜@(Hnɤ Ԡc5Q+c|EO ~+jieƠ+k5P\$]a-+2 8=~;?|!cf4c>xq)E#An%nu$>Bp=9Ey*}}F p J G+ttU-^鬴[ݺy$!IF~ gn@š|-l'0Hr-ٍ9ϵ!CCЬn5wW$g*.Y ۿs!nIrOZ|ek5_]^]BxXaVK1|{ CWQFhK)+)+[M4'DVV)?wy?wyÜq.pIZur_OA]Sw]OA]Sw]>FhK)+)+[Er_OA]Sw]OA]Sw]uW%?wy?wySqwVA2E*2Qy_!] 
SWTUl7Q!K3<#E'dM\ca7shBx;G?R kϾ+5G&O"T\5[]'}^.CEs?Q >MtE{ƨI}oWj\&O"TkϾ+5Eh{]'}^?5[t4W= >?Qp:+I}oWjMtE{ƨ sPo_JOMtE{ƫ>Z>!e<*wڬge<];ggp;( (>@%{]'>6^I㢀>o+lW?WR٫(((((( ѷuW3=_V?6{;TQEbhTK/K#S"}6b0A =Պ( u)[]B+m6p9<4;;+kd@B1* ?JEDYCٗo!s*E42RB"_7gs\ E;*i,$K>@1T 9j$($[t7} qWpi2!mn%2"_`s]gӊȽ{[_k}#Dor%K( |kPi5f@vm% 0m='zw٠|q}*Z) iX6@ PbQ4gj7b8? 1~gSEOt.%01k1 TdV gjgC#Hv~SؒG5-\,WZUFaʈx8EDT@T`R@X((y"] x?_{ǟ+X\5-ºo]s2t- B(((|_\v] Φ7ndEÀZ6VSD?{{j\W:+KNsvD?{{is򳩢oD?{{hKNsvth[-;5?ӿ\+:+KNsvD?{{hAΦӿ\-;59roD?{{hKNsvth[-;5?ӿ\+:+KNsvD?{{hAΦºUӿ\^l Y MnAfbxbI=&thMMW]֒ɲk+2;q!\Vԣ.i%ޫwGV?u+[7\1=Io "ºMJnȨGbj9kտr͗#3*տr͗#ߛ/G(adbbj9kտr͗#s01?5oebj9kn9YؚZz?5oe>-/A򿴮Z/43 UmQ8=jնiy5Vyj9SPt`xZ|,M[-o=ؚZzׂttu 0X7nOci=+(xg@ 䃑EYbj9kտr͗#YؚZz?5oeE,OM[-o=ؚZzۢfF'&C~lM[-o=mG3 #V[6_TKQ|awjWZ-ki4OrD$PFzZ돗JajbV;( ((% ?Y5nua]GK&j ȏT_sXErsI"Nq Zy:UMNQvk[V,pdGu0W i+yv9;8'H>"OX&จ;IaMvmj h%ˉbzQ۶)xoL s+}Uݙhlf==-_Ah^8B}sV}V-w-s0#V&v]'R:0 ߻ϙ.,lghpXbߵ5[I\^Y* qbº=O5eđpX!_ rC/O_MR40B}$U>Jӭ,mGn-GAI?Y}+x PW_\%_"-(H (9C+Em[uHn`*FQEQEW u^6+5܏٢KtXwyrDˑ˴r >#Z\Csgcr{B|\AM&o_ 3Z&jo!W >Sv:6oov&Kyb&w2,(h6*IFЕ]G÷1ZQt%ʤ1qrAdAOŤMl!m\ˉǼf>0d K0S)QEQEWOZ쫍GZ'eT7;(3 (>@%{]'>6^I㢀>o+lW?WR٫((((((OUBM)t.\\ƌgPwtޠ$sRjN) Q WCϿ>G&>O"39O4?MWןjO4?MWןjM;}?4hA{U5U5N}ƏM;}9s3CMyƨCMyƫӿoN}ƎD<5_^?<5_^4hӿo39O4?MWןjO4?MWןjM;}?4hA{U5U5N}ƏM;}9s3CMyƨCMyƫӿoN}ƎD<5_^?<5_^4hӿo393BS2dE G>>0|ذ۩=obiȃ'k5G'k5]&>ؚw4r g= Q WCϿ>G&>9vcc}f>sK݉f(TOjOЍؚw5r#bvϽRVw0e ;o [ t@v)(((}z[/d i5([[kQ%7@`?`xl7E /,%L6H"'tNr^IqW$vR6'b_7&V%JovQ"y>*-CWK}m>m-!)ˈ̌r_,W$n&z$R4I,NFCЃS4TE 0)jXטQEQE%Bo5W/ G\y[c7W-/]WQɫ?}kg} >/#]WPIj&<[Ӛ8vV$ ? O$VV,ޢ O$Q B~"E`AП/H OoYEzS?[_G$ ,REORw&kY݀rl8U~$伂6(0J uX2k_ OoYEj')kjRjIc-=5.<,6ϙR29z}O—בkAE`[yYsƉRl=cZj'(S?[_M]}H{idbbVlА>Qm$ ? 
O$Rzޢ O$Q B~"oQX?j'(S?[_Eޢ O$Q B~"\|T& sZ_j'+{߉\iZ{;\Ly09\xW ɖǤQEEPEPGjqb#( ^Ik?ߛ/G.+dջX7S[6_G#zޢ`?ߛ/GO o=oQES[6_G#zޢC~lG?5eEO o=cR[_ky=䋨 8@"Ӏ:z]rA nL;( (4O‘m#k'Tj(F[jW돱V7PO=q??[eޢ?[3~^ڑV!gk<ַ;@V!gk<ַ;@'FΨ5Cٝ@ k11Z͹_4:]m4S?-1pIsK~?Z/ZM{9_M0ZA$ ٷzӓKӦl-^+hTe>n ?LovCy{hg@MM{[f--iH9%WROXΨ5O?6!|1oq:w!gk<ַ;@V!gk<ַ;H +3~^??[ +3~^??[ |Q#`kZ9hOK|nyW\c[t񞂪;[EVaEPkG+gI.K yYHMtE{ƫV9ÔI}oWjMtE{ƫV9ÔI}oWjMtE{ƫV9ÔxL5aoM2֟qob \?r?{-k-Yma]7Kaºo]1Q@Q@Q@; 5Or5V,?}ſ?q/#T'?*GsZ.;?ko4OK}ſ?q/#VS/giggg'Xc2FzԕDĿG[@M5n#*r7V'\68lg ch~-&'%jޢ?ko4OK}ſ?q/#VVZxPɑ€QXz~I[]BA4s/~-&'%jֻ5/49>ƒQ3^[Di ULך2h8_ _F) _ _F~-&kF0U$lG VyR "c~-&'%jޢ'%j>h8zx$G*91 08{95~$\6kkm7BN|K4hAxaQEphWL`h3>bn(^>^vRA %DN ]@FZŵ߈ń2M$͸DQ 69l oOv ]B,Jv-[&wqQ:t;+,_g\}r$ort#cS hֱE46kn|1$ cߏQ{K& +t@kQP<$T+E"IlNє"gQӧnvO477Zxby pAmC+EmZ[JѮ+9l ^V$~[}=ⳏkyf ?9H1+' ^ePʖ2#m,@)! ft{tK#tDlK8L2I<֠i13P YP mF#TgS f(m.G?xHswwYVO2fbm );GCs-?4yTK<ȆY 1S8g=1w6f JrR]u`12cMRȌA>x.Ş&Db6ɶ@R<'?/9Go_D㲳Tk fvU`ݹI\`7O]Ŭ\y,Q#ޤp@#oӬ/oe+y.ʬ$B9koq4ƯH[ٖEe>w`"oi*@O!1Ck,uO':xSr`2˸_y`;KÜU&TrM2"G8S;FB; bO0߭[A$۟12F{禄W~6.;g&I5;2,>Uh#}+T.7VeVyBcW%UN+:KiّoX2v6=1Qj>DO# ǎ)05] ru^1MԱ'$%_F!wa~RH H^)[ĚbHwȶs3ӂY@vQmK[UM?tۗU 2zlU=Sf"1a(wm 2q6ggޗq^>!EnRFB7i;y94iZj,D-^`*H' 0@#/p5$z5[v@ ҡtv!4V-hv9O֢*K |Q#`k]q(D}}EVaEPkG+FZ}[Cc9ns2t- e ;o [ tB ( ( (9_Ÿ#ԴVZ(">nK S¯w5YMٔՂ6X (㷑Z[q$BVx ܶsmiEyWyل;pPŻgrַ;Zv)c >iGOT6ED/eK|0mrka#Ldyk9LF@YHW~GI89?Yѵ,Og/LyU!F#x2=w+`G&;/xA$7D#;h rx`K3OޅɉFm;syD9h/f8f7qYBIk5巛OVo㰩5}7]Kd"Ovq$u#^4x):wCn੤sH9..u2%(>V%%VF8fsTմpe%FTQ31ffWqE\6NiL &a'Ao-٠ӄw),cnr dq!p:9ȿ[|T& s]prP,?uwv%xKA_jk~xwT{BZVk٤rI,N9'_*ׅ?VxozW 'np%ᴵ˫8X9t19\(B((((((((((cD)[Vbh_#+j۬e(Q@Q@Q@Q@Q@Q@Q@Q@q(D}ʸ>FZCqKc+c0(Wwxow:(º]s u/- ((((((+?;뙧Gu 5[Pba$GN߭zr^@h=SRgF >7$D*;?LovCy{jiݳOC:#o.Eb982= ouRU33Y+=OCy{h?Lov J3x:K y|8Y?8#M]9w+$rD$كd.HCv?LovCy{joI,[Kky8Kidi @EU%gOS6HK'vbPX?<-C=~?Z/KlKV) `i|< #6RK#.#AA+ƢCA< vsG!gk<ַ;Vlyf3˳lr[ . 
htr(u##"1??[3~^ޢ`ַ;G!gk<㵽E3~^??[(?~?Z/oQE?LovCy{kz.!gk<ַ;[Qp0?[3~^ޢڶmI4}51%O1o^+kGZ'ewմv3?/WMk_AT (((dԶI9v n*[Fqުyj&Uȋ,mzT4j\Sv9O4?MWןjO4?MWןjM;}?4ir!3CMyƨCMyƫӿoN}ƎD<5_^?<5_^4hӿo39O4?MWןjO4?MWןjM;}?4hA{U5U5N}ƏM;}9s3CMyƨCMyƫӿoN}ƎD<5_^?<5_^4hӿo39O4?MWןjO4?MWןjM;}?4hA{U5U5N}ƏM;}9s3a Miض-!-%.X"8=kz=R,H`w~5z+QE0 ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (8KL=C\ }6I&Xbe=;[O?$WyE.T;|s=y9_ (ʂ}/Eo羗y"(Avp~o羗y"7?K<]r8?7?K<GH9P]HO?$WyE.O?$Q{'+Tg{'(|s=QG* |s=y9_ (9_ <}/EwQʂ}/EUm3wͶ2X#OKHǔ=:j) ((>6^I㢏Wwx ; _[5t7@w?Կj((((((([_WS\ua]GK&Gsf xbBP-Uv?f( o 4`MW.ſ2j5_ck 8Oɪ߻3Gdݏ,?|[C&~>-Uv?f( o 4`MW.ſ2j5_ck </K;0Soiopvdy8?jo 5g%YU 8Oɪ߻3Gdݏ,?|[C&~>-Uv?f( O m}^ 2CꨮP\ݯ\}Es2t- e ;o [ tQEQEQEQEQEh_#+j۬MC+Em[u4[QR0(((((]ʶtwh6P⎶^mx ֋i\Cd+[t.D/\S.w{'.oΧ%jrlA)"@0w28 pFsM=YIA-gKziw @;ņl;2]/(M&IhX|ms(2uOMov}H`ZT6Ӄ^*G5_6]:Wm]DBA]DCZEy|z%]N\I b*az A7:nl1KBI`ʊDà?r>oVo;(aEPEPEPEPEPEP5,?tOЍs<\BJᮇD</ְ؉nhQEdQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ |mۿEk@O7@w?Կj+o+lPEPEPEPEPEPEP^_j^/ rK&hԱzWW+']Zӿ\-;5ݢ.KNsvD?{{kv.KNsvD?{{kv.KNsvD?{{kv.KNsvD?{{kv[Ŗ_o+{#f;Xad\'"Zwk=㵻Eada%;G"Zwk=㵻Eada%;G"Zwk=㵻EadqŘީ M[XuKt6Ӷ <TwV4ְtV8+ Q[$Br+l5W?/WMkQEQEQEQEQECO}OZӬmJ7Xn6+;aϱN<%CN~!R?"$ݱնCXsE DŽi?aG'D_UoTmYv38 :' "_40( DŽi?aG'D_Umy T\djJ4 LN<%CN?8 :' "niIpƥݏ@4<Wj<8u8E01?8 :' "_40}Z=܅#PHBK'4M#+ Ӣ/*N<%CNа5)8>В RHP$U${FN_40 Ӣ/*.i76hդ#f\,IOC @'*q/tOEQ DŽi?a[QN<%CN?8 :' "VP9-f,iʬM;3 Ӣ/*N<%CNޢN<%CN?8 :' "z_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/* N<%CN?8 :' "z_40 Ӣ/**GiawW2j;!7vDAWqkW t:'FKsB($((((_r^_ik7<4\X2HXNC^?; z aNC^?; z aNC^?; z aNC^?; z aNC^?; z aNC^?; z aNC^?; z aNC^?; z a|YN;;iWwRgd0^#' N'(((((((((((ow:(%{]'WA\sa]KKf ( ( ( ( ( ( |$ K]Uy׃PGi7U\܀q'eDh'%j>h8+oQX?ko4OK}ſ?q/#Q`7'%j>h8V[@M5ko4OKX .| ֯mz^Aun1(#h+9}+G~-&'%jjY|0Mlſ?q/#QDĿNZ_xDҮu;n/I\laEc9[uDĿG[@M5!V[@M5ko4OKԬDĿG[@M5z_ _F~-&E`ſ?q/#QDĿEo%uIyg%zvm{k}キ 1sqa oV؉nc@v++l5UQEQEQEQEQEr:'kĞɊb5S!9D2H;O͹saȨeơ-f1Hdf( p)prkI%ZJGx1K}N9mELm֌f=ڱFyx6omjK$ c2!;g#Vgc*$Vq,rl0OE`z0AH% X׌u<a5KC9Ms1sח1db&r]Vxr8̜xaazgy2Fќ` g]=Q~v&Ii[i(ˡmP09 ǎEI7ep|"B( AQEcuםm"LϱX.T6H 
'J]]3ɺX.F]`+hI^#m"z)r*J< >wa(OY<-$Rq̾;0{Z4 a ڮrƺj(Cu>zei!@3ʒwMP@5o^²&IW`D>YPpyctP`yKsgMZiJ,-mV)2E9 Rð-#8"H]`$¦MJEPdQEQEQEQEQEQEs^<\BJᮇD</=,?tOЍk QVHQEQEQEW=_AaM;Ka i[ Z(juKeh@JU$d?6Ji1FRE;um5`eԃ*~BGZOz,z^/^XњktT1˴=62FqO~gErD_%?jY|Hwٿ>m x]Ֆnme{6kAUeK,2dPwWm#MK->iMAo%%7,=.5xI9,B6ӈÙ$`r9PAsea³p)<ȱƀ;=voiqw%ԑS#j푟Znm5恨[[.tI,ACI;-v\@co\gI\V}͵K;uYdxˁH#-ZuX#&n'#0c IJe\MF9;J+ZU b fȇto9bvryf͸Rn%~PF6n>N/oH/gy { #q1xdefX%pYPHd~b\^d$i-d+2yX`aA8'5_=@d1-E*7J,{F:P (QkpOR㎝k=a %9&KU޹y:^$ )].WyI%?LovCy{kW.-b N *aq p{jv%#3~^??[*nQ~?Z/\ Cy{h?Lov`ַ;G!gk<㵽E3~^??[(?~?Z/oQE?LovCy{kz.!gk<ַ;[Qp0?[3~^ޢ~?Z/\ Cy{h?Lov`ַ;G!gk<㵽E3~^??[(7caGs HsuBqֻ@#\?WҸk? B56"[QEY!EPEPEP\4-yMᄍ&x]-=VX–Bo!FG^A٪څ\X^Fb7ܤ`++O o=8k,Wϛ6q$.BɂsR]4iشѥ99 ?ߛ/GO o=0,2 Ihf~]!"snZ z {h:cx "j#z?p͗#ѭfxi+$2fıq{z Y-]]\[tz+xK|8vu%C=ʶlKYظnj麝c ;`⥺KجQU,~I>ÓO8b]ơg8`UMb6H`s"l1". ~Cjr# 9d ͬ';wR_JiZtr3do-qs=\j%<1(GGc6}7oy2x2;֯~ vxeV* w$uSHUm]f\KsGı&yfXU~@A:CڲI "rttɑهHuNPY eX =hyHFhXxݏ=+?ֲF [!f}|޾RH.P[xbȲβ́6n?GL*1`8">ךa/)*ބ9AzJ+7c ZV6s-Zys~x늜piv(Bvgc== sZ?u<5E!Y$eFF $^jVn<-[+.,If#,:< rr>A8$ïKaeI|yR;9?FȾH043d)bg#8 +{0C6'Rv(hityIGVjvSZ ]CB!U1.I=r9Q궭qA+3NӎAC\ɳ`٠x9BF2gY?2XlsҷOd27^&*I㎝ $%&ّo%uIr6_ 7\W_a oUas_A\ma]7Ka(((((?? 
GVկSn1?8}UuDGح֗+F]>lN0>Q88&{"`r"Mze&aymI (^8r灒j̚4WqC?Vx DTO1ٓd3E]H%X /S~srskWMmVMa;`Hs =jyqt ǜcc2 <閷TQ3*NX a)g.C~}+]PKOѭ7[!|)Y YUdU0xpp WZ'ԝ(6) qkmp01ܱhv8څQHaEPEPEPEPEPEPEPEPEPEPEPEP5,?tOЍs<\BJᮇD</ְ؉nhQEdQ@Q@Q@s5ӿk MjYeZ[N* 36OUQKr<7n8=:ѻwKC'_meY$WZ( n^`e^6⋋"7kydL>zogq{3Co-r׸;k68±ҟ4)* BcKMl}սNCL?t}2 k+'fbdvRGN}c^Ҭj#Cq:)v*0]-sc ֺ*^7)ǪjoZXmyMscӽn[mlr@|9<UCu V戦t/P̄U$Erwr=f#}+9%"QP; p5tkɮu^)ʝ6B 4HO\Ð (EJw( ((S7aFK?m\ NsC#)^ 4gdݏv×VQVǦ}m q(bIF 'dݏſ2jwE;!]'dݏſ2jօ|E[ Kkh-dGo*WQnd8SixZ'eZ^2?|[C&~>-Uv?f[@-2-^->-Uv?fɪ߻3Z^-/ D̿E>-Uv?fɪ߻3Z^-/ D̿E>-Uv?fɪ߻3Z^-/ D̿E>-Uv?fɪ߻3Z^-/ D̿E>-Uv?fɪ߻3Z^-/ D̿E>-Uv?fɪ߻3Z^-/ D̿Eo]z{WP[ILE0UQ'dj~z7eTB`ixZ'eZ?[@-2->h̓_A\^<[mEw&dn>:{g</ D̿G2 3/ D̿Q}B]u:n-)o^%*A144YEQLAEPEPEP/HRjp:+jFծ)KX%ح+Ds8;Tz'WaHڶ.H+Zǥ4#dia=s31]EgF/;"|2f\?%n} z.}9bJWeV0@Uq;?kv *"*2@zXˣxnY6M.q/FLWWjX;q:tQ/9iZǶtfx{{8.$D,K7$ =q]I/ ^2ch\޷um.ȣT+urarǾP~o$NlYY.##x\ݏZfutL+·~wu?$R$`n< Gnd|zC"6d9',pxN/?yt61?ۭB-R5%+jfxCeKKkH1G.Uzu[:tIͽ*SJX61NjoIE0.T{s5SRMU;'&dHFPpw }F9%u=jQXk diWp%ѷ&1/|Ac=|VrEPHQEQEx(Iq]}!\o:'6"[(B((((((((((>6^I㢏Wwx ; _[5t7@w?Կj(((((((((-Eu;2Q6/$;6ZF]'(K?u"T  T߾GF<\LW9}:˯G$Xpg8\sşHkAp)xKA_jݬ/ "ºM[lQE ( +m--5 O4K]w!.P P ot_Zv]JXaK2|ڶm8P ,(^]{xKshyKF`rqFONoEN3s5N'c2܈ۧC[N}SŰ_x}ݧ&umim"%U0}EW.|Iu/M>[d0$8M5/O+$ӭo˝dG_617\gWA-@I7*K hn~H]lM_}C0hs\KqΥN p3g5ycY0M1op>_'vQIبQHaEPEPEPEPEPEPEP5,?tOЍs<\BJᮇD</ְ؉nhQEdQ@Q@Q@s5ӿk xLoEm03:udV*YT%/9j䁀OҹѻVKR) ISsd䍬U>3}j:5obX.8XPuWwo H]` $ ǂW6W :34i%|$ǘ~IiYWzd_+#ݵ*K%Wg!pv2Gpyt92_זC2ڼ9j_=mtɺ-_[ sۛS^ H8*,j z{.KoaÝ|Tڿ`:sj7è%:068݁s]E4LN9tIwī02qW(op(QEQEx(Iq]}!\o:'6"[(B((((((((((>6^I㢏Wwx ; _[5t7@w?Կj((((((((((FZGqKc+c0(((?? 
GVկP42}p|[ډN?btkjiiok."1N1G^}r1beQoZݢ(.aY>qN;q[7oRKeu"E3P0HKy %~&gGmp.%2');6tU zU8im <+k&R6H->e{F;)(休Ȕsq9$q)}C%9]>KVb/#=9iߵRVYIpʒKrywlh1ӷ[[[[xuUP}sHP19mtlNa}GFK&ei dFV}n;Od⛴ybC4ybQ#ff.o&m)Üqڷ.4}2:m31qY47H#mh)]qcZIǷo!i멀'b[h&I$W1yr0?4@d?tǴlSϒ WKuo/ zw:JT2EdO֬}E9,pGI#ҋZ] |Mz[[kª;gN3_sZOwj2>Up~_~U}ꮵk lg/X4$[ozӷIBŦ`N{=KyX*K ( |Q#`k]q(D}}EVaEPEPEP1He$I ubs~Eb]j88?mާt(4{ ]K(Q1czXƭdXiv%KRY$€~c+Z ]f&"~џF%z#n=z-:y㶈+mA$4Pmhpek`7Ă%A rXÏCC u[cuZgG9,pr ❕r{Dr,YC̅NACێQQap Gd}v\[Kl/ BEݱz0rJx綾e n%7F9B /e95{#zqq ahh) 8'?".^O*1Q6 g{~6?]]4G`AGR*Ke?//mfhCCО:uz}r%Hɴ/icsdcۻ$5E:H/av`n^f;ID$YyF1ZWI6D6噥AX[_^wb'sIh\Ciw؅0pOGNhB8e HRAW1ImWB?%-"O*W}C9A@sHbt yᇆ˕u2X S]{QPXQEQEQEQEQEQEQEQExEqa +@#\?WҸk? B56"[QEY!EPEPEP\4-y7^K{heԴ92A# + Э/&A%B57A%B? ' hzg/(c,3]x4Џ h&UP̟ap 68?CE`/O?V?_H +Ox2W)4`ʭ$9K<2Z b$AS ' j8ə/hH~_c訬A%Bo!>.Sp#gϱgfRxkAu TAw  [D|_MR Q[$Bľ{i.e3jL%F~ppJq8VIk (((((((((((Wwxow:(º]s u/- (((((((((=!V?"sşHk(?|IcAycCp5+0Uӫ#J2 zO4?MWןj{:A$o`6?Pbi8"yj&Tyj&UbiϿ>G"fsh~ ?h~ ?t?ؚw4biȃ'k5G'k5]&>ؚw4r g= Q WCϿ>G&>9yj&Tyj&UbiϿ>G"fsh~ ?h~ ?t?ؚw4biȃ'k5G'k5]&>ؚw4r g= Q WCϿ>G&>9yj&Tyj&UbiϿ>G"fsh~ ?a:Ů2kLV7I+c4 ܨ̈'ctN}ƏM;}j ; ((((ј.92۵Oj]GQnf .zuޡ?? GVղe"v}O{W9/R]n[* xfɠ鲕S+|ӹ3=)!C6vW+Y\ҘIYd(ݽ=G%1Kw1we9`֕ihP˫\]1RFӂ{*&,-TgV9ݱYC ]Eoq?_ٷ?gvn= c4+u)t5l/dbhJN<`0m~AkiڕH]cƠoe,}w6v iifIB{I-on`q+4$2 y kyOpחƵS]Y@cq+2]T(䓒sryMJ Wͱ%s0BaUOZ4koyw?m${~)o4K[-yLVPl2<6IK MqK)ne Hwb˞d%Ԯ^zVᥤqq\2p wcŽK`qZ 64 B# F hvW%'fk?Xďmks342‡-$*FؓG ͸f pN7i)-[6jI̙'̒Y%v㎼qQ1ªݣn,eKV>0pI /-D&mdk$RB1#MʬORǚ-KO#w(ޯ*EY,వKku+g$I$I$rjzNХ{jQE!Q@Q@Q@Q@Q@Q@׏?WҸk? 
B5xEqa +@#Zb%EUQEQEQEx@SNk{ƿšwC5&Tf,QcXl09F8ZQTr>S{cKks3V[8[Čg'h]ݎaKkPi23C]>ȡx# ))CuWr&7 ]PWz ۧ\}QلiVxbU!lH`ۀ*#1|M+U՞Vj KQwvpXORO?tXz-&kKV?/ mIE+f?CRL!=BUk( bGFvK˒>`F3cBvl"IeE9QiZcCY)z!E1a:Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕ?'k(Q;\_ZQÕcizrJ=ÈL1X۷\WFdx"I.+urO*j̱EU(((((m]nj& ڥѭg:ݼ񴹈E'x5!R?"x"Xq6EbeWruyV;88V,L|PpJ~H=e(؋c-U dx=qIVjqBksl|ОY8j>`0vg= [Fcwzfe!m9;vBvGVmp~a yǮj״Mi:c@֌ ri(kK4.fPЅ69r3]28E;J|8`p3ia|8x?M߆jX Օư9U\HTO=t**+ZE#Aa(.M” # /~-xIdH4ci ptcV."uY$Ivx񎻨66N&K^;weHL?C#$qOW5O-ƥir[`ܱ&efo<@|Š(0(( Q[$BI.+urO+XlD,QEdQ@Q@Q@Q@Q@Ue:#VkKKq.aonX (qzUtbW]i XQSmRڵޥ=\iwkhxc<1ŻY\׮D!H9#x3+(ml[m ]\(ese#yFVoaNi+ ("ҥ%٭#d<0O=ㅳ5 !Ys+C*YBvwm)XnbmokcG.ëuQבZռRtɮgW`~@.7a*}z-~lZZH ŗ%\q~;}g-ݙǓ)׾qך˟wHMko6\,L'n ```el 5e<7RV9`F=яCNqqkYc!n!kU +m$?b`|k^Kd+iiQXG<3VhSC 5y;VYYr }G,sKVu:Z*us{X]kq4*#lHϠU~Vv);QH((((((((((((kǟ+X\5y"] x?_aТ* ( ( ( ( ( ( ( ּ>uoe fB+,n7 ;l@%> |Umkv-5FCnAɕ1ǯgak:y$^uM $5x±tZQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE|JnO|mۿE}?WR٫WA@Q@Q@Q@Q@Q@Q@qWCԿt+Jn̷YqWk\PXۺl8G?V?_G [D|_MXu9 WA%ydtխYjZi`/O?V?_RV{WaE:1y0c6kgY$,1E2Z{'+|QiztB%OڌX)&4`2Q >KHwf'Hx)⸇B; 2IB1}t?Q >]V) V q*4aA(VǟdwB,M<;BیIkI}oWjMtE{Ʃ俱|Bl#f@FF6'f<>I& ɣ͓ 8ɮ]'}^?5[-cd`H?OݱbMvW`LTOC\r G)e܅,A2I'5kϾ+5G&O"T[G8 _h!\-eQ.1֦|s= >?Qdf'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Ef'HO?$V&O"TkϾ+5Eg3W{/ gq*9Y f`2Pw:t SSsYEtP? Bf"HuO5eQ@$: ?!?L?$WAEs&k(T3\_]CЙ/HSsYEtP? 
Bf"i: :(d0!))ulW?g%YU((((((((((>6^I㢏Wwx ; _[5t7@w?Կj(((((((f?(zmt%WUÎF"`0:d&EswhdgbLt/;\XTk5$D[g>[ن-o2֢عV2SÚlMDQy2o9|0I2xw}RĻl0$IqZtQv/tW7IpTq(8jdѴ%d2fTv*)WcZEaʻgC;;Y"2H8ݘ]N=A9(xMXdhdDUIUeCmLJ4cKw V9eb )?r=V.Õv2dH$RgghRk B:Ɓt<"dICLA-Uv?fɪ߻3G#duW!5_cho 4r0G_Er>-Uv?fɪ߻3G#duW!5_cho 4r0G_Er>-Uv?fɪ߻3G#duW!5_cho 4r0G_Er>-Uv?fɪ߻3G#duW!5_cho 4r0G_Er>-Uv?fɪ߻3G#duW!5_cho 4r0G_Er>-Uv?fɪ߻3G#dIo%uIp]z{WP[ILE0UQ'dj~z7eTB\U-ݓQEB ( ( ( ( (9 ѷu\m|CԾ-yZU߲MouuӾq:~?Z/c-#E`ַ;G!gk<#7Cy{h?Lov7Cy{h?Lov7Cy{h?Lov7Cy{h?Lov7Cy{h?Lov7Cy{h?Lov7Cy{h?LovLqGys4QƮc'pI2q*yU"n49hò犥~?Z/f]O{pZ F8xJjxsCK#t{{-.ク|~?Z/@hPSeU.@18W3~^??[ +3~^??[~?Z/~?Z/~?Z/~?Z/~?Z/~?Z/~?Z/<\BJᮇD</ hQ>&c>u2D9G{D</ְ؉nQֵFX4:kyoE\ $Ga?^-KJ`i[&M^-/ D̿[TKſ?s/"h9kb9rKſ?s/"~<[}cgݵiOen~矻EsF?_EKſ?s/"sF?_EKſ?s/"sF?_EKſ?s/"sG7>{xGhl丑c%,U6gֺkn2@q$ğ MZ^t!ZEL(oWCKy{+ w"Ơne_\4- >Bvl"IeEeFG$ ?'N?$VsFG$ ?'N?$VsFG$ ˃UV75"Mg4o;1?%\sG;TdOН/H}GsEkG;TdOН/H}GsEkG;TdOН/H}GsEkG;TdOН/HGW:uΝy]Rw 2,JpyOZЮ>_*ׅbqQEdQ@Q@Q@Q@Q@ |mۿEk@O7@w?Կj+o+lPEPEPEPEPEPEPEp>\ 5,p%䞕?zo\":w=#޹;@Xc;~?ӿ\,tW1?zo:z+GNsvDt?{{h =#޹;G":w=?ӿ\;~OEs?zocDt?{{hGNsv\":w=#޹;@Xc;~?ӿ\,tW1?zo:z+GNsvDt?{{h =#޹;G":w=?ӿ\;~OEs?zocDt?{{hGNsv\=m?[#h_nupynhpN kٮ]2AIQPeQEQE%WUXqWR^Zb|ݔLwjlPXۺIy[2Fqz*Q%rrj:ƷGO߲EhFayX?69{gUS C?mۻj띹+9._O㴻VLIo @g+%0rAz%yI$Dqn-U,$wA'b\.qsoI$QK",23@=RZkew f]$e@9=-[JrN{H:p]_[f쏴y @>qjڬ1޽[d%wo"RjH״w5ki<U5z9U-%ISpGA)NU,'◆PkPevJMv((cu*wrr`AY6j$H?9w#9w ٧<#tEgB۸7͑Hm.%Z5Ԗ:Nv2Fd`}V9:ZMn&70[[+Pl ܨ:o?nװ&/Jy{8ϵ_HdцUi[NOfֲ6F3}(,~{z#.} "Mߐ ]~PG]UF`5 +azWi/Gg[[Z)Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@׏?WҸk? 
B5xEqa +@#Zb%@m+bvSY.fXԷhp 3x57'D_UL֣ܠ\Tטc·^,pinK7r3ZYuې s]'D_UxKU*ʺ96wgm5ΧlY[}&Kty16+}KZGs~o;1n7Y*x Ӣ/*N<%CNoHoƹhskuPٺ'*C@ #W<֚}ZDÐ 3]GeiF]"F88_40 Ӣ/*1A ѣl:x4y9=ż=OF"y&UcVDeHi | w DŽi?aQO"Lj ]]VKXS1ׁ{hM.pL6$ں :Oآ* k ?)l5=_AaM;Ka\:>34vV2 ^b$$`6g(Xcq wlAcw$ۄ9v r+֞=Ě~oa[)a/;1!E,xQiyq$ nm\<ݱ%<Cc;GoMk_Csky7srG (.Sí2׈4>5Ĉe]$BI?tWG5Iq6pGnks~^9wLE&ӿ]Dx/^h]sMɧ4%Lj/\KIj ﵲ:8Mk4&i/~9n]r\FH*)vXi!k iq츂@]!vdbBg&#2 ^uuӄè~ʒ1.A pEzVtcog+ػOvbM}|cy?48_VRMLasI]%0E6ZO>fh†\5SW \|T& s]qP,?u8n((((((ow:(%{]'WA\sa]KKf ( ( ( ( ( (9o?Rq.4ViIHci HѢڠ&xPK ay_K-Qde1]Ldp)gmŬrZYC /~r7Һ071[z{iw eou;󰸺0(E*FeԎWyY-%er(p7[:Ͷ]s2P0!)nvFįox%g- ^c.3 2P+Bq[V!y|1>ua5vyTSk!'{W''5n}#Mtj [g<2e=엣c{(eQ0Gc{R5/u7H-Go;\[GY 1$(ÉP:d &qv-#x* w@)]ܢKEr-A-k=>;ƍdX avrO,O&~&T5FgkK.w~Sb{ѻ=/GE`Y^^e6TqycL} 5~{&{۸""(H'M&OͤډD/hVY\"balHWݣl+O~OEh#`xr}AؑOtvW%u m&GW p iLţ'q†4,@`{h16guÝ*y<'Q\Z-=#Cvv]c*p ߒG]hp (Q@Q@Q@*ԼZG\O ) ѰQ䎵]>ynOaxRVVA𖹩_yk66)IYB_1`0O\ޝ%Z]!EP3=_V?6aͨZJ0G ) gPG%WUXwzՔMռ0+4 qa?ޟ7+6+ \IEQJK$$x'rq]sx$z_؉G1-ʗ?>8M߻[tRv춢fI@is<;ʆ#p 10ϼ3;%u[;ipL \xʏL[x$YbA(˓hV].r q9ZUy6;}WqDgd, L8Nr `V^7vqk(9EpA*Z$*(QEQEQEQEQEQEQEQEQEQEQEQExEqa +@#\?WҸk? B56"[z w6Ҷ+R.FVD*;QR0(((((OI`եI]O'$]j #b($+xa-"IRHPo!x =_AaM;Ka[ Э/&A%Bޢj`/O?V?_[Qp0 ' h_+h/(?Km4/O\ A%B? ' kz. [D|_MKm5E Э/&A%Bޢ?V?_X6zVH,-lk;hXԷp2B39W/ G\reQEEPEPEPEPEPkG+->D6}>5ͳe 9=?{+ ܴV63y(DFۓx$i.S[Ծy&QtMrKB0۾fq]QtzL.e$l=z)Wj:FDjzuhۑ.`YBP=46ݲ|-Y8Dܠn䏔,$hqa6ь 'y#4;Yn.c4褟&yVɜq=NsWKybegPcߌ~sEZRۻf7O;snv-rQ&MH,a$XaILӲòuqJ?SÚͬ6|B7 7Vl >VV6֜(]q<5:jE$Lh͟b]`,v )'q&fH} mij[jkv\A*\5Ĝ>0Ϗ7mN/p1h|qAE@#qR6PjOad5х|Ӂ~3tjXu}LX[4Y[tB;x`rMa=(]Ms "iLnjG݆ x 8l{i^,/WRKFe. o8 w2 `?KzK([ +؀3#_nq+NS}>K~Lo0q9u S-q%n; }c~{ A.-Vs:OBt/aI[X0Vg#0 qjQ-?bYtذ:7n#y 4҅smg3ʳNۢ`y}j[hwvPY\t֖`8R0? 
㮴 SN:H#H0+ss2JIn.7wIe#B (Ivv:.ۛK2yX_pF3׮)dд]*ա;B Y84M.6Ow%"y9vpr21Zz ktza3uT.[88N_\ڇIm}P,fB2rWci=U(aEPEPEPEPEP.%Z];J`w]%"B(f?(zmmV,PXۺuwi]^˫]\C0Pȱz3?\3`Sڇu[\y ܶ3Uk=bMB ZgaG~#XB`hS`|FW-hz/DFmնuywiҢF%'$iZ+Es׾,v̀#jGwo]+RIieV!6G E(y?SӬm6mCyh&H#r$2;0[Pb @I?.M;Zژ~21 0b '<(͎,ޯZGK"3=[}c';N31BOK\eӬgk;y.E3ĥ2=7M1eirp88qpG hO~^L٣TYt`8y=|=LӼ7lp(/ v#尪H $ xkj5p0>S3oHgʮ Zji,3QbS3)i q;r{ݎ̰JYP@H#\H kƯfd6M0[(3ʒ).}gyIi3- 2ecI/khf!EPEPEPEPEPEPEPEPEPEPEP5,?tOЍs<\BJᮇD</ְ؉ne_P4_Jج}KJ`i[ܨQEHŠ((((( ?'SV'u?Vo?w_)K+XlD({ƿšw] s5ӿQEsQ@Q@Q@Q@Q@Q@qP,?uv%Bo5pR((((((ow:(%{]'WA\sa]KKf ( ( ( ( ( (8=VESح Ӎ>1}/j՗m 9V^8犞Pqሦg 3 g(ïl<#d2,Fˏ"ahv(zNK4/R$6˫ *0$7t#\i YOk0hfe_d`8xᴹ0B鼸`;l1*TsjՏVwu+ȶwˉvn:s_Z.|kͥ9e#m,B1c9 UnVVoiFBNwb-M;H 0xw*sibfa^hQ)ԯ4գm<ʭ:ȱePъp>'P D0o0Bӂi/ g'g S~]'$ bTE,/<k[Mwr 2yk+`Qщs׽1|-'oL4ofm=GD>mt>R+gyI@j,q12*K閫I|C E@w۵'P}j0n}/S༗X\$$J$„Up0~dQ9%ot=Z{ǀز}|WD""Oy)[mݷ۳v9ۜT6r[]2R7T yDϾTBi5 v"w3}|ݸwnyot [Y."EV_ICJLJW2ؠlӇ<^7kZʬx+Xg3[KlukXDi)p@E1a-9> 0<d1 q!cS9g4Nj4b[lDsD0]YQ9 о1ZMrJ p5)#U4UAmq:(KćaTT|皳mXa)'ԯghtrmA%:W;|r3E24dRF'-Nq9<QEQEQE@ֿan뤮oN.FIH(J`wSHu Yh\s9#*FֲAO$ʸ@q3an]I+&VV2O["R|eg۸&w.s֬\\"z䷷cRw0w4-⍧%KIf-eѼPd`62qӜZzKIb@*@y"NGy8?GÂDjܱmˈCG \nԚllr\]8XpEê,W`=h_sGry|G`q*HUFǪ],͔vv5rV-O^FM.5*$TܹUlx|m]ôar6B}K)'涢jɋ'4I.$Q fr*1u۔ q#8oikp3!c$c0#۸= qm€hip yO0dc;zzΞ'^ȭ4ah畑y3 l1IMIpEfc5 ߅4=, PuGǗRz=^,n-Uv?fFCſ2j&(;+OIuF2I1 I$Xc,F .2Wr0GQ\dݏſ2jÙ}`MW?|[C&~9\dݏſ2jÙ}qP,?u/dݏ4 궺ԯ類CIp`c5QFMTbފ($(((((ow:(%{]'WA\sa]KKf ( ( ( ( ( (&[:X[)7p7s16t[Y-&- Qv;# H]WhK4y⺶eH<#q>m,ehЅ -؅.si 4N :gF,KzI ;[9$9aLKan_crш4BsRp8sVhz/5]f;My&$[r2n-@x+TZe4(esz褹LJ0y dԴt;fүGl-Tmݴ.wg?]-.1F)҅l ֻJBuH~C-|W^]m ?̼P1ܿ(AΡ5Z_[vFIhO%r9qJfHc 4X:wd֍ED'ygh&V] b9ېjM?wj5ji16/6n ˅bxN1ZBfF pO$:{R~!cfw4jAcC1c0 ޳6]cLK;ˉb_,+*VPeVێw*H'g4pU 'z%+ p~˦i=%,r$s`20rV_j>!2]YKHQerO^9\0")f8Zl}d<Tc]Fp:U7y/]Kikb/#I"IPacW¶;t-:V}z7K8VG!,#rU}tkx|o3;Ե=WKm5 s`F,1摸wuRTӭ"$LGdL$޻-hft̐ќ ?5->:ώu}+K2}b Y2t ă =j'VMoIF5di[.wIQx]}ilEn]>ơ Ȑܡ8NI +9+Nj7[wTJ5Kؖɛ0FssTf4kiYn#.>XR0p 0${Lhi\$qwc2MS4UV_q̿a.L.Ip\V>"]·#5dGu]k1չ#ЊRǠ5->䪐8[_]ܴXE ;Z$5w8IN{ S7u֯j#i@ߨc ؓ 2g' 'nO[k:- 
属$e:9MU|68L^7[ZpF]sHCwA.r*o}W첛'oG$S ?x }@18<K*}XH}{xA lI9o"l,%'w$ e@9[E'21~:OjlF#?y'j mH(QEŠ((((((((((((kǟ+X\5y"] x?_aТ* ( ( ( ( ( ( ( ( ( ( ( ( ,--F5W6pw"+IIT^:;]I7XrWxl-K4MSD,!S~IrKq^O|fIՎ·%ͽ}ܛ@O R1th}_6&mf*ȫ#Jcy`$T (((((((((Wwxow:(º]s u/- ((((((?K>v!6/nA , ƞ6C>iW#n3 ;7 .E-om,Q?oaN4[ѐ[S$cgׇ<1Vppa2̙˷19Ǔ޺*+}3Rkŷ42H%8yLqjl ɂ WUuIY 4QeUq yE%(m,s~'Цզ43SHڌĊB?0G@y H.JI5^O:O42y5s| Zm+z1!B*]Kdg&< pm0N֕#`v8AX [Ri ?Gy.fӥwul#7V-F[IdYͼ.9GcJ>BӴ$:WJp_j9q$in>q93Z3$ebPcYPG-7UI-&RlmÍ۱QM_0Zlpڏ bkY 1MXB>T`A)WwH˕y1,00G$gkyAhr^gKm="H #PJNy^-[Kp Wo;o8ۛ,ȃ~NyvQՌKmGG6Am(#kV0%q\ymEeC9#Ȼ_(|6Tu:)[[]nVR>$?s6l##*hX?2Bpc.KJ q[+J:Mſ˷ K #?nQ0VĊDL&B##M~y#85h=JZtݯ#pv[:%y@7`t)hF2T̟_C9M67F[y9Bvl"ҼM\:uwqK<$ȲU*c68==h(Q@Q@Q@Q@Q@ |mۿEk@O7@w?Կj+o+lPEPEPEPEPEPEP ˽oG𾑦\x?Xi졷̩d@JտM$WMEnj&(տM$WMEsտM$QЛ/H3Л/HsV7[_]5gsV7[_GBn"j( Bn"[oYEtP9[oYEۚ 騠.s?ۚ ?5oueQ@\5ouenj&+nj&(տM$WMEsտM$QЛ/H3Л/HsV7[_]5gsV7[_GBn"j( Bn"[oYEtP9[oYEۚ 騠.s?ۚ ?5oueQ@\5ouenj&+nj&(տM$WMEsSQFZtZ[ķ2B3H JH]5P (J`w[4WgN:f*FӬmo/-d`ZGn{&F;ϵc? تZ;1xCn A}sxJ\ص.&8&(aXPJ`91+[x{OmDޯy4 0=1Ѭ!0dc1e' zqҹ9uҬwڄKVlC1f)mɟr1c'p4]Vis"m7qŴnPʧc{f/_ےM_ikq,DkWy T$vU4Y٣m/Y]ٳ ^lm'WlG0) /iK{X`XK5Y.YOéA@V<i' 7w/gEc˶MՋ'y ,Ic 2SDK3"I5of @aa؆x7g~(6ɰ d:(\I2O4K&a mc 8૚BTԵ3M0;%ȓVBKIe7xXpܗ;( (((((ow:(%{]'WA\sa]KKf ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (9 ѷu dU=_V?6aͨZJ0G ) gX5u4l?-~~/g;s}^v>ke9sT;*<ܞU%2 *" "G=j3"iw׎:|<'{N=*>V;N9|jvH% X=d] P$+h=1ѤK/t96J[[%YI >psK]sM2w a]q{ZƟC-AbYě;9QrV򩇆.m促8Ei$y M؜i[G_CfT#i//V4ʀ1z*+f[$g.9#@8T{;3\u-,U*`F.~ɑ֞Itu..V-"HF q'$c1cPm4,̗SEiI )9>C:,Ua2pp{y.mD7C|w?ܬ_p0N27<_p'%xR&ww=ޢQeƗjVi=u = ڭ"Juu*r2"al1dXܾqozڌ8S2dc<>)GB\nś\Yȃil@(_guA 8٧LBOg9ž5Fo-_D-ܬF2 O7}ߘml㊣?;;X.(cDsrLwjiM'}י>kqOi줄U u3Z+u QDFݰ8ʶ38sLW[i%ߐ2_qzMF_ƶfyx߻{nsޞ(ӽ?\\. RHmȗV/8A4homYDMЌH?@2H xxIafrW# #q&[FݚgA 7E71'<-5%K5+miN35%e@g1_2 }2Zo O*I5IRVa{n;s[6bpیa=)+"J fY)Q@Q@Q@Q@Q@Q@Q@Q@׏?WҸk? B5xEqa +@#Zb%EUQEQEQEQEQEQEQEQEx@SNkkǐCuᄷ&]KOI#C+A;P٢ ' h_+h/sSz_+h/ Э/&7iiw}mt#9!s FO$ ' h_+h/?V?_G [D|_M 7A%B? 
' hz\6ž[9 661VBx<ßZ_+h/ Э/&V [D|_MKm4ޮ>_*ׅ/A%BҴEžake YػGm ƥ끒ϰc(Ԁ(((((Wwxow:(º]s u/- ((((((((((((((((((((((((((c*FӬu{j]&BFsl8=_V?6FiG`GTh _cYͿofcwgڶ?wfv7}qjʋJW1$i4Ao*g H88p{vz#!&+p0#bisIf8bEnIg#6#&)#V =rpOӚv_cю4WNHpHe*;z UmSXOm-^+r8zWᷲ-8'ʹ!@Oj9-4۝I&[d ȭ,JA}phw{nZw,biAB !U\ʜ= zeݜ:n@*cAj7y ݴ0\Z#u5NQ\Jèִ/El?hmmܑ,e**tw=I5&Љʪ0Cfs3}QAEPEPEPEPEPEPEPEPEPEPEPEPEP5,?tOЍs<\BJᮇD</ְ؉nhQEdQ@Q@Q@Q@Q@Q@Q@Q@s5ӿk բ+((((((+Jak>_*ׅyEVaEPEPEPEPEPkG+e岼Z 9D LadcINГSsn`x.4/F&I .\Mooɮml0呷099>?ZQQ,bhVPFˀ2xG:[I4ΥIԪq?YRZӾmZHY =h%:mp_Z+7[~+mہWz`-UcukIVdP @-$X_im,ҩ]EVv+ 0ВS{'EW:)8٨>3/p;s]A(;֫i5Id,/`pӡreVnVMY!ES2 ( ( ( ( ( ( ( ( ( ( ( (9Ȯ?!apCkW t:'FKsB($((((((((+ C\4-Q\EPEPEPEPEPEP\|T& s]qP,?u\7;(3 ( ( ( ( (>@%{]'>6^I㢀>o+lW?WR٫((((((((((((((((((((((((((f?(zmk$L$9?d%WUXkrkWQ^Yb| kzqXhLu n/'h^s2Z_/81wgkf$ o/,!2@c%:0zdzHzٙmnX i-|dd7o6Isirf(&푓~qo m/5 h%iI/Z?%Y3mmr6acI{۫h!T ;e[.<_HchF!Hw Ǡ%tw:M6RQ+Fo&5$;إu}{:wmm6u<ƞhLgQ0?$4+}KSG fۣEH*\t `-u}FM~k!襄 ?NJZdӴmI9$w29|zb{=u~F|^M[ d-MԠ5orrp >A!*v`0{wl-Uv?f( o 4`MW.ſ2j5_ck 8Oɪ߻3Gdݏ,?|[C&~>-Uv?f( o 5&V\:ci. lqF:݉ފ,p)QEQEQEQEQE+6MI\ny Yibd1x^HOS3Pݍ)Iٻ|GoJ#Kk::eA<kNk`ͼYC%G*P 9N08i]*pV5{q[mP۝8Ta*$ۃ3W*/L_,K!bEeFv1j΅sM56{'% |dv*7u\!}UUپ  YY7.v(2nc ;8F*܂E{#JnP1V%P!@=*S!aEP (((((((((((((W t:'FȮ?!apCkXlD4(B+gFYZt2]J۪#1+t ?5oebj9k9\cV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltcV[6_G&C~ltG:Z}y>kW3hj ( ( ( ( ( ( (>@%{]'>6^I㢀>o+lW?WR٫((((((((((((((((((((((((((f?(zmKej|p^#%~f@N9E=_V?6hC70tc?ua?-1<[f}rAkv? A| Ey!fid o<k7~>'̌fʦıI%IeHb8#jZq].x^E{H&FLYp r16K+ w*u8*Ų ݷ30?[ 3}u]cLi ]JlNQ԰>#{a@Y)?wg]_iQ >hG5/,?P3~CA=‹'dTr(@`9R~4X.L4G{ƨL]owjı  >Syh6‰87gBPC@7miQ >]:=+*\Fwߦ .9m}JdP!AOB?0X.iQ >oQHf&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[P&g#TaϮ;5[PXQC>c>u ]Dyw(uD</=,?tOЍk QVHW;qojt1[4Jy18\8]sz w6Ҕ܇ OoYEj'*5hZ^rnqt:;j㹃 B~"H5?ev\/Ŕ8eM ]*XoM.7WT3[Hi4]ϳ+AП/H OoYEY_ g0,O+nW$x9, \$Sk- ht t̊"$ ? 
O$S!\aV C)p_d9N*I|OO-Ĩ$VmMȪp(FKFqwWkAП/H OoYEYM95Ğ_,2`ci<0-0B2sp<|sE{9eoH5?eAП/HIw~}.{v f)l=^mvؼ7(T*Ax V)oH5?eAП/H/=2;h2N.*ZλN܀ `1NuYBuud:n$Eн{2$ ? O$V@%{]'>6^I㢀>o+lW?WR٫((((((((((((((((((((((((((f?(zmMi&TQ`BG\@Ny 5UB%di&?Y{9&[dGە`!+Ԓ k? K2c ;R5ҭ[H՝WBΒwCiIY!jahG $9'ixuW J⮗D</ְ؉nhQEdsz w6ҺJ/(/mL#7"oB s~%V&=Ɔz2N,a+]?>T35M<\\<ӆVbl]%Uf9隍<]maGN<7tA8[N͠Y=ɼ9D[z}s] (f+ QJs$EI*D1қ g+RxZN{$H HsvƊ5Ej0 i 1)6xYPO*ſ->Ύ6B*q\*QyEBWؐ7{6c%r%_wMt}F+y{<a{me*AZPՁTwoH,/,[Ǚn`s Y@3 1<h]X쌋 &{?1r q}wؼ_eHDXC.r'1] 5ubc9GfT> iNc-ot"O#UM2 +&%HBXNN~cԞrbsVBZmRiivRٿ'-zR^x;MYnIB eC\.1l}v+KZi0t |fa"r9Fxd&.OdkȰWnAX2Okzvso[mQFɝ2xX2hة] ]GH)$'9iVWSA:[xnq5qEqr.O9FHǷ =*(I%an핬=:&8C(9ǹ9&E2oVrA x PWw[ESQEQEQEQEQEQE+rsI\7/ܾy<]imǦ],WmdG w#7۞&z.-پN(8  9nn_Qѹ}GX^3V5iSM]ۙBO:ga]R}Ph{`Pr﵆}:E>V%8[n_Qѹ}GXzR?e8mX3f3ۏz|>*o,e(#}pF.V&~u.ሳx&2 rV!KktҮKʳˆ\8s8Sr΍?:nt׺|SHed :lV2[r΍?:EVܾrά@/ܾ4Pm?:7/[r΍?:EVܾrά@/ܾ4Pm?:7/[r΍?:Er6 dqWMkW t:'FKsB($+Կh ѶW7@m*eQXQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@%_"4( c7QEQEQEQEQEQEQE|JnO|mۿE}?WR٫WA@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@PXۺPfL<PXۺuԷwXn2Nx[ӲFe^xsJ.kwx0 0d`sZofeUwVB,@ ~Q֥n9Zڛ+t{o7v6߿# OXGPbMr6W:i4Z\HV<2'BIg8yD_lR+D󉏖+ч\|͜繫>Ո6d!c6iBMeXe\yxwLܴ2@ߴ67ϽgEY.Qox=Ȇ  Nk Ėˤ3[ɗlāmtty&L%(Z;KO&(lJqpG=):!f đ60N`y/f]FYڼ gcJݺԍ^ tPy2atWSN/KHZݠ+ĝ 2sŝ?Mi/( 23dn<0]kZGsw6m.[1v27ޡsD.}?uU>bx8 |5V$ixFe{c#_#ݍn83K'٤F$$o<5ېϽsxE -Fݠwd u-n5 {WHٱrc|^JOEprQmYZ2FF,qw,IN ګ20dF @qڹ[^+Z[t1pO ll3%Jn-@YМxj_as-,FpY##cd)8EKegqōIAױ'#▼ı#ˌ{qr MӕXPpF3)YV۶NYI?Y]پnceUNE)XU#pǧkEc":Aqu}n쏴1ZKܝB3(S6؜۷6vjoE%Q@Q@Q@Q@׏?WҸk? 
B5xEqa +@#Zb%EU k"FH&kr cav/=mQ@7 SWTG SWT] ?wy?wysE  ;  ;빢?uOt?uOuE???hpOA]Sw]OA]Sw]w4Q`8o@.@.(7 SWTG SWT]X)+)+, ?wy?wysE  ;  ;빢?uOt?uOuE???hpOA]Sw]OA]Sw]w4Q`9}‡CiBdY[f`@KHI<*uQL(((((((>6^I㢏Wwx ; _[5t7@w?Կj(((((((((((((((((((((((((((J`wZqIe+:qd%WU<;j6}WRHQHbbEU_[ e֦D&?esu#J }+k6H]c eu=Nme .!X(l&n%Z,MnmI cqZC-&u!vm${cұχZЬ6^I㢏Wwx ; _[5t7@w?Կj(((((((((((((((((((((((((((J`wN˫t!Td'qSc*Fc-i] @_ m] t5Q} 3gv13Io|4α].qWrqNw#MiL*J '!q<'Dfy4'%nz魦hZŽv'4﹇?5*Xt& }ϙVF?tv: ͭi5ṵ{#nS[{g[1E$ֱGg U 8 ;/xNm>y7[k#ۦLJqAOIoPm$6L;ZK0JQKwqxڂ)>TBoOV',2Vju( Ӈ^0?#(]Ugr+)ogb}2X4/^__?*ie<|''Gr$7{ ɿyV²ymO=Qѐ=W#(7/xs n,4Iې9)Ѽo8±Ȭ@+0qmhf# VʖfyS7.9j4! $(qRw}i5PdyHQNi'3jut7fB\(/Gҵ屴r%^0Sӌ w;w5FX@օmF|76\GutO9^30J*89 Տmmtm`71&Hg {֝agqZAndD%PAF}zN,Z}xaT/ߒ4] ҺԹETQEQEQEQEQEQEQEQEQEQEs^<\BJᮇD</=,?tOЍk QVHQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE+ӕ8lo.IeLӚ7Җ31dR,bn:oJ~Шo!Up.' xO6iV?},AڪH^603ﱛ7t^ g M K#;tA+hGJugL^\|+"gNj&O^\D<]ccbv=>2d@Lk {y޾D:D,Akk(!&Hq!|E[kny6]ZRG^qސxL*Ud{y2v;s6g "I+͸mٳ8pG3^<6kl5[,/.uS 4>b>݉< kx _ǧ]K6l4{cBa0@ZkV7']1Hvܫ#NH'k2wٮ1:@d۵IC96bTcAa "Q'l}3wQ)ɢQ fcap 208TtwiLeXok{*6DyI+-ڪąf;pH8-{Q$pRr+e{fF  ,X8\jzT/_a$l>\غMY\YfOYy䁑zb|] Xp]5;Bbz($!]]9RPxfSPm$4lPRF^Ea̻C ( (9Ȯ?!apCkW t:'FKsB($(((((((((((((((((((((((ow:(%{]'WA\sa]KKf ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (9 ѷu%ēYO gLPXۺuƶWvvتp1F8[ӲFQ\f&rnq cLίW96a-vm# w`D^f#o+}&.|ausi4Dj! )3f@bI\IHYs'̄cw pa]L2ӤM4XsaHd'Cc]>Z3 <\I|ӱm9l&12nb%9bI'П5ue²3 *=JWڮ|;u,W׏ő&>rsߦ#ҭ( 0}HTri\EsGE <_\\]#o0{SՏZ<5d-p8eyqVg$AEA9E8Q7s\ZV|eE_ū_XJ'pml׶sҪ/4tTx$}Χl`V~<86qt&%A*N:t查TԣQ{lh=lsq*zf[7_ê>֚۬+baSo|-gA"_O5̒GfYR&9Oz:q\Ğ<QE&/~ۧ8Y/."[[[O! 
cct'<8+G c𭍝٤R8\x돮hռ)k;A"96xY;M]nl$[K; V#:X'c${ڼ1Go.I(܊w#܍;_4VݷGa(D *QNpQ`A;o\r2I`n~ezrY\\ʓ9F1ݪUB`QӌW>im-ıKXKeN=j+ĚQͧ 1}*J|{~vTW9jw۫ai-H;8fc@y㞎'p) (y"] x?_{ǟ+X\5- ((((((((((((((((((((((((Wwxow:(º]s u/- ((((((((((((((((((((((((((c*FF:P (P;ps'G!vmt>gK-SL]O *@M|H+g#ߩ'4Yw|.UEyGI%-Y*ks`cIky <>nFB#F&ԮkzhIC# r=_sRmA`-ܒT<Tk.s!whB˾h8L)5;zk6zlCqUfDr@N#S\!zl&ymT#E8cy#jO @m/P̻HCIn oAO_qsɭPˉbDѰV$CG*xUj>5 Y@xgn g6a+dP[L]GlddLbW!A8J._%SI .態'3ԕ x$NKHEC),)9>xK1E%V]A,13m` :c.+Q΅p꤂?E{%r' #ILH7H`4[6Y[E?8+v#s'g,]ʱ`ʞbmnn\+7RQnHI,lQd ؈?y˹.5bv yg Fڸ``1EZvWWPZMup! ' OTfF|^Dț,f@ރ<ӧz]lJp2x*R$ 1mdH#3[{rPmN3MF=_qsI:+^jD9STGqxX#cJnk8-m\\nH f-XO56CRn5W(LFҞ ;Wq_ ŵxxL|eg f.d/2h9"R8EUBF@6TFGJ*mHDf MY՞E]QHg5,?tOЍs<\BJᮇD</ְ؉nhQEdQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ |mۿEk@O7@w?Կj+o+lPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP3=_V?6u9 WA%ydtPXۺPfL2ݖeaՔi~²y&߽یfk=KMO8Q&9Mr߃qIRy"[s$`s]rٱs w*qi vꥂ7q jN6b8Lj 2,9'$I'O<܉P20oo̠qSN7W2%3kXn%ѩiDE*C2G>x9'P`'|6ݟ3f0GgڭNh#oX d ܁*i({[yF7˷:0Pi]ұ=똶X.-\8i?3Oob,E#2 \cQ2iG|"-19c;xUɬei!0ugcr=vR_f(^m&$ c P'm6sK<CBN$fx=~Vtm8ۉ3w@46縉 RZ2[dbw?# O` L:jiki'#_6P(?z|i6d/s3r#BF(ZB)I؋#ŀqf%U1]AtfQRXQEQEQEQEQEQEQEQEQEQExEqa +@#\?WҸk? 
B56"[QEY!EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPkG+Yg- i_8]ጒ' 硋YҖx-<~R\d0T@ Ec;yD2 z|Ñ;oWב\1vb>Csoz5=ֹrWdhEij,M0c9k9kX}_iҞXo3MXI,vȐ8[vXI]>/sZn'KNv;]܃y{%EKpbFc#}gCkBj]Lr_K 2*ʛ.̸Wq֋vg%^ yγ@lq 0Dśxi;1kvE s&P44¤BCCbxl.$ñ;?)Wt>+?6n?=wD=Xg:ѶWwR9B#X ;Fcԑ(t^y}3Km?P%$/;HcCҵgycP2"LfNAps>?iK} Ϫr+ۜӡR0B]7!a,Wrgө&it$AŜ nsW\ntKYV)EEȮs4;ZW4hRk:\-˩YJ :s9>k +o9+ p$#] *dwEPXӨp;s*M>SEd•u{h(((y"] x?_{ǟ+X\5- ((((((((((((((((((((((((Wwxow:(º]s u/- ((((((((((((((((((((((((((c*F֞luIm>+F (u yX eX?d-$ҹ=`~{l/A&Ϳ7coV%5s4kdȞh e!^ӷ_Ix[8 @ޙnx'?s|BO"1_|<,ߌ&Dp}8 GпJBEj2!s$Oz,LO,DRB]0:gzwk^-߅}LO@02 oiȎDo{̖67kf8 ~K1''96'L}?¶Io>ͼC j,C.KyQ3xZk[MB-KN݇{+{s|B |HPe,r$Y3ͤ"|&ZOio<1cw)nQ!J@_a\xZi-5,nURSey%8$qc肺 7?)1{(>gקI`Z_4eUFO0cq5!Z)w[>r$"3q WI!F.g"f~+y"utH>OI'Nzc޳%cdQ!FFpl<`G2(Q!OūXʟâ[6nBYǜU>;뻛"J+$[nRQWSҗWDRM1={-C0am-S) #?s|BqOs__ ( E]v݌E,d$jw?(?qFܽ t%%-GI=][Ҭk9I~bFP0 =ޥ?s|B=OEA!FOEA!FOEA!F`Eqa +@#\׍ُ'?0Ҹ? B56"[QEY!EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPkG+P?| ȶ\k+$2$J\</`0HʹCVcL. Ds $1[k<8뎼g-uV`G򂁵O'8CZEƯ,?gڪtBq~[O֒r[Y\aU|UwrK |8>$_)JizbC,"FKO=O,Jktf צ53̼\'ɍs]Woc24'$s`D=8.g^o o [w'9[_E1ܼ6i'XRԄ6qor玚]G[<")&"X$d 9Vs5қg|r9?)^6,Ot&kt+*ĕ8R1~|K^IyP` p7e=2vEecKr΍?:g՝CBV6;r歏[\MifG9vU8=QlH_r΍?:/iP[YM"[ȋ 풢Lрlg2xY GovES9ѭ}GFe?bX<_<| TyK[HA2WiA}GFdAhm]̲"bS%L݊/me[W)q,A\G*~ys"m?:7/1NjP Tl"49c)Tl H+=wv-~ 䓱}GF,KsmmqnΡmr 3ֲ5Ŧ]m$pZʯ'K#HE5/ܾB&/\>wiQfʌ?2pGaHd>*Xw27t[pw\ܾrΛ_jfMWh9 :HVܾrά@/ܾ4Pm?:7/[r΍?:EVܾrά@/ܾ4Pm?:7/؃ᓃ/]6 x?_{ǟ+X\5- ((((((((((((((((((((((((Wwxow:(º]s u/- ((((((((((((((((((((((((((c*FF"`0:d&*FӬ.ŕ|KvS] 2vLVRݖ6+>EGWT-)3nHa1qZ^#\m%фHb.T%fĂJtԭ I][*i6i͒?;re${桺y)A䘮$ |jb5na.0KnF G nu>Eyuy n31L*Ly 91UqC}Oikd/4">͙ȅāFeqGSkUuvk|g[Afi6|7g*֘n$[cx]d"?(brhvx7kh-`8m[~WGduҷ@V@qFJHM4_<>N?^C8?ei YI.%7®7gF:U ynnZCM{: z3BOS`I6}RV]k5XryB'$$sUw{W41ڨrVC{ҳ-l-'[>fw9{_IY8,ggF7L?xܓI(wՖJ(@QEQEQEQEQEQEs^<\BJᮇD</=,?tOЍk QVHQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE+ 8sެVׅܐ*?$\Iw}FKBiAYFEh}^E<95a<_{ MBϐ n$3$6^I㢏Wwx ; _[5t7@w?Կj(((((((((((((((((((((((((((*G]֮dXmŕL/@e!Qde(v ]=#.0(r1ؒ==?s|B3ۖ4K+m$ې?n] t)-bA\L<,A 
bQׯ7?(?Txv2+e{vqϑtN/]$vil)mٍ>>tQOD.u ~r}?tqƂPuSɏ=w 1X ][T}8<woF4u?s|B`e=w/۟nSw/ܾnQ!Mܾr΀!F7r΍?:v?:7/۟nSw/ܾnQ!Mܾr΀!F7r΍?:v?:7/۟nSw/ܾnQ!Mܾr΀!F7r΍?:v?:7/۟nSw/ܾnQ!Mܾr΀!F7r΍?:n|0A9]. x?_g *OЍk QVHQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE+UA;MnQ|xܻP6 r:9R;⣗ZmY]=YchxɗcZxgK !X]U We rYm#"ƙ$B9ngKJ{g:qUͪљz5KSڥM2tڊ̻XHAMo{ =`h?&7ISrogoonN̤ĖI-Z8b nnӕsҒkgљxJ $Y'0rZ@G*ԗ6$aQ4y_,I# iё1y_~^O*]r FV@zϭAi3uo=Wo UmFvӖ FHO+$ы+[D)d9fi=K8,2] .3F@,E9.ys*ɸ L ;@{&tvX񄰽2dU@ `\541os$ǀw$*a&$܄$RNzd2H)ʴ$=qѽ-.QRXQEQEQEQEQEQEQEQEQEQEQEQExEqa +@#\?WҸk? B56"[QEY!EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPkG+@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!Vn>@tCI[o?J  ?%o+]a+7G.߈0䕿诐?vC$~!VnWwx?[ڵ/KSv 8P?././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/_static/pyparsingClassDiagram_3.0.9.jpg0000644000000000000000000106234114412577542020576 0ustar00JFIFCompressed by jpeg-recompress      +""+2*(*2<66caɿ!09@G9n {duol`eSFM2tr`s@||pհ2f"rw kd.SjZ;3c{? 5`"On#?S0հ2FS`5:,$M&?I]:u_&l4 UDQh2RkLjYj{88?IXNkw$lVhdU"^u`fߐ{S_un΀=ɀ Djzé1I\ߡ@հ3m-egwr`sT1khRw[- [#":oCcm/;L*5*lݫX {A~rb1@հ3Tcj)wT" eJ&?HZ1v lFT !:'P3tͿ+StmKWd?IXZ]f`"NX`eCNz{ёR5KKw? l\F׭bw_Gv⊵hm߬Y+5 =n勽s=Iyk׽{0$r`sTl^~-Xsyv*.1*h [#9Oյ+ Rdܘ9@U%kNJ/~ *">mho`f^؝2!'4"{? l\voZVƞlhhJdm| [#( ȻƥZEt&?IZ5&2ZgLV5i } [#"6P-giTUDsFfc=jL**ՖA_k5ywHVr`sMN܄"`"E]_4Vʥ'k.5j~f:e{? 
<ڌЗЊ~ [#*Oysx'f09@sknB*"+*Pp=}-] m\0tհ3ܘ9@U%kuovʒX} smB:8jL)qXmEAkO; =,_ =PߣVȈ81ߞߡt:톕1uܘ9@U%u᥵=ӞlEZE_4ޣ .پ[#){.Xz730[u/+EvkkyZf`d2C|j_}-Gz'ĎL6Y#>7ԸثXr {ٚ#VK㛑cV{'}8c牬mHL*^W gtI}xKT,܀V6O3Eͯ? l^ǹ½''EZG1Hitiەn[OAiX\~MkojK5lӮaé%c_·̙1inɂ>@9@Sblո* keBk-{UXNǶܘ9}zg6Ll2!ODl+Zy)=!kk/a3 gjI~X?.VWY-Z:-sMҀix-=b*" }KGIiߊߑհ2Xމ߈y(TJ3`&) +-)ZԖUDWttj۫ [h [#+kWfW JS4=ɃR+`sMեbҳQVMӌֲT&r z>vq`dDNm:톅Č& NgG`sO+7%[sK.?S9b1[;ޓOsF_JZ/-Zeհ2봫/30[u:} {zkkݏ@ M9@'XْZ9ĕ*k-cnGU[޺[V \mIYDD{5IC݀56.cI'+j^=˕\*}X)r3 ?v "`#|I@InKljFy7}#Z.Z[$n+򉭃SsCwǯOjm_ܭݳkO3P=vPs{7אܴZkZ$B"~V qz˫?& N5aң'!6EH':84e 7~Z3*"!qeFU1}6?k1N{~r܌w'm_ +ׯ}4e"߻}*")?Φoj>ڜLr`0]*4o8 ʯq @svrްk8>X@հ2 y-zbr^h٫0jrkLn›1.BC^PhO[c|eG1+D>oc`d &se!Y7=i& Nc[c&6|)9@s:ۦl&ի|kZ~Hհ2&/*C O\M٠`sTŖ"ɧ_wv"`#B#ަ_>`:5lҮ ^=ɃSJSl*<?I؊nn;0U?bmհ2?w_׍R7{ =ɃSμ-S3Wc69@U*] J6zj0gV€2 ["Vѩen{Wq_69xN5ຯf9@U?=k٥**"9<߄oR෽t,᫇ݠo&#gjjx? TUD}5I}M5ls]^[\gb^{lJ P9@sq=s*"(4).Fe\,d{ͬqjCdd$ܘ#VZ>*:ڛ Xґ"rVTLfmK+r'qm+}vc׮FWEkކB{J/wJkr7و)-~dojHjW,`##l*""b4u5$5үZw@հ2#}r`d+0sTV }X"`"[&)XЯY+=kp4Zum/$ܘcșpGB`}+bs6*罘Y; V=o:{p$cdlLW[*"&~N ~Xb5l1zJ֙7&` sQR,Uޙ*"9zC׌w`f֛).r` sT[2"`#G}kE%FۜX+}31Lc\*|v0x?֌@ܘbg9@U9ݬ4."*"!S(kkq!MiuWBl=ɀ|Rc߸k-ݚ{= [# KTP QBBvAVve5}:vRj5?Udc:7v 9@#dGxWW,eBLϢ"_"]?QA~Y1ӱ?H~ ݐ$2A8ys 3?1mkl`eӱ?U3Y@WXsV?6;}GLvV9@@j ݐTa's l6kv 9@0>EՉ|S?@gb 9@N7aӖX4*ӱ?qJ!5>vLHK&Z\h?k{#/Wc6M݆9@,y _v?Y-a+_qgHL*֠VZ@s*,4I|=6']5+OnHˀ=L>?ܥrC-=hYx%>*9@DzD}f!4-?`alRaamR)p<R Ŝ8s /Q,`?VZL5CNlłήX՟t&܁%0|_04?y&{cI[ۙ[+Y&Ԉ[pl *֐s׀zgϦ R՟FŤ ̀W%c&"cY]ZtG@ *DK}9@u_)f1Nލ5`õk?+c܀VB3^ ݑ[Az!ɃnF`p#| ͘ 9@~DsLݦ]6Sׂ*ZzЊr9@k|zNjP+3KX:YU0(:Ԍ+7fDUm@9@HYN3b|kÞ>K7ae|sm̀AkYQ֕8J͘"RRB'BKxwNgU![ 7s0EYޛGkMKX*? J4lA[cٶ95v[goI+?,@Ѳ:,0YU0"KT~'sV+ 5rT퀎8<?[؜+oUK$ǰXך U9`j6Yʝ8 0Y=Ip!bVle1Nk<-@Tʴ̈?nT1+YSOw ͙@+"e&R*p(05G^e?o+QkKr7+VWX a 'c}iY9@ B#0hhknFZBgCח߮؊ݐPsfiyd 7ݏSlif!!g,_chaƆΜ,TTBfcCgd`YZZhҭi{b $ ͧI?F W_j)/SCC%wDnȇ屩gU), .A;]ϥCSoY-̜tk@"?;VU Z' ŗެjbuaWsh[:YrZ,­i+vB#2?Z~"kۯb&Hηz[L>EK_.XHX{AY9@/E |[r+0U0@ ]IPf1 ݐ :0+7?@u 0WlBf1*ǹ@pkCX? Ք0VXd \@W,a\UW,`[ ՔDEe-T!))`D`*M? 
ݐ{@VY@dj{Cv8b( b%l+}_Eo}U9@W,`0+v@Y\r[ 1jH Y ?ە:4>W|\o=f|Bd{]r  Gm +'? ݐk`Z=UX J͐ 5[W`Z ߕ.)(v +0W,`Xb ݑ?dvg Ք b*֣z Xi¬>W|fJɳkr0+VPW,`Xc ŝ?b0ڀ(V쀮؀1,;@ZBgFՐٓR]r Ք+0ـI? +0ڀW,`؀Ud&Ԗ@N!"#@rTm?b; X[d U? +0%\ \2X]SXo!5l+_ ?c( ǐ W,`G>pWs+6`XfWl@fb װ**6뇠fkpiVlß<cdUަ_͝Vc Ŝ]n|@ U V]`v]W (, jŜY@ ߠD}sXr\/͝Ve9@iVƘ*6C{W7} k7= o0)S Z4O4vahdҵeV?*hVH81*}U-g7"o[o`5l 3f!ұ{p (H"@`>WH!?!`[d"4>51KoJ;`вĈ{gұ@CFon!gўXfBG{R;}csdVy3o"}V쁇@m6'$DP 7v>'YU{A?1Fm(Тt?DG9K˳SRu$ Ze""Xfs[( ,2+vS+6`K1Lݘ ݐeTv?\""\XUp4rٺiAs6Xdr'͜sn|\o1y@fՙŽlCosـ+pk@Iʩ  xjuP"dsـkI>W}#a?F{"r9@hdj'׃+v@WgB ?Y3>m0n+ )؁9@+pWsVIRp? *6WlA>s&:P9@XkOvp9@8Wz q[x*9@X" T-@?'<3X4 @9@XBb+s{@rN^=X ZsrS%vďҝ?V,@ d2Lۛ+KbbTsd8yz> 6e 1IZh?d 13:`ikhb)xY&|zrF6=yXMf/ypjhjMB[BOKo L+@JZ-p?:1zb4m5#dn(KBK<Ʃ'98ytM-٩{` ބKz#A9AEfޓsLHCKS-Z:FqWL/liʀ T(TAw?c4ͯ@9@ӫ誖ZW3#l<ɀc` #}"DK4%s9@e ) O > PT?n@"R)_KpYy^7[_s >##`"{^CFt*6o5*׏v'V{NlW(;8Y{$ XIՙPܳ@~>V2|ǛG[.eVBifaK[viHoRn`ibkբ,0hY2X`pyyJKЋԃ^ȭYT֮Zu`ڕL]?NFhDhyov`Z+زy5`~GFOO=OR]:ݛsRś CFZ,A2NToqՕ0MʟlȪOkHyx /9_[[R>KZ^*4q] Oat&t}Ţ*Og6b76qPjL5jM [47nԏ55"BFZEiU5,!TU%A`g*֑Z# f`?9=AֱjUZ$;aKz,ƗWm)j7/gjM"pu.M]k?H9F];#%c}FNQR=p,NӟgI bd|+:*q61`ùGkCwfVhKYڶi>#vr:zmVޱ䖼\5UhP {+f/GfhұjͫD9@k` r|(0k#VcӴyD}Ռĵ{fpQO~pْFFr*s+r>S.>nŃ\_tc=N}KĀsT-*6jTc8m5!:d T+pU /0so  Xgrs|U9@j*{@ V@\9@:`& ݐbh^nsFjXPV,rd++vC?7R‡L͜V*V,+#?&$tCf Ŝj ՔjsF#:5aW<&kw `*X7 1W,g?m^3BҞگҗen&g#vv@Y@V6d9@s#k{9`9@(Y@Vl+v@V,MeQR‡L W,`+v@ +v@ IdcedKaCn+ 5qoX[V?o>=Z~}R‡L!jr<+ ǥgC4 K 0sWl?@rr| ݐc9@s#hx:5K:`bcXYW,g?lZx3{{CG`jXPg nrsFܱS ~k=aC + +01iIV?l'ʼ̒ K 0sV,]\؀0XڕJz3Z'rl,0=#ljXP9؀XV؀\9]k9@A v ͘vsFBaC+v@+VP+ 9?m.#R‡L+ Wl@Vl+ Vl81 i5,(tcnrbvZ g[P_ K 0s 5( n9@s#q@9@W,`^c]W sF\}4}}:d=UkhI5lI׻sK6WN绾#l8<@zg9V=(9ڿ=?6l:}6]:`֮Je:jtzrٍ,Ex}:ލo/q4kF7־/l\>tv+%?l>kgkr`:`ǀs amJN-y=y{ݳoQPb T@>Y9@s#h8Ŗ>뷃R‡L9h *rK3eR=>AZϠ԰?`E;58&,?o;KhvNqg~'l9@z|Ymd5,(t輠D&Zv/&oaC+>. 
,?6W94~V,mt[:9ҽ`BIi]OZţ_5,(tڅrTW,cQ``a؊3~jXPU 툮cfsF#6uq3j:R‡L39LZieXjhYS: XݹIjXPiX`la mrU߲s6?39@j yгb?6sMxfImgzpZ Vvf CchR>Rj5,(td +vJ+VPVkg?Z:$:`h V{@ גƧ[mU{aaC*WlAYx+VP+0?mnע#,;R‡L Ք"e=U brѭNb_i,x74~N:`br ͛_B\ i?m?Y 䤑?N/洮‡L2ha'A\ !'9@s"$VpIa̓ߒ9@jc+VV(y{@j;:` Ք ͘ ݐ~d0s Փ0/ʽkHdϺ0s ri#`+6`r@7chnep&X9@skHrd*vj+  0 ݐ1֭  g ݐ?>{FMY^RTDԮ4qVI}>Fݐ|9@m (Za5lwp|ϗb:cN_؎-ri36op92F۷MMh[v;8i:_+rQ<`tmFUB{@ br7>=г7B& г7p?y}$F9AX P g@@V6;v'sp "IFrgQ+5[ECfm-{eag]o@ȥmCT{MkZ)9ںv]`݂o-'6{NMw seBG+5BvTKݾǪ%hoq}8DiwN ij$MnhC qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8Bmz_~6=PwGW߻T+Ǫh qq}8we*ت\04*??*??OʳOS#7|?uj`ÏAÏAÏAÏAÏAÏA:gar#s=[:ߧ-y9n;ηquN[rwoӖ~[:ߧ-y9n;ηquN[rwoӖ~[:ߧ-y9n;ηquN[rwoӖ&1ll>Ku!l[?pUm5j;F:-^4U>*0y !dhPgHZ5$Lb`h=W8Lϩ$'B3NIzn:f9UIZ?$Mk&BHg u8DZ+O0U8,Qݥ(1mrIE|6|{nЏ?OʳC3U+R0>;C 8>;C 8>;C 8>;C 8>;C 8>;CU|مfr?,g=ʟνrqܩw*:ʟνrqܩw*:ʟνrqܩw*:ʟνrqܩw*:ʟνrqܩw*:ʟνrqܩw*:ʟνrqg~8}7|>OFXs)g>r(vg :/tڜZ=Qgmq7 j; /_ Rڝ oj~#ESJjlr#5vDPuNmӓoCdYZ+PgiJ HSSXZ%,PHW[]1jG(^vl^RvUݴ_hG|Y}5fQ9)n%eP&{2SщIese{LߚFT]fv^l:MIښ-sՆU}ܮlW Xz]uH`h딢(b ":g'js.}9:ϧ#Syxo>Mӑ㩼rMӑ㩼rMӑ㩼r<'y[zi~ЗwfլF<@91saz*̲Y`0T,yɈ8a=> _-j1Fh*,E' o:[Ӥӌ$m:L[YD+|r|/Ӏ-1.\ ܵFBѓW '(rad|X"[=zX(z6(-5,b9[mB? |t +??*6ӫ! >װ}-;^8{KCװ}-;^8{KCװ}-;^8{KCװ}-;^8jኑDd/=Ѩa༬X2rda{`̩aRcdS6ݡ 7)]Y#3uY]Y=5bODZN%mm$2Z"e bH\Q{jߤ^ ؘ60f]awz3F(9)2=ډ,"Jnn`MHWG`qIQ*FOGʯ?m/mrzU ohZOd؛%=aAj -vgRVrƧlR[xSw\@.LL`M5ynzZ kl/KXx΂.^ˎ=шT.u,TT ukCɮO* RvdD;K{< -u'òlMKY; UJ*7֝+Vц3Dm!VZڣ+h] =F`,smB? 
|t +??*7EWԁtU6,p>5fη&e%[v9g'5Ulu}_mY-pRsgRWAb/ c9> K2ԱYa\8v8+Rh1FZ9o+fUa9܏XP:l2TG[´$ezZ CصYw\.E;_޷S#n `YL%uM*ew*N{= ή*ͧ*EyR7nVfgT[jS* %kPm A2W2ضk +%$5.@@ݕn,XlcР)5: XjUACf\ tD-QTe:KhZpKH!õlV&<}ܪY*f0caf=m^;nWۿfٵx=m^;nW6W^est̳[*KUuTBCnWۿfٵx=m^;nWۿfٵx=m^;nWۿfٵx=m^;nWۿfٵx=m^;nWۿfT#%l~WUn:.d{1*Tzr\G\Gז% WX!bq9adwXM46zf:lMa:xܪ6,Z-޾wRZVT/8eE/ -5 bnt(j%CXl!ÑZn,cP~ h1/QքbUmB\4ڨ.V`@2 DK(f0y*KRs0N5}֟eM{^ΫܶT+ReZ-W:@s+EЌ׽mY-jR?Mze\%谅(z raH4a7Ӭu,k: ;K`328BZ:U]6 xUAp05{mo_)Vy{TV3`cߴNOߴNOߴNOߴNOߴNOߴNOߴNOqqXz! JrL}yG?mʚ"&j۫i5R{gk6V:}YPx㮑Cj뜞fMa4v|cb*v}))^`jIhJUwc;7i…(=n gl+&q9qG9jʶHKxSн.rxjՄW=(+JaPinvD55ӎqď4:b>UCi~ЗXHe0䂝n OlP-v=x,P(m{9oTc7Gr&W5=wQ`p[P^k}O {k%*YTD!%9p% ̓L</;+S({nЏ?OʳO6s@9эVl^Bx~km_K뫚@*[ΞJ 4S몍UerP#ii뮮\iO,2ȯ%.nxNY\lʫR[NKm0+&|p~z{QOӓTe}d( ]JHfL ]A<3ũ v[mC(,  ĂpҺpnL^i޵c#囷Uf8!ђcv}үV5o͚/U`ak6]b(0dbVݭYlF&y3‖jkEl۷Y/,_eV; ӂ\}ܪ.9LA@ XۉfMgO [x v.sf᫖İݫ+;-e}Hݪֈ_6WMv$vW: 6D^ɵ RX;/w+^EN wʁ[iA8FA4uzf &63iLZOt7 qP_NH͆7YZ^=ehg|VRZmf2r޶5hʫ)j-OA`` {TZN5\lY8əlk &:O3kUu{W{mo_)Vy/p꾤+z/^K2zT8&4}dUedB25U@9&cĬ\jȗVVK>cEOAh-Y #}õyG?5Uw k4l=,K~aNGzJ*QRK+=G.WsKd4A(tH*t" aVrĦ kfvnCS$z}"RR0B&eY:@3Z;uiamRuYzD:3 *5S,$9 CYv!hLx&&tKNuj=2W|X67=WMhTͮ 6nN`z:iff:\X][osp*6.^kRNضEi=ZZq |`kmi{4[(qٶ b6x4(qMiѮ Kl΃UyJWܵ2Sg9˭] WٜHn)TP}kPͽx.̕-B!bHV&1G>$m0O";_޷S{+zTYMyWm<3dE4Rk=Ӽzx+Mp6aUeZt53YCqe|ꓸre $ws!lOҸ }-Z&Wf7M ` tCIؾPHit*֫j\K2){R1+pvhy (u@M3XO:6l+ q5@5Pv/T],hHDU-\ZG\aPۮU $|UmB\m:UIx{Ix&/yb6{]#bdwf!Y׹ɥ(%f N8N˓ +klPp܆tyO'sM]0`l PK F7] T;HtqrUTMb>J>:^9lr;Uv,3qA{w=H< 5/`H(4;ѮNkg Y%жwimĹNͦ@&8ز@SYO.qga?^e.9=q; p\-s` #FAJhNBɵLgcN_hG|Ydx뵹\pp:1fuE|R["ƙmnt0,X \5@15[5*kCqi|jߌa(x-S9cmӬM!2Nl3Ս N6_pŘ\YX%ΡNc8\%{ԏ[tj?U*KeVqO { & а*nrISJ)6N5{&ej,.ٶHktN,A:XH {:4TV +"[fwoAlg59Pۃh@M'ZV{ViN5*9 m+gHHy}H3l|}ܪ.6 &۱Ty T VC|ZUQ= ̛kfkMc>avοvUaF,X|WQYWtaՕżkOy|)(g)B F@ :zu[.}=[U5"ˁ=1|^ JgbwL⢩j:V{3e\dݬ:p0;삞i*`#Jz*[XLVn_oJ@3QF`40Af3ٔpu#G e Vȡ{:dvQGvotdPVWu0j+5r =vVva lMLi[)\fiNܘZ}3n!0Hg3%|t +??*7EWԁ໹K_b#τҺOaXds`y:{UHdIZ\k>r*9A`5uLڪb8wd>YbBoM`\r$(]-BM/-iB@嫄j/V("wΧjEVZI{ԏ[tj?ة-sS}I%2Aayok6e1=aUɭv$R{&n m5fs6줭]Zu(.òCdrbh&5emեU7kFlm 
z1JjzQ*ulljkf|ծܚLk ~yζYy@ `] qWU7[>mAFm%Yi]‡(Q\?gOSum,Ma2@f6'Tکʹ hYdL4dRֵ% _lQT5ݚMZŒ%!s]͒GKhu\ժQG= 9 ZF'Ű^U'q~=PTl 5^yϨW6~Y8)g^k#~.|`9NNG=Cvpw3;ac4\3>UX!i2ۨo+b0GvVк ),N]%rs!AQrJU fI'ule\WDUM:L8MD !G'DLA7]eWH,9휣)b$Yl(ѲܯAs|m +G21.;2;cel:g1f[iTŢ!*%jA qfA1r>Knji ;櫆ʧoLN3MY"Bk XqԚt:qEK5SKe[=^,xr=fWޞe/xʜM(%FPLhMAdsmu4!l,뻶LLjfDRV5W4c)뀬g:5#h#YrxAX"İ6ekD[y@n xb\}ܪ.Bq+́q};f,c l0E9a]TV1mn;,u\e-5MZ5kCX Cӄ"P7QH^]ujVqp+#NVsJ4eT% *j1VwTɫj2iR l,F;ċ%Y%ª@(bQı3lⲂ%UJ)侹<ν_hG|YYmZ !C^1k>adDnPV63 1f?3Ol?29fK'k\]l{4o(͵ 'kS59@4.g%Հ_+_6[ t;m9ȭT. mAMZ tfUPڴzKXJXjmIӹ yt,܅UWQBjqdLp;x|F&ڇyvũbq$\ 7 !0îumB? |t +??*7EWԁxdjݫ_Lur=|O{CuUUTMACj Υ[WV*IcNYXɫrͥM)̼ P(WTQuj5ytLEɐY,r!pq>"I. bY;_޷SŢ*ye '+ .6L zIH#LP1)QtթBSf`<Ҟ0UK4!b9 zIH#j\L eG_a_%õ6RޮUCϊǠ8[Xg+k;Tms.J4qC0QDlMⶱ*9䲕~* ~~q(//*;_޷SE+dO!iYZd) JeW2& QE.MhJU@ݧA( Ȁ2v. W(%$e%/D3U\e䂹 r\̓_9n8%"%]9Dͨ&óoP:܎S9S[yjTݴ 6'FH҅9tysm۲A*;_޷S*ڒe|I\/46ȅrԵӃt$Ť!Y樺ˮV:jTQeOmS5N&g:\V0ǝ']zFj<>_tZN ZC& p8A #W+MS(ϘG%r>Kiت5^B.|@{,X/"Xv˥lnT9Wceز@E4́ڷr%ɲ}O_!ނa >~O;3b+h6J4tvPaLg93gyr>Kܪ+ հѪ"U.sWNhn[BzӕAU8laZ#R.n<0U#r,7Zvv$W7T N}m3U|¸g#m Rm@v3fؠdYW I"Gqc8G5lӴdl:?dALl%.mجVűtmb]ר)p,(Q8M(vcG8-cE\UU6V]GQŇv|cb˕7Zu5j>piJl QTcVgݹ1l5$`Zi@8 6'#iV[:@)ZsSo9|I8H' MT;sAzރ1~3nyٮ‹Yqc u:tydnr%o1JJ9ֵ:mpgޠq5JCS1 pK`n5W##kx]+",=X*9MYL[&zyֲeR%!mB١8M$/]W6ぱ9Jf t'C]34pռv ~WUn:GjAާ{_O:x+q[4g9&h=]QJH }U@6]&([Up||uu5 Lc\nIi*jDi\6ld{= z|dmº/}õyG?X]&U WmUz0q ..E^D0G }v)!]3ec9 Hl5+!c!v{VS0.>ncܮ2Ѹ\@gڲ^.сuO>UCi~ЗέW&aCcp$!3Qp(qġ> 8$2kV,'X@9k!2 vcPδO 6X&,n]qkY|îEjLT2H%EDAPYeA`I˚K$^2fm@Z8!&Vxƹ PF;b3ɪ*7HNX׵Tmdnģj*6kXz=OQb뫬xta1kȞъHȽ,@ë5BD|qY[ "5*ΚkǢE'b UXAXb`s^yX7Lۇ7Adܻ :Mcc,PQ7{MJ%YTʯ?m/ӰXNcbԫA^5뗜(⺚ejju闄z:2 N~WY3]Vya}RJԵ\$T1AW nⲶ!TEl ikd˘X6f9G\%&MRBDJu9Ř5,9͎~nn~;"vqv뤊Sg-MPCkEk$z}[aZvn´HD q5LRaC pukz+5&'9ir&: 3X'\ 0^ۯ#b1ҝ9 \#1B6 b1:g0Вdp(xW=vIW g9u\iu'#@kjUB18zPB}5KC&6YRӉYsSNUe(E" ˋʽGƸvL n;H4~GRIH{V-պd0˪7+:ʺFJ.{c5s-wسT@ZP~ gLgwxavl!LNq=61n/'>8j;%CZ^Z]VR@libU%YUWk=H [n0F QEcO1{mZrmFNܳUܵQgtU(Qv*9D*j $nV3{V]T2:{ R,DW>݌4ٶ}>nDݮVq$UY=[ ; A5<¶*=&75ɿ[9 9y7d Kx`tٓ61$TsbCű3A{mY^O)}ʢeS 5n_mXd7z-W^M;ǭ'өujWڜvPTF VIΙ5w-/mڬ*:g@mש 
YqruלkuhQ6S烋X5źVh,:imgWT ˦K]~l0B)sڲ2v6( %"q uzm)#>AZ.+ǁy5DG5l7W"‘iWY]5G7'T/v k'feU:=SW[+t'ߝA94* ߳NmM IQbvIe`ͣ]L|%I}R+$B[`nS2-?x~U,Zt |5I - -B!bHV&1G>$m0O3w*hKB p AشN{\Z}6#'OH=nK搡/\XQՃ``P֯$̱kІePU@gw[0DH!N+j ƳXiZ\u=z6vA!|c1cNUB'ecX0PhZrp,z3{mܢKfx| $Jֹ" ƭlbگs&N '@f4aw,tQZө\̊6C(š*\p{w\Q& 3ZYF)E6)E*$v݃ |)>qc;峡}+YiϠ7knO5ai^$ƙ^7꟮u"=N*[HvJP0F u˰lQ {qL=|t +??*7VR[@\+(k{- SaXp8gZl[]Ti >UWGr~t6Hz [+%]ZrsZ&m"anq]] k* at!d>CN,/[+̬!]1K lXՊk,.he20ijno,UˋNMneVGvH^.I^zΪX~# ++"g#,˺Uefl)5Y u@06ʧ-SWwE{=i ] @_ v"^֖ $ P[ nZ^3G"]r\V_hGˤ7n늊jlf}`]GP5,_ 1'B3(e)??M>:gidh\)z}^j URX N/&VnVtnr q^tvO]ֆ]a"ɬm, CTP! 5]mI+k{EU*ks+~SsM !FYSU׆aꇠ~({c ▹m;%!h^k#~6*Ks\RILX^`جŪ`is*u.LstHlրfk{c#ڵyθP]t;گ1eY@&9Vԑ *竭궛 iN-No* z{Hm%: րNzY(ҴͯZbP,wNm[hR~:,U[%zLq]cuW~㪻Uߦ81Uw:Lq]cuW~㪻Uߦ81Uw)w[5[`l暏 g@\D1̧:+V["z2[*2c\ilV(G9&4*lب±tɮ/s^eW;:Ѳef8אSٵٌ$X_QTϫEYc.;&sIbs[ /ty! (L#2.TFd*\@yG?{EEfDF5Bښp%KC6LU}Xtꢛv5e| ^CHbz(WTF;&qq[Pi?9H`&w[|9~_܈QAoɓs@{Qk $ pETlLqc9V9J:ygoהXtDklN)Ṟ-6FѾVקa/i Eo ).BwEf!}^q]g5UDQ& .!i2`+v' dH ]'Cp7A S1kthN ̲a]Kt9VGzsbyw%G­,eS$q(Rh|t 5Ïٝ (ijW 1.c{9\r_Cb` s$":. EP-&fMQ,q=,h}uWSB#!)/DEKEVl9ġ9 ozϦFlW|֞GgNs@-g7TEi 1|̏Oɂbc}7_`3;"mGl⨅onSt^}HPܳZMŅ U]j {(0֓ zuZ93A PewPfcկ]g 1js4tl\d')]`Z:pߓe]J@qF#U* hDCVWjKM[Qzg|*ؙRLED2葔gejk /fyG?.7j"&C#:Ե]%' ]-jdu\-hqa֞F:Ki]vRYBvz{ ޡŸPzv.wEtE2K#nL 2Qj;N+on eGQ6]r;Ui%ck `jښIn-X]k_Cӳ\#%Z|ð$h6E뻶[&53")+Q{UsH*[u#*Ӌ1z`Bqq.YY(5n4p-_OZQ 4p\n{Ea(N9>UCi~Зt]–qk3>@)YB0AˋKT=dxBQbksT YvVVŬPG.=u qc8>3i"b}R!%XαV,iMQwL )tVudrg|Kߤ5׍lem;j,J8t֖pjMN" ,nuetJm`8QMDU}x.A {nЏK@v'HU'=souqoUAs;C  k3s-u:UW4"i&i,)S,(:U,E5m k"]m4'O1`lmgP\R0qNniuZzSU)!Ԇ[}e9dp9Ohv+QyW0.:Vuce8ZUiѹߵYÆ`ٺ^ 392[GN[Fp,aԳ홶ՙfqPn#[\ g㴭㴭㴭㴭2@'c{?ܧYz D \(K>#R.iň֤!N( ^-߮pN'\F>>MR1X T7~՜lX3(մTdsC8_UK't}>۰xqvR?} jur~?~U1[STm*iy'*e0jef{ $yb0 YWNcWs XB( \JBSEP_X f K꾧_]s,slB] z]QrYfmWAᠬ, "G&}c\Yζ5 xXdHP25WgI@!.8&1(C2XkI W6ٕ&7xC;#>R{$z^4T + Jc_NjIc:RJVHhyo &ѼLsx)UַEi??MRڽ M%aOOy {R?tٚ~ JCQ:As)YI.tұwoBg׵+φK5fne!JY4{Ӻ}lYȠctXs!l'^˙ !ciO-+Nuɘ:"D0\L *. 
1ρ͎ a=#3ydSabL3+Eϵ۷ȨV/EiXmO+^ۯ#hCzo2Pv7/OMi[:{\6%\jY.vȶMD͡VE,U%_Lwu όtzE-i:;&G[:??M>:_5miR"HMŃHu& ,SPbb5UhKIkuTUn9ݫC_yT0aIV]Qۭ$HF8Fr~+|8d7\CCin O;a^n8dgmyJbo[؏(JRT;L@d;wЩF̠ h;)N,"rjٷvӨNLvF#c0ꣵXѿOʳՈ ':F&f{Y,߆a<D`F`%*o& FFsvq~~gK %Ƶ_YOP ֲp)ZSؾ f1 G!T)$ssUq};Ι{Dy V AȿMu]qbw*hKޡ*ԡ{ *aWXM.c>*wu40nA|.6߫կ_M(#BNzНf5`Vl.يε;Yi;n fx[Od~ѹ iM%}^QeOM1B jTl)0¤.+vMX bd‖jkEl۷Y5خdAl\hl67yөy+z^0c]7-=P4 럳Gb6zv9i޹ukm,hM?Z~]vXonӸOj4c3Oʳs͌"+6@T\0@\_+` *DG `mA][X)@BVMEͥx+dtu",}ۣ=YP(ieW>tWr!s^k#~6WͱQkË8њʹd8; 4vl5;LE(Ƥ}ϽYYC*ccwv^3Hdqd5MK]vB~J+\9A'?w*hK K8̄-:@:֦]AEQzڔTC0熳L jzDtUإaqPe5LU1T9kjRf9e.A\r`:޺suZ382]'kdۖ\l9Q] !n؝nܸ_d,?n#tjkb~qVd=E [&vE5%bT4&t6;;.UJK-WTjʼ=6ucy6ܵڶrθ6-Rkի$i}7),/ lS]5=dgT@ Hq'ORS:h bo6FRe&1csl{";l4FTpNouF HrVr94㒐5>#N,CJv&g{{dDOW?g g@m UQY 36J'*G&xs!6dli[ɾ cZfҵ#Y$s'<\6) thlC].;'@pUvR?o=Ѩ^X)@ \QA2R5+Y0C܊Nƾ\F0^Llu@gF/ E'xkY@.dtNUvEabROʯ?m/k%-d'\<9+%1+EŃ\uݝ<%Sݴ|b=h, /5U# TNxbf"έWd7W _ԨS6-a-pt;CUOޭ}Q =KgfBLxW*){E sf 6sSW5;^r9Uj4>*.[,S[yjTݴ }s!bw;χ! D#食VIh'׽t&⧓ hlң9aBlPp:z,#b0Ki 1lgºU>Ol{:ze&onnͯF 4mj. 
1(66,t}baFHX4S8l $MO_5{KgˌGo͋ Hx *Z߅WZ0J36 QqccIo17>MkMJh=be9c,I\k6,Rզ/4f)5XqI9rhjR4\kC^K]5h1Z9#)]BUeWb5 [5dekz[ˊ3VI]W[eV6ֳR,5 R.l%bSa|8s)Vy/p꾤+RNضEj͜\0~ڲUBmhd8+UM,"&-\;ؖGYe9;PyťN~e5QNUl}sSPg,^G^t'o^Af0mSUYVh2\'m]ylKdoDH,tqhdRJj-jT-&gl '6Vz{ҶifnQ( ZF\K0J_1{mЮYj9Xq(Xq(Xq(Xq(Xq(Xq(Xq(Xq(0a f]>\c>_O_8?'yDm& d<풖'}FcZmױSao&smH*Y?NHguj͉pk.TZ-ň'V [{bz2ԫ`K۶j"\$nQ,D跂d&Fx׫AYTT8@~q_/Un:D;[*Oko 4mB~<ڱP^-V_mf +MU-]Ze}!i}r-խ=N46 sX|p~z{QOʵQWdCrL*t6]ٱ 6͌@AV|NҮɖE2ޖ^ ɫ\5ٸ[k 3NST_nE.qaYyG(,{殩UT,XZB[YSS`@{]Rv=I,WwT΃ Y83[gx }*jecNuJ%miy0mj[u;&V1-i.D{,#ˏ_a_%v*5|ǪJ`:ŭRbjYVNn,ȿbw fUX_6^4n/5zdjt)_1;e|My Da:u3Sa E!rx8಍}rlݗcY#sM A|E 7;gUoض/1, Y^EL* 15^xC朳ON/1M u|OOpFJ۰ִ\.q-bq=kq^tW\3J\jVٳƆ!m{c~GLkZK_kYۘpRu{f[qXlBU$6T- tR;n/ꭻ6༨Y,\$¾`#gQHة0m} >έuf652j5wj[m5{&M.Q__oA,Eg0r?+o2[.\ #Xw+cOܬzi;M?rV=4ǦXw+cOܬzi;M?rV=4ǦT{k2sfO02.[|<,"J Kؒx_NK +IokY-rXkV<ī㾔дЪesIn-UCԮ@wp#7S_i>H+p~z{QOKpkL& + 6RGeY?|PbIhͺk.rriyӲs+ Յ:L@9lܝZm@BO6 4uNUکaO0 tK{D6:i(?e5CUYPj4|l5V氱 چWcвyuq2χ:톩e5e-Jƥc@.OWA9R22؟\}ܪ.-k(L g; huҷw A% 38nֱc-En2eg\ aQ<cM{nЏInjYΰ<Ɓ -hQ,4g ilf}`lLfGu-au0457MU9i؞#vSu;?XBEqKK|˗#O^P9&?Oʳ{U q]F5jцeJO*כּx*H!iQm11"#wKg&l4/i.IΫ88ѭ0l7&f UXҰUq:Dy+ukFC!Rᮨs/}õyG?Ekn-ܵW,? 
9CkƮ~.KwrD15 SʢkB4 ] јk -7~u Gl[m whmvn\^ѱr>K+ ɪ"Tv&j0+˜ӨA3Z1[&};{zniS\bQF5 *+mM ={m??M'S=eolv̴6m19 0[-/(: m6kwE隈DxYTj;oݎ: /aA5#k֨l !2VPkIvN2}[޶QX3)??M>:gĥA[K,c+ rsH`BX4Nj*˴bm]qY2Mޅc%ZiiR1"ld7iکڹOW1׳H3tp+hڎ1r~K 6-rᵹCR#gdc o;fb:60p~z{QOʍo۴P#G ˧lr".wZL[p!b&=3jpRV UvcLugӭ˲*,$=_a Jmr~ڍAv E1i}UaGJS}9=w*hK[V 1gҊ .aK8̆pv5Qkz#nMLa>Ln,KVR aH3(N8emB>]'ǭa ̳ I,ӣØ~ƑY(:eSnW>O*fQ@CH@YXN5>5:4'y[Y 7lvW+c2,8Zx`Ygr.Q4WjvbgYq,c83׷)ߩ5Af lˬ$+ӈgB+1Ԗ>n$N'2hohW!Rh|t +??*7EWԁm~js-0q`,Ԛ|u eg97 mj1YhKc33^k# !BH$ǭAUeRM~ 09:qZ@9قrqR;4ģϚ_a_%=K2|l.F޿}.-(*mnyNe bEfَuR"RB]>{֯+Ei-p\'Ŏ.oh:X5*X#ltX^2 ݊; R\5Zq;ߜr}YCS.pY5&Жm=#XYgB&<_hGˤ77fqgN4'鰀!+k>Pho Ѻ&`CaXc!~yOSrh0.N{MV1z2uqoUAs;C :{)6Mǚemzm}qK@W&֑QzXqBoSPa)uQѹ<&;ư}S}=n52_([!PAOZxs&?Oʳ{U pZvoa)["+0PM[[SYsYUZYrN "JcZSMIbOt13Cq/}õyG?b6a`טAr@)]KOd7 W匚:M*(VWm*u[LA \P4YҎJ:${I8*T;~Qٵej m{ w|UU|r|B!m`p2\Շ&uH{W?} p5S MA`ck2~$tod1# #L|R1)FimyԞJFy"*Y*U JclQ1z&ns!]dm"1{mglXr0߉|w_4?~%Okkqx8}f;_޷S^Քv |h[k}|O)תt99-Z \b12F"((S8P6~e"r/]p(W\q8pfζ$Xw*hKzd ;wb'vpC,jJ'DtϲmlV#9 4觀,]U?3e}| z-qWŚr0J}ֿbRϴlH52孾ŪUHݮY3z9۝CΞLcpԌnnj.Yu=nRk\<5F[ ڵz$ 7م_ã}my`P ٯyO,Yeg=;#k7V:q^]71:g@㒻4Kd_TAR"`F/ss5- ҸWh6Tϒ-om9t]9LCє>b;_޷S}X"z/eBsVob\Tt7ſnB#f9P*uqCb^kcJ4vݳFoU9p풵u5 ='/*,ݽ݁W^vMݥƆV^o'٫p Ynhk&:,[Wh9Jn%f22)\_+` *DG `y.d!qPs*>UCi~ЗQV. 
>5瞅tstw\׬OS ,%j&X\(#gKMv( ֱ7G t!z,F ZeE<11K@:OױWHS`'6hhYje<*, ?}K -N>fίSIʙ\9o.jZL$5{[(}Z[JLT"˕J:UU)LeV2,5mB>r:ZD+E&u*M+(tlZ˷eW/!RՉ1Q^JoU 8k:b@P(~:g@jb5&AO4˺Y 8p 7^^QX< 3 va-7HF*i *`Q|[<-}b> A\1c%Ha ZhH9 (ouz$!cu|n,Q\[iZِEQ2fZrJsyhj[iee i;U~bE%A0JzUf"\v/d`#/ؐrh֤VSvMחV4L4&uAl=5UmB\nwԀP޷].3ʍUj*Vo_i#ڬ뙭rG[f|OrWUu4Y;]Ք(c9'& ,ZPn2#aSt[T33T+G:Le=cִhf{\"&8ęzoZddӾ޿k .V(]lzµy3dYӶ3洮Aa 0U}obq^< Sɪ$?5{m H,źG)6;tdUPY^jQ-uG/+% *.a3/QP jD\8;7|<6 1z9ݬ׍2+!WoBGl ԷMS͈/Koͦ^/wtq\Z) h,.J:UڤwsgNҹ(UEx1O&p~z{QOu~QMzv8s$(]uǵmj-]sׇV6SH:s{£5d{vZ;pˁN3Z7uF6 ۓc)jTZ \o2[_W4gpuiwmj)^*Wп}l.ɍSZuFpӅU6Mġh;i붳RЏHYokN3=mb,^7mFU$rtϫ$4[q ˏ_a_%@ ׉z ] \O>/shM3gdAA֢H07LSv 3 FD3+֬*+b6F?iqeYbl׊vqAX9T/dS|*֦Zj@6+sCvxJ.OR\v1>NiW\˶'";OL]S-YpvH4]ָXW)3 '۴d$jdGku||!C5H<OSt^}HVu[ ,XGcS GZZ+6XT^,9V<4 yN$NZv;r`dQtZV1a_ؤ(n҂ѐu.;_޷S*ڒeKUʮFkz/WZ}n+;53˻v6'V423!)Rlwv]L+'wVр]ji.l+\SՏ, huijdU ŕT80Yv&uNk[וS,0ھ)PmSZNy/mB+Ƒ:˦,j!CV`֩aѣW 3ˏ_a_%ݴcWQ$v*+w{l3'w󮼦6N\칥7?K?S'5K?\[5AN{5)ǮV5zP*Ik5Re-%Şf)5mtcaW%A]frj1 :r_hGXڟ멊E7xߠ+gt +??*7EWԁƗߪźk͸Ug*,´~{hpZ6MS 1Kg}$g(;bg]&7 X;QHfHZ_\t5kxb4 on#&;_޷S}BU|ur|,)r] a>iEsY*XYwc5v5Iy08UQ.g%:t(X'O'cƗߪźk.RX RPgƽ{ OJJ%[9uxuke.RrVS5]3lS"sfe>WYgr#fE%._2U0w J9l7]հ|UmB\^gbC^˧L#mM|NKFUXeP#{Vks[ [I 9ZUŴj-49ڃ35Ʃ\6٘ӼJl乑fc]Kmk24+˓:7ZZ4#쬌 {nЏqg961Ɓ}Bc.٣٭5uOt +??*7EWԁzeS#+VZ.mɥfXVè][ժNxayFڳEaL:Gɬ+`f\(5 c u {ښI /(KMGboWg\}<$Wy7HF:VٵQX v%56/5Rvl [6n]| )*UءCeM xXҬCU(RK_kP\mkZ%r3c0q.HSxƞhTՖy=lGdwe; >#J7"tN4UuM9QhS{Ik_*[ !Rֵ% hTy=lCfwe;1]cMO5x!#Y32-@5D=kOq9uNw*hKŁm_]_+&$O^3,'γmp8$18⫔zf#ttGTMed}x7v*d,xGp,cZgU/8=RS5 cۍT Fkl8Xg|_hGnοRh^ \WI(ezTc>o_)Vy/p꾤5ڂiWçvQQ~X犎ls۪ӌ:.]0A֙οwYg7HF:axa3|m̠~sSAr@Uqٯf JbQ8Fx1qUeSϦ'{mknI+xcvM]:vYCQy FH=5[mXrIX?ՖŞAPkdnKasnj=Ij.nD cW?} q$5.@Ljۃ]mh"R .)+Y@N VfISbE8bR Zj\:'l,*. 걫k ưvg4PelVY5ï{ RYjոD}`HI8:kqTW,ȝxL{nЏy[[_aˤ'xVLi{G홫fCV ڠf(Ly|Yê8԰KKMB6 ;6ݫ{.vw-ll8{w(5R8ASF62M"3{u=dj;_޷SoV[fC]8? 
Z}CK}PY`x}wtLjs46caC,1.P\r sg0sV&׍^ЛXfҁm Vdά(+[qJ S|}ܪ/ eL6n5=5ZBK˅5ړ22Hc1Jt}3/ΫZO6\ u3jJbeYl}9smԓq`BPƕLO=7|<u_RuhҠ ,gŋl8_j|4MesuMibG#hk##ͦjW gvVi b3!(.)ӴLd t֖pjdaJi:CfjmjUmB\r *ahb'AFZUV;SAV=[oTdyryfgܻ)-g{\mkdqP3b8աVGqhL@|\k<\ 崞;My.ܬQTOҭ+ l{>eOu|{?J'E3J~m:goCŮCsR-\bbٲګǸ@ʕu8NO{+5{ԏ[tj?ܪPܴ Gg4uՠ,9Uu5 iwB/֩G]W@~MNU}ƻM]YbkOЗC͞t0_5vHOהeU"%VS@LPdaE!@ ,ja)ѷ7BRw:jAoww<K=ǎ6vޝP_\5vPB|>P;4?whptzMð׽[m>n:yZ.7qC|t +??*7EWԁz/'s;J˜r:ؒB˫qb+>b;_޷S^Քvx9s] IWf'd^ , VQf*ʝz銬𒆢 ln6)NSG.Yln%@.p##xIR֘SrNPQr>KmZۉ ߿ejnRL֮/p\o5@%V"J#-]?v܆4jIwDk\)4/m]>6 A7@szds{i9QwJ۴(ޡ*ԡ{ *dGWU W^(j(.F kHӖՍYpSLaH\V우+6lgZɝ,״ٝn<&u;P r˭av^!.sڲ O!2!'Kku|'sFfǟS^֪|!S T >{o_)Vy/p꾤(^US4TBR\]4}\W濘@4Uɭ*_jl`EkcF&n1mG51VO``S&Α`k H ryYi6wVmzDr+]kv ,0~UaT/Ns5%1 9Htglzi]zI{vp%QV94rfAPD)n,5*+,Q f[s!dCQ- yUmB\nwH(t DGeP Ja'lگ\0g|hef -d>!f}s!ba3e4_:v7;{0q[N5[g}XS+6Յ:jؕ\*sr ATB>N9`&n!y{nЏ޿-R^@_i~2?Oʳ{U q]K uFŲ,3t_dUKh&v ͥ*U@X|8J RfBcnΞEa;],ܗ`5FݝXP33,c{EW-lpJh򑱐d$zjI{o +Uh ^} 1Rڸe05MB@] 2P}cH^RoLWVbUp:mYVi ߁w`@ &ٶӯ`O5,[6vdݪyz<^I3.iQ[+^ۭ TvkU W-mg>zKM=Aj9j/_a_%ŽY(' _Sژ.]Z] Yr ^])Zާ}e[3+X l+w Y٬9]m}%mk&hV*իȪ+ĕLslZg,L2%2t@akThѫrYKmB>n>ըfqcݣU7[g>wo_)Vyso_)Vy/p߫:},`SO~:|f%G^5mn) &'QmbV NaiWȶ"Q٤m&it.Pu-&ol@8q>3g}õyG?;}UoLMD64JB-؛u@qSQZBTU|X SVʋ4r&5N)[ )6Fj`ιq%O-[}ɺlcqqٲ  MѶUU w-"YUԕ,X>,H [n9nDVyQUF0q]4YiO5w*hK,UkYyt)8-P+Uq;uZسbPN8Y[`Ş45xӭıI f[l,V =9ivjghWQMmV*1 pڮ߃EUrY]VFlՙ:zx_dLcnSnhE+0VSG촚_xt :2rga6$ ]i#tHW8o3{mz;X,?.XO'4>~*OSt(+a,sHYƻҏN7V+R.^N멛1NiG36mZ&Pv̤ N.kdBQlr{{emC X* 6zx'NnF"8F4cc}õyG?m׆jX,7&ݙ/X%zK*PV945umVWGw uR Mm"x5c3vN i*bYʋh$CTa9'a,#ykKWgj @\ڿm޼J }y*u50k+e:ZBpG;bԁBK^f>UCi~ЗwfլFbVIq3ӱl-O@y#pϕA$6㯮DlC꭫Ӄ`{K cԋlU`>H;O?X{UUM:L8MFmLs8!Œ n 8=o4%=`]0Cp Agu|މ>x_aXso_)Vy/p꾤D{) ~}k{\-(+fk4Ũ]SpNIgl*P],,nk!^k#~Q v1gkS/c_d,:D_}S7V3ծEJn17ӏC7 z(" ^!.Soqu6ݮK|05m]]d!ɖi+kq nn 9,Nr>K/-@^46d1S=UɖZ*{#j*+jRWԕ+(5mᶴ )XJjb>!T;s9֝+Vц3Dm!VZڣ+hXjpK YPKqѳX#S+-wk["#ԓeMw K>(+[qJ S|_hGmVo,#`RUG֊cS jwo_)VylJT2%܅UWQBjqd⢻ZCtl)U|]d 1ܶj%mNR"Cvm! 
e9qI )^s/}õyG?kNp96C݂p+Vi"]5 ة3"μkfl%,h;Cyt9kiYnՈHn;ݓZz83NPͥ.Yd$%ܐǜ_a_%U,Eȋɶ56=t, cYaJzBoH>VX\rrpǙ2AB6w s+[[f2 >{^ۯ#rх{/Y瑮owo_)Vy/p꾤%tdJ~VN4lJ}ZUlx<|vR?o=ѨDU}x.Aqc81g4=SZASO2O#^xmfρNʶQ$wMpړexw*hKRZU~,-LC~$ GTuxDmVik5ɆNjΛ$`Cx"PUjp@0]  nRZU~,-LC~$c{,iS 픙+)*3A%T  o6aZ&& !,*:vbt0*+xlh9E3qWe{nЏع5D[ic8[[ г;7|<7O+zn`GNQk.ozzowow ]!xӝođDdvﵧ`xUSVbY \{IlNG5rFa)[#cWUY0/kq cM2nrbvɜ\xuG]I{wR+%wbWaQ&ܚp1]oض5΍6pwuQ <^v[Yнr~?~U1[STmAnr '&L`iܣrg'Ҏ Ai cg pt: veGf;<>ťM)̼ P(W m֝vyOIge?ا1r>KDY-O5}߱o* 61θ =p [D5z) MIɌeR3d$3s $ig8s{nЏ3\<޲1Ǚ?!~ڞwo_)Vy/p꾤 E*z+Loqczv1 `1Xg^2/g"5uCM1N9e#^k#$e y:+wa_%h*cvԮ}; u2ĔalM%TP*tL>Kc#RE\$UmS3Ujx*8dXf6;wdN@{n[l5QٮV,k\͝;+*Ց63CQNj1iCI"kDك.1D#eUb %q'{^0.sYjkF97=juR'%7mx*[9 #;>$U}X9$Iۈ`g^ۯ#v]ˆ0lq@))y"7Kl =V)T]Eml֏mT @jJUAކmMSߌ+ڮt^-5;6rV;(]vR?o=Ѩt-hOX5ʞ'ضk+*kID'Uq0 F˶j@ƋE׆cul9JbV!XځVW{V)Y?]B+J3Lr>K[`P+adl:*Zo0B9UFktVpn$d0:Wݠlaw.tF+ncb6("ƴ*R+2`Oױ/LcÁ5,)j.q PaSb{ ykBAyK0rWm 2`AUOKS58&-Ju@h#+l.ܢQc-DK v%ag^ۯ#wza?ƨD3Iώ|Yê8Ҵ-4Tkuvwpʥ[eZ>>~ί;NÆ4bf̶\SU߿iuXp)h5bq?3S}õyG?Z3ݕ~GcK۶j"\$nQ彾cƉbkں8.1p+=qBlV5cw],&+ZkvmKe*vQ9}{m;'AgOoo_)Vyq ^.Ӗ9/-AYBR y6aZc hP ƇCF`ةpx)3 kx\M ivel̮KQf6Xͷ&ϝaZ2w~Fm~kKW 6`1 ɨ e\3ԗyĽkzr]Um_v]@hs9ß`nI<۶P+bhrgnVUS`j3 Ri\ L t_$D 6XbbYڈ}3yFY(g{G(_[OS>~-kh)#S9"=V870X1՞ϼkZבXvi,Fؤx WX(Sh2Y\cr̿":N?@1sfAskE_+rVi0`R=M;4pr`]d1hruJw[E2k6zȽ1d›+Ybjlvc4ſK`mxVt36i!Gol&W־\%J9w*hKYyT.Yqf~ĪOQ _WG_J0h,cՙ{Xz3z iN~(\XֵZ.ݛ @u;6Vئ^es4ց[fsYm-nlYAY|ꭐǔ0KRY'5mB>gCϲm9?Oʳtj?mꌬ+ a]b[Ck=^.:;WMOuv2 9CTB2bk3+e}0Pnw[TN#R7\e|v\Ѝ\zECAmBUu*BGng q+Um&ua5)p#I;8_hPTZ٬ݣ,붆/){"1'X BR:J=~8ծɨn5OZZنluY*sPDBO(h'`O/2um? }ܪ/"\ׯӠEWKMF SXArԕCГMu٥, %}{m⢊Y0'?~*5ZZqKK! X UQg".ʦ'zK-Z)ihGzh,c_W*JWNȕgWR.\ܗ 6pc"0HsqfӣC073-X`ɦ%RǴ{^ۯ#7kEK7OIWU/$#rP1,~ Da/%t V:اR\ pC3$f4?5{mzAZy͝ Xk79N=7{mca "K{:P?77|?,3$hj*UpBJV X0Yk$m*rb9EI`ERLMoh:xk@Uo]qѾ-y19r$#ּRuj839j 0TV" QSU}ZsƦv̞$uC5El'fY]w_DEePHaq5I y=ARkVS U(vxĤg?5N91xϗEh}h01Ϝ?_77|? 
sG Xnhjرj,X0 \ mWE24RN^.CuCNjqt8w_:x<].CuCNjqt8w_:x<].CuCNjqt8w_:x<]8CsF^jv/Z<6oce657|?6݃UcK g 9s Z+)tUPTNU:\'8/rn?OʳYAY@$Cl7Š-?κtN}~cuq߻Wnj&1͝)ko_)V ":sj',j\ڟpWcqC续J<{r Ի3x3 `5 w4ن,G {޷a`ǝ߿ң7:gdR*[ga^{C(}`"qȹm7 j5n 9& :OH VtbI(k۝3w$ gZ+7`댾kov];zư|ܕt9gN?gN?gN?gNN,۸H UXbYڐ"K#[cs>v(,%S\@р¸O1eN+,ֵJlqN ]TG%F+ŬrCy"]eS]|ӄI BX猱g'_0537|t |kp2U]ŧ,[UtVL;7+(Yhu;&maZ3_q{ʽ}|wVϮ,ˬk TObF^mW-rmbhSS9Jsd𬷬5l89Oʳ5f]^YJN=| ZMB4u v.heOǞ3m=b|Pyz]b\j ʳQP\ `jg?,57|t Фfk9Gxo9^V[DjVw>黠ot[ѥI] XqkLw6Wh 0/"Thr|]b)k=?c[Z1:܊lIT8juEE,˥J T~S kkz3X?b&݉OG!]]PT+/`~nS rVui"$ռiSt!/;9ԬUDzB1cǟ!|O5h/+ƸzƥIw,Y}?Wew_/~;_OkZo*TjuwAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAjw,wAkP[JlL<9Oʳ&icv8=ճ>˙O?uZİq{6}<-qN+^Rұ; ǟ=]ܭY@ǝڿjh|M?x) D57|t ??*)VUUYZ~q ڶ^ѮK?uutkZĸ){N+~^w=),k%\#wڱ{1JOozE:Y??*[MX䙜&9ĞW/Jh(#)ty[vK\z~gB"i*ؿp }~(!xl{칐:Y??*;]"f.3ԩrfTPcR31=U"CVtVs I BX猱g#^ K>sH6L|:@7eKjko)Vx9OʳH_X+v_9GH??*)V|kyO%laˢ8vbi}FSLBLsw5UX?T lصPvtshre<_9oVǯ/I{ESFʹT:EWԁ$E.esȩ˺Վ Dg55M}'^!#լ Woqh>2sxX_Կ"9s%ƫ!~ZPtȻ>)a,WktƉTPҵT&A0FIW']\jV" '[493YPT"(Ju']'Y??*ϓlMa1Lq}㯿u@Cp{plx\ҚhvM2|sVSՌLbu+FmhכtlU)F̊W@'Ku{F{z5>;ѩ^Oj|ln&eQY[c+a G{F{z5>;ѩ^Oj|wףS6'ClL!QUcLr&x{F{z5>;ѩ^Oj|\hb 9bX{z5>;ѩ^Oj|wףSm&ʰ `8ko^}H)(g)B F@ 4OnV{niofXZ IFg56tQ!Aw5'HlVFOtsi_/XH;uzfnu{+՜jɳF߷&9&.a>I=ԣ>H??*)V|0;z|19U`{L`@,>@ģ{*w 1̡>%{—vxwh?uly*51"1CMbg$ ~ݦ[p168uvj\')8 Z"ט[rpI_+3UZ->QzKpO>OTidjK\k>'XpCWrZ^/ MN" *}_`%"ځ~28.N"8F]]w:Gz*~Aw'a\ (ѷ, Q"&6^Mai4n^`ɜ 4Pڻtw}{ v0p. 
@Y˶eX{IEZט9 yDR#|<Yso}S~ƛ⟍Lj{L31B;/^xg&^LR7ަlQ^9Slh7?[e[Ԧ\ݹySnBVksl$|)#ur063%Oc- ~o@6'hXS0Z{S*xu Xfx0ݢ=*ɯ(Z~LRSe<E٪,Uyo &SS̸" 8<3c=w)͠Q E;kW')݊¤h$׻NF#?|ĵZmA)X.,Cm{o]5};Anp m&,(Dļ?s{kvlUhW@xɦ#u~ȃyz V+7e:ȆL0!6l;Q VA-}.Mȥ x,fUbD׈f㓯˩/nJPEvUj0r2| ]!xӝ-Y<`mNj=nTK&Jl)_ɬ"xqU-*/ko^}HTYoB X+w:-u>DnPj_O5vwTWWWZY (aY7^-(lRʽkI aN{[׬.+(,VWZQK+B"LS#IG)Vx9Oʳ[:cqږږ7ОI4rood& 1O?mE/X vhyyɿEI X=il8ǞlgппппzJ¼sK/M+S8c93(ѣό>NXJe1zG)hij=rZ:N k:U+/8zG)sz9N 8 8~Jܕc!nT 8‰?1UcugХdnnsS5N;ѪqSjw5?T㹩O8jFLXբ&p"F+k[ H4y.jw5?T㹩O8jF+jJG8>3bCO8jFsS5N;ѪqSjw5?T㹩O8jFsS5N;Ѫp xb {o=u_R 6-$mash6Fz`Dj0emV'1}Ze"|Yf1^YY{Iu]NuY »&PMK8tZs'\ocאc9q^TK*pPXdRAփ7mQ5hHd(:nHsd"8 > IG)Vx9Oʳ wЍF=u+1T9R5\L繞AY:#_7A؛h6ST)9VsG0l?D=e$8k`c<6<\kYlP {2cjLsazU LPjRK7{ 1.`@ZQM;{`;fVED4{oV@RBA意N|ۥ LU'7sPqw>淮1vZ:!~9lzJxO`%f9qc?#f83cq͜jI^Of3K>^?%?ۚhRfI`KX (sO57z/p꾤(۲WD~,G9gg_j.-mwTiD osdEXF5(-$.,U+&-8JZbؑhةOJS8lgJyƛ#d -ss20[SX(JP2Veya 2*mt 6`r^ˬ\;e`یR#|<YѭOTFA Xc s=SX<,MzחO@$ɼzaXǬo6/r?cW>WN6:hձc1ʑ 1怙&Wc/mvL孰g lڒ?jxd-Ȑ0R2ZV#ٌ*kNp1B\vٯzmxڪlϔsa)f58g2R(O9vqݻu#`YIe ~̭Mj_4mIuR'g"!ضuSFag lv}Xg畱O>Su7kO֊DO57z/p꾤!UTh!Xo?H"V\:MS&漚DAZjIbSSRL,Ⱥk`yTSNf`W azEc8R X@rdBB,bgCGwb֩z#j'@*.K 1 zII+TUƲC s@~{G|USs2qc7Z @@QZcҚ>KU>l9W~;mK=_gu7 @Z϶UI0,q895iw+oxn8±챎nz>|PV?Z( w??|Oߑê8GMZMeo*MIIFl=uUj{كKO&[N [m3X; ]k3eȡCPڥOV=OA~U_#gQRpؼ+ `6+S5N"zFt0͢I)ZP0 F1g gcϰ8wZnW8qX.Umw]ʼnE;o)&0ڈxqG|, @WJ:b`QJi2Z}q%Nb<~M}>r:]~a_`XE2,ڋQ]KJo&}[u2Ā8`QOc(2LVYҭSK̪V:3 !cGi&fIgko>}HONdaB INRVC5aT5eg,cu׵]@31ؚrRR:Բ$ٔU|MֻǃuuJ*FdrzM!ڳRS jB I7j.tj ,VkL=W՗n[BTeB4PT;pc3hȕWx3m%Lɱ %G8A՚34Y½|<YscKrY]y29fL{q >55Qs>=pÉq[M9(06>x+WV-kR]@clUf[Xl%=y,ln ٍ`B#GcaVqαIlŖlL"9Kbd3튽.y&YKX He? 
eBF cŊ,U~bNj_ckڍT/ew}B ☰^t,^qfמ~ZjŻ-a`\q{V;މJkgJrae FQKҁ4jEyPG*'8g>Buٝ(gʳζ|3\>=S淙B`OX.ǑJ](N3[ǟ+ե&a85QۇL ?|Oߓ7*/*uV}e%>O??*)V{jMZKagpu[0 UB!0#JQs)g1Ϝw)$ a3p|g1fmnN W5L+.y.rqD<ˋ]gF2B^͍>UGxL߰&KKذ"K$6䱍!ϯQR2_#d~gICz-$xulRiE,a7IZiܼg+zEkuhv݀ИdKm?"֞NfK;izw{<UmVMvs_ӵ2+7ܻefy 6<{ e]_뮭{,^S^%YC<Ǽϊ$l}r -[K[/ ͊f2C?;hx<~:-o7oMlp)<%rmtҟF6:6i6Z,׭ORu<60Evמ=v%j8+paǪ<ĕZ<i8aF>/rO9 8B#@[=~873/)Vx9OʳޏllFqHVْ5Ŏl}UUĪB(#qtN_2#=B4)a4,8rlb R)g"Dgc݆#Կ7Y7E<;<\zn̷@r%m+}YaK{ƴVX BgXʶ*m?*ުldY>%g%1#WصXs?kgOs9۝Nyе1{ OIrsu 8S,c1بpSk οsg՞~Fea)S"8s% y{=qAXO57[VIi꧌Q Kicv5zppU\gt͍%ڋXl=?PfTOs]_̰FCϣֈڲx~N1ҍz\swш+OP+(?%ф bp^MEMdDlg \ڱ1u,r `zI`;K3-٭kͩ)sbƖ\7V,gSMm µ6sgB7_{g_TIJԽ;UufUۖj^;i$ ßdsJe1ͽ֕}1S8/n6j,eGqgq8k:^cxν^:lM|cwܿmEȨ;ȷ\{֩_6!w^yyrg|=iz%-WvFNIzs;c5ſSO!ϰf{L @+b#ylK0Q̈X^lׇϳkMAYGhNl8 6ڼZx͇*jk(1sJ\\ࠇ9y8\<9D\69uѱǘiGǯA8qc8>3?]s&[ɿg0X WP0 6 f4[O˴O*Y&AqSzwxj{269yeXqsg*Yӱ,YܩypmENGvI71ar+v `T [- ECwaEļ938U^~Ț@rZsd(J8eOkoM`sʞ9U~6a(J9(Osa!_bB%Tmsݭ>y 'OJ^^x'?B~ʌr`c{'-cs=R@16}|c ^ly8XJ92Gac9 K9.l7TXu|dz`j  &9_*Ξjmc8%+n_e=X?D;f/UV?㺋gڦʦyUcϞ??*)V{hݕ m,$) |y󓡨 g}{y= 3*ckrz\`V3y˦|쟿xtȠt,,*ФD)$ #NjP5bQTqY-5TbGX$xF>jGb4!BGqcjG>iPStב&|𭦂յ&$R?eWʭ׸ 8dW -ʶi8C;>ҽ*cBi}1!c/xZװ, }ʹSgK=AFЗ\|"-ڻE=Lu5E:9q;` vuT"Qcф<ضJRy[O;Vv=ZIc3T_m_T[z gbB*@Y=o (~hn}{$ )5nMT;aj5jK7Nb;fXtL?~OoYUR%k9|PՑ &6e nTuEda)'maxkm+9MK vQcyE[v k*o[u |tޡU_g_pp,~|É$]BsZ=Rڦq~C *pt䊠rx9jgCճm}y8gi \+Z7U _cMvA3ώUS{UU[9@x+\FuU1gqV䝂Y;3Ji+t#s6{OATҫ 8`z. 𲇮9gqVS`7SM:`@b4Uٖek&[&e6P?u uؕبsEgZ ,|bC֭mZ{pL$Ɵb]f:͝^1_g׍*V!ͳS9juҐT_]PWCMCF1dɍl,)UvN`&6[^ϰt՛X=yB8cc;L^)oZW֡NRAxqc. 
Ԛau ia ~MGT~r2"x\=WEnXI(nu9_J3$2FcAugWbgDlrvHն27\{H:u& xlems)3(hVkGeo0V(W^dK(̽u׎eEMDVUWR'Z3Yct)T@ 4,UP 1s[lԾz;ޭv,u[bfݯtf?p*+75ՖG:nƞI'dYsϲzo1!YV<+ہ%J,jZ lxqC d-mGVJ0#@b+]`Pvc^5?_w9}wX7>:tLH p<-/˫+طw.m ["O ct[ȹuZQt_ ٸ FYÌXmW)2Tj6럱?,ccc_<Ę9jj*UP 1ٴU.@ijByNv:y'^fѕYtG?#,AGNkze9v ,j+^T`8Hr< &¹4ED!@vm"#+AY4[fITҙ9GjN*Ő(Keo]$K3Mz=x/j1gJHnOR̳nKun*%g'2FX3׽g-SSs,V$ _#|<Ymn*V,`QCjrHuUKWHA\<<)E@T\_R@& #9]حv3Gy#91s3)+bףBF ~zyB@C ,Fe,P b69Lyw?ugLJwLk/XSul$MgH' 9y{ .BUeVZ`[e`+mUʶ*g8CvZ ѽԎ=ϫk,Zyd,y[`a7n="bhEڸ?U-qSȢ@r2@ ک$Ax/wRY&D\f7I%٪%yHB&4JuS'cur(/y|t _?|, &B8S$oo3Pt X5zq(1cǃbvLxCjh낊ceRP:ȀG2aأB`FYJV>]1vNjK%Φ*:W"dzգߏ;cZMՍX,K8+ZH Zcc<y4/16x7W\c˿wT2:lwYNK%Xqsgՙ\̧_S0XYz+y0^¶q ])m~hcILŵeUR4,矡zm"MW\,Za>%ԖL C??*)Vyvfh՚#`w*Y͋#q7|;Y$զgeYqa1r Q`Ddy45!dhęo˜(H#9ob!#8##stNE)ZgA%Qp@"i1yYG10<c'? y𶬭HX|LW9ygXģg>3\c;]17q:|yY K,y&aYY[XfOyt _?|՞5Z]RӰeiqm >[-AXŃcLjr1'6UDxC<Ŋl,yJSVb,Zl0f; lcEJ5(1p~B>OfjO͏+k=_>zĭۮn9Xa.5kfΣ5֓糭.k>U3/MK\xl+.SV* q6ǩ{!81sXqY". % Q6謍)W(D_FrBS;l ;l ;l ;a-Ũ*Q:&A>6x?Cg6x?Cg6x?Cg6x?Cg6x^dm/O0 ܼ}u.-v}n r$$zP/hA21֯j}xF5YI?#e/]Kاk}]z[WT12 \L8QN3B~m ZstZueq4ûk [Y'8^ұ5XV}b+:ZT0 Ejε*\hZ%h^q^vozDdaOZ/?]w>9B6Vſbsl!ōe:?7[ I؀q,|0+*ּvQeg03+9 1"E8s% W[dϴCߩwjqvʞT!zH2Brɔhp+"h9(|p7* Y#Z6j}.UB0Aˋ3wSKaWȖi(0/VFyq 0X 0 HEI 'uV O+Xx_R.׶'4$3/ZeVS6_4d9"Y\,XEH2y?mZyJqsz ]jjW״9a.ҭ\/TY@ bFs)οqmۂuKlϟ0[:ɑ)q5 9' koogԜwoId" g^2<1ق@ r23l*)CXlW QX$㹌7֣zcg0S:jU90Fs3eŻ.0 ^ `8fs қ;v 9Y-3U9/혪ffŽzk)OzdX: ,D .C:rg֫9<2~b6C?kxFɖM`Y/U$9`9 |noAs{<6u둧 s̵TCjpq4೴F"<ǘbSW?6bb9)FɼF D 1-hn a< C t0ѣFٿ=aA%'>S[Ufvx}^Rgı83|eDU}*x.AMz^6RfNҝ;p&љf^Srgd>6uw ]k՚~btҩ& ͅ1YֲgpmٺL;/Wɦ g+ە*y/x\lC/)bRzMYV9s8th-.3jSl\>`7oe}8W*x۩,nkMINR X%$zVxv_rԸ1D.U0uiZhaYiG6mI]v;$sIlr1!v/,3GZG*O7{rif&YfCK9Jb)Z0J16<V}Iv|V{T++_Ii|X s,ؙfk2m6h0ߛ 7qm#FC_ܬkx cg*6T~x*wXdzz$[ڧ̨z֛8v= \ܐ5pf@qVkQ+f U`wSw {zzuv?;O?˭]ʻ$^`ak6]b(0d,Wm>4-uVȝ+FZyǼܞmݦC-C e#EŊ5d[M)ݐ#OA} 7aDEsV{{TG'LmgOW5xVs:c/ffp5faSw1؟ͥ]PEs׮)#i6FpfKsuC]mzJʩ+.!uőK^f#tF6+9 竊ɨ]bɨh%iiӡ+ckzljJJXc8= J2IfK DY:"$׵ڔYM*T6%ׇd‹p_[i$H6f$(S`ڼF0*"zX^UsoYT"$f)*86sld8u~͖m:^FG>W:j&9tG1O^W#0G<7Vf,qlٺ\,$ƻu1!d,r 
kllЗGQAt~ík5K\62nmvM| g}H uJ*45z BX9[k!!19[yabQU\Yh0@vpMueI\0=AFzf9!bQ`Bdž%5P @zr(e;Pԣk, ?庝*iTllnRـ8把T=VE$<HeMuOˈ]qdR?|O"8^sTr8+Mq:εNfg>]k\;etԵdHf(Tv`9'0T DzζAAJbmMQXʃ.9l1Z*>^f}mOf*/Sg9Hhc[\Yf"ndܻ :Mcc,k-Cy6&[2WTbuA^82Zʎ#bny[X*P@b*BXYk_ iPVK0«fIj5P!R :3E;.竷[e??*)Vx !sGqeuBWqeQV"۬WD+, %eˁηJ&Lz&qt;{M(jqھ9Ǒ8@'J2q(zuBWqηJer 5޻ a*j ϋ:ߡ+g[%w "U\@??CjU kI4n mq|uBWqηJ:CI຦g[%w,~ŝoЕx=Z!#8#l%XhE2O:2SRsڏWP:ƫ91n:ߡ+g[%w,~ŝoЕG_ׁ8Bq3ye<ѯɮl V|VY-eyηJy蒗F;M7Oak۠^#(s>|fdWTCR@2 m)^-|U4"Bq*Tl lCYMώkoWK~|y85&abF<'=4mڮz]o^bxv}؉ v1e!bqByU~;gɏSrg}bbw7zbzPcy/w7jYϴ,}iLp{c89MlfmON>9S?uJ޴V1]ggѡ}T끚% DU0  ^?NoV*P ń jomW@Pȷlcc.aI;K&bjy蜟}z,s5_qvG[dفnNOrѵ0M~wnߍ\[s6 nyaʼ#bqY R9Nђ q:딈8'}TİV?U\T+sonm7 `<}j$'Sm|+֟wmy~hOwj4vyR٣8x-b矯Tjkz3;u:VOFLUQSRzy$^Nl^& -a&вfAU&ies7ӝoj30uWZ􊥈 -lTǭp g`AN8'2C$#8Kً&)L%y\?@gW^1ϒG}q LU<Wd02E SO57νM7Ik?bWY5[LԕZ7SXtko9V`↿4uUHz4W;}jN>.}{ Z V~hx6}5mAFftl72if/ sj"K9RڗiY.J>l+g3#W)fp#ؙy|Smxlxs]޴!em_]fNխ ׹J -FiG]%bɩa q[\6f"(8}xq]7lXv1mUɃi6Ny…KĔTpe{$h9F}b(_=O~6Bc&1c1QXc 9sF=v|g@ظfqh3tWz~<<Y??*'F1N~M8,)<0=Z8DZVkHz>x"(@q4ccwxYH=?f^5n/{X5e[ .lgw&ðK`wԐBA\V??rz4aX\,*GTx))|{5 ͚yφ֢I'dzTۊ˄{C8sZR]8Yׄ[Vsʋbp1ǰ|Qcj`uNH{`=~xZb?4죌f cZ= Q; kls6m>!jω˓C^@OAc^?|h 'MW֩袦?Y:zԯC*}z[X_n&$<@ qģ,sg5޲GArV1Gx7cٍ6Ou1'>:t(5?Tmz/i~On <* tyw^x_Aj 1CUStYc,?>M3y~65_ζjM<1.ejX23lK7ۏ!͏Lz8gjԐǯ?kB9ǽ-v:9bgtX?[3:s#]UWLPKs"RcK81|Zv mfz+  -BN3j Yynv,κ8;5L& 1;Vׯ1cLJ|<Y?F?"3kyݪ-X-pBsb^kmԣoaEEii,sDlc[&QY,3Y ?<~#dL9_ ! B8 p#yV&cLk/>uGX,Kշi+QsQ~pʞ~ap,"A^jTQgZrVm]C[.%LYbw]ky[zk.z}ef?}+BL ,q!C:en.FQģg>3ŵ:wuWE{gU6zWf0c>GO57أxUcQ"=lWZsL~N^gaժfsBHJϘY+>ccf^l/^ a.n[402.OͻNѭ,G.@O nϏ7}iX|)n:"baʖJgm?1&P #ީ/k Tqg] E>3gxd'VS:F3,c89T8My/P6'Ϙ3v $؟Mgq0,3{QOYSb] @7NmNPW>)ée=} l)T m-2td[9G`XقlB5WMQ K6+`JjKZ%3g`ֱY5pz0ڴǝ۳׷#c&|S<|U=b5c*.0??*)Vx QS)I̧884Q}7iwuk.pfexEpOEUFY(RqU< ˣEqEqǍ_M4Q}7xEqEqu[gV uϙBGfq7c#9b13;%3ۡ՟(6Ƭ28-s#>4Q}7xEpu'FvJg06sƊ/(gՒ>4Q}7xEqEqǍ_M4Q}7,'X(? 
-,dvM4Q}7Kfs8JluQ㮼 uQY ??!ѭ$'hƊ/(.ՙ BYrixEqEq7U.]ȹsy T(yg!G(T=kڼ1"RJ<L5ь+ӌqc ǘ~#hsu1Zq^NyG4 ?3,!^APrQpga^cc\x<=fjpVS_h=(e%8/%.3?8="RJ<L5ь+ӌqc Ǖ={|(ϱBb^ e$ikoPF1ϕZ8z Gf26l%(ό3]½80yZ߬DZ͏ 5XY-Y@c8JMLk4 X5!CbْF?X lN-DO`pȥn UEe&AVJ_oPmlM<5RX*5́TmE f<¹1~E !.o)Y~>>ly6xקi^k6(X.%Z?USlqmv-z쬜M#UJajV@;]k\Vꤿ3uW@6*;z#/I<@>Z,14MRsZ݋ukcsjĕWc5Y,NP{5.l߬uuTP;f72F0(9F=;q,ѱ<#Vmƭy乜dBCr h٬ e&CqaZI6"uqVETMA5ۍafaجg:Ѫylqn =O$on췝u0 y.FX##ݶ*kE l`lfbjܟŢHEdHᕙ67{4WS.IesW'̾ƣYeoTyѯr-sG# UwīGQn'hٮwfUdsskUGSx_RټCX%zWrֵ5xPVu;f5-l5.qnJl7}Mgazi> g1UOcc/[ &c': 2aɷrNpZwn>ȸqe9m6oArJq{;V/~޻" =)\u"rӴYC[hG$. fzkFtjsRlew1Y׬*ݍ]..VUp)3dh,ܿnhg![E<)=v߂mRn{S͐0NK 3(ƨveHb{ő\ŭ[Wd,gZtϺl`E C4h9{Ru\~rPuF\.[7v싇SϠv)j]XlK; mfWfa9<[`䭃tl@hgHį]Xט=}/cK؛ySWHW"U;lpŻ_\MJ.K;svƷ+T5El=F'ݘԩ*3`7r\{4WD@K7% QriO9Bg8ba_%x+ ΤW[+G]A<* Zl]*1j);cn9R%ZzyEhmT++쬵ák7BmVTvkeր-+=B -E>! /s8qܟ& úfu ePO&1i7=juR'%My5lJn:Skx+e|aW\€(D܌{zPJ$zNNj˰Xdž8٨O>=YICGO57!\o2 o^nlj^ u'l+a9=| ҹ-+>* ƮZ[nUܙP* /nbjwbާNN/%Z鍶%ݶȄY 8ԝۃ46\.EBY% kUt_ap9OʳU ܼ}u./5DV 㐆5dF͕DZ5UdKbv2NsTp{iJ u,0b|65uzeb|iDiY7!DERs4us`pcBgLPY٪N$O ipB JKM<)ZW RV1`G:)zS(\ 3t`ݹhk&WW,cqj (M ge4t`SXLiiKҘ1BHUFiV;ej:暲nq*F3Q)S)P0/bl!hË{N]r}Uς춱u`.,]41[Sh-s\0 t eA8iv9_ X"/s8֐ثdr0T dҀc?sl32jW8ӽlMIk $4'riE r uzl\`06Vc13>YZ@טl6+դA鴤YYY>UW}q*,즢ByECԶ[r$loͥ_2cMRs= {'n _a(mbguvuZNI[ 6Vz{Ҷin• \f< Q4@@حȅ\:XPVѝl5k5kie (*m˒? ޲ XG];'e,_uJޕPgH44 #t>٭"(f4,hk'oLe^3 fVRm}MK`uW-nf1OPbIF=+*, L^k!pMV|[Y)[3eSesW'n髂ĬR XtWejO>;&]G{*UQ˒H24E`U YJb)Yke={tT6Hj*&H 7'u*ZmTuW$g) PՉ cش#g,xV\.#`\.Ku2Շc3>i`qG߬`iLMFPIHP4dmk =tug._'ϏO57!Ο[܂ҹ'9@93N*4xUP Fq]jum z$u S|Ɵ}>M\XHlU29*FUi@js Hlt-mӔ#%֐] j]xU*Y=l%kĬv;rߩLSv IFu΂nܻɼ7SEWѯ1,9;_c=W[ԳwT9b&1ť-mVrX5p,8!1UGOgRG"?x6/R%KRs)ņgqcη0{Wn88soCH-aƴA1V !%0^ %vj jva,T:lj+BY::mE5Rcs]hY^. 
Fhb"PPFSWŨt!\+VIzj쓽yt6vM1i[-v0]oNX9cNj.Ӗc,c89k=T" P#xoNX9cNj.Ӗ=L1b}qf2/M<]7,xtޜzrǏM<]7,xtޜEZ d!'9/>s `'.Ӗ{oج}%Cރz⨬|t _?|ۋL203>ݏ7.Ӗu)f;ؔ鞱SYo{Y++S}g6b04o\$0+lWl4qc 5%d :g8w*{'d,un,nFrq&|ڷm6̓L: ᳪ0(D$^S>lg GMtv!bU.Ӗ a,8fsʬ Qj݉=8x~:'S/+7Dٵ-vxH=`M;U hpdž~,k+mʶH3g"tQ 8:ָYlc&kzj8NMz5eB $vqvSTH5 ׫(\EBs%1Ɉk\Y Z᱓uu֔s8/^Y-EuY1y6(gl<Ja)he*~Ⱥ9 sM~mqo|WƇ b(0#N:~VQji3 R1$D0=Z$?0¸-^4U>*0T _̴J m~EW##ٟ`2 Emv4iJ6Nɓ<Y??*;C_!EfAn?"D?:(Vv _34!ߛ0:y* -&dc^sl6֡Al??|O}h޵]UϝF⽊E.5GutUL'6#f(}8.V~8rNj<\nqgs`ch;-Vv폋<\nqgs+?[xYzV9ן!pBa,쓨A.Z{h%гAuB??*)Vyݓ=}g?$_ "C$akUep(G"q#yH޳d(;<-vΆ4^ofnP+49knN%)E]{F$>OO57ZW?c T3X^Bu5bBd,N|u.5($vzSjȖ^4QC(Bqģ/->m3>Srgة{ƻ&k*.BG^2 G>qm_ٯ oEY>ǯJ|~?6鮬?ߊ-lRs&D%͌p!sb>MTՀ1hk;dxW:Q }~?<郙 ]lz҄wS:􌾒x9m2|4Y|C CH^ mTlS"VEa^w2,Cd F!0>C??*)Vy{ly2L|A8D. :k@@SgyG` E9F!ٞwF3Ȗyخ; ;c8Λ&خ`J(̺uI׏fd%U{{ྷ°%ӫC:[frp,g/=g3όtwvWd+a=AAS-C[9=)|<Y綿c;=hdk?"*rDa%2O] . Y X>^mԫָW=}^Xn؇7Q>wn+gk#HT˰l{ 8F#vhSW^UWt _?} u\c)7 \VfC.1v桹;HbX'I߷LSsO b؎qZsUSrl~D}]JlqA0K ,VN ˿i<{0ٽծS[^멐蔨#n(a-RTԢkdjՆ& f XRi57|t ӟeIbU<d~w8)8ţq:+^s G??*)V{XtZk0\7׹DjѾ|*)vZ>YnX׫Kay\{7kgQcKߤF?,͗A1M.~~u+ЯV\xZu݋X1͎ϲz ʩ{/SuTr) {wo=P(=hKzoi?;Cyl vZ(cNqkǯX%vX##|<Y 98Qsx(Q3jR_ڵe(軫m;U\mϑ6b9ו1SA{Ggg1%r߲:͹{x5rT==^:+ c198&Kz٘"o*juEdgst _?}36>U3g}`&f2C0)Lm^Zۤ7?t3ۿ ea2z;Mb2J$g|<@8pܕ1;3rV|xJϘY+>cccNjTDŪOq'8jxINNjTDŪOq'8jxINNjTDŪOq'8jxINNjTDŪOq'8jxIN'Y{o}\~D:{s/*ރQK qramjҡs6`nZyM 2kg]h !ZgSȺιM wzUZۋrwMFv:(CIWءѧGlhMuEƧɑusV+sO`([\.5gAȃZLԾQ ĨSp4+te:.çY5=Z噆XvEInΧYב"M b5=mnYVLVɝtJmiU}qإR>1br/%vb=U+PnB(9 =;`U됝DɐveN/P.K'_eckz}Y8dAJ;8^L\{DmhL+=[cD1\D-6t =uNTN ku=>gSYٍOm.Xp4ڧh1C^u-6(2k\Я(]M'aV'W0vg5˵(AVLM5ly+o& [ܘČS^Ӭݚ@tK0&ጭ9Kb`''ueE"Dx#vuP#$ׇ@GD9#ᬷuZ&$Rp+s\)Ly%+Qƞ[5WmkY O#$W?kϳ᝗\M׵u\%̹V}}츆X&4e!^ۯ#_)Vx9Oʳ}<18>uQ㮼 uQ㮼 uQ㮼 uQ㮼 }__:au~:uGW>}__:au~:)'g}ǿi sad ї_:au~:uGW>}__:au~:uGW>}__:ʰe4/t/ko{r%ٵV@_` *`ak6]b(0e5 V/cfKuN Nbf!kZ8KmZy y1UuMxM} l^~:PBS\ {NS4zkܝYǰl6IUQPϗEj fY*/hornAާ80իȯ琯ד uƳZȇnkUՍy׽ X3b_4c/koqp3 NJ|UCX$.jZL$15|Q%:~9pjȳ*dg)KPPTV'&Q"EG:޺suj3834ki/.EuC)NLjj΂WMY@tbEt۸vb'2"ηj8Y3AYK:SKR_8ML)ذ7Uu P$J&ʸʵ]P t 8^%J$"2*j jz(yqlN} kzoŚZ1fRb=E&0da:!u׽r}OʳU 
xY \g_udk!Ny}[jU8YKc}Vhk; !uڶ67 {Fa= Z=DYSp%7s8-`lv+5uݶgq26Z{VQ']*LmLymIs̽TIŴ{2w%u=Eɧ캋5 䮯C gU,V=ӛu]3ʨ]Pt,ftjDLΥWTJ8SwYdT e6 IZV66uq٩uZj3 hG7wL^)[P Y*T0s{0B>Уzήҍ,5Lu3 o%ZNjҬ`$L5a VE꽜JҫbUpɝ*b$zUzD2}:+sYҵ,rzy ]s_YM'FX%ktяdxO%ź/-xnKNjt2^<[?̗dO]9@p!F116^;rq[eoWϫuByki Z[r!kXRjMw%lu u~ѮMG_BByxʀy *5D)g#*;;MMEX4DwrÔN b. c+["Pc3&S["Vp[tg'Y[Y@3xpf6>9Bʶy9af}V̫Fc]]n<[3u2YM${Z)J%H^11TOl;pƳ[qLr!RjКҥ]5h^UUglc,+7 ͻ $=_([4E`ri]޻~QFsG>2ӵ& j;l,*݁-B t- bm?tj/\,dfic]!תG^R)W@]6B2/+A9y:8t͈)ż]6uNbW6.,ڄWy&&'K4Go9= e3dVk+aI-*9׸f['c;<뵡gy@0uWWPlk5& )◰,*v Af_y|t _?}ekĉ$HǣO%ź/-xnKNjt2^<[?̗dxO%ź/-xnKNjt2^<[?̗dxO%JL˃c۵hXdnKNjt2^<[&aDc>qOʳU :b"H,}9w.2>;TϓSΎ$<1cY˪~̏?fGrꟳ#uOّܺ{Ųz8̜W^X+[G=˪~̏M)ZҊ#4q-?fGrꟳ#uOّܺ@ B1O4jl*Oqܺ]Sd|w.2>;Tԥl8]Sd|w.2>;T˪~̏?fGrꟳ#T׏`v筥CSzƮ(kQ2}zg(l5X (1B~%O k>@0 5ZB FV\Q8*ZF$c(mi,q8q߉|w_4?3ж㧜lJ< w_4?~%O߉|w_4?~%O߉|w_4?~%Ofձ^J0X(!NՕ:VMx9}Z*M)~%O߉|Tzb1_4?~%Ocl3o | pw_4?~%O߉|w_4?~%O߉|w_4?œW(yf pk(C\DҏʗE~j3]Vnqk߿?|׭Zq7b#crrgs-C!l`rafoʹpᪧ}qVk2e85===el Of.3 Ke](FwsKT,=g9|~b=y8 3ki*&+t=Go9OʳƹV5(fi(*}R~6n<'Xedœ2qxf)P&s^yaU5ό@n7GYz`X6*hאP[ҜSv⢮?j CV6{z`u`X6Vn|-(9~tlsqvm a :K DЊ7-1^Aw2zw`7tB3g,&^trp8sB~}Nñf{ AqWXT7^ MOmjc8K`Y+?D'IGte3}'8˃{U3t҂1`Uόg+8c–9Oxt +??*=믌M* $BN0qϙ{ա9#Y1g-Z$O}_wTWS;-q)yS}k-)Rş]修99#7srɞN&@~~n1hXJ8ܭpZ=cE]l]M(:T-*hXuYٱzĨD b/6N{v96daSp{pO[D;!i3yn_mW=vYt6*sY@e PG9cPR HjFέ7@g>\fu֥9\U ةry>o_)Vyݱ55I!k)SYeX (!F1VLR3&EN;ֽ<b~6-7` ?5 TwQz]nw-iIv 6/hIg({dHM*EdV Ᏸ>-)2T=~%^>5@+H-SSN:]q,åwkĹ6B%>1] 7e2n,ѱ+fkU4#cqWx.P`pŃdQZN.Nk?)*k[E\][kVاnϤ\sV[4XuzTf,Tlw%ytp* t,ۭꛞylRP7+c:gē8|46Ptw Hw4,̢ԭ Zqj%6PSSd@Y?OʳNp$BKqZ'hgvq,eF(9!sXuD%63ЕpIqoghьy͌c3k4HŲf90k %N 㞓\zwq?p(pC9?n)^[qJe xJSu+0qg|;HdԞbŹ.d! 
q " 1}ïTzw+۝Dv/>|aٚ2:Q蜡mQ'Z ؿyеIm =*hoޢ=u g֨}ə+B3͕y.Iy0LLtfsba1u8H㡭#ԭfҷUAXSXYcݷeb5{cҖu v+,@@Q$ڷ?j\ljV97;-*o1>'˦"V5bʾ!;^α1]S^ xNe}^:q삗bBeCx.=h Kl駎E[q kG Gm6YzBK_t +??*97 #9r9=8qc=guZ/B.WnQGun6%ƍd3?X1#4cŖAX̓Yam :ժA`Ғs 怲X#hgi*%j+as6n]1Gɣ.v kzeXHQr!gB8̉!6֏ Z<3ucgS<ݻm c=__鱌VT"7\y H8DzOqP݆3VVCNҴi-vո9+UW=4Qslw Xg֭PT GR01X[%50e?x-zP9q818sL~'ܿzqπ@ p1k.RLilǂO=#“bLAvKa..uʫުm Q`>S{{]v=h0QN3F|g9,5"q|Rڨa vg w6WÇO_?Oʳ͸⵩DH&=JZ&vP33 s~;9L/qov^S󧉯^_hTTS UA~-/h! پJ~ᝇa5< Vms7쪅m]*V=~ɶ+i5qeKysccbUTAq*8 Jv UruIV+X달?ȴg1&N3G)+gc-fд%-8,}!XqE<U1HA=N]k?l7gy>%^+]c;V6X?lZjSAf"9d96풢BP_37|<-_^"dqq(=anj}2xGq(=anj}2lV=b{?h{>2xGq(=anj}2ggDQ̌q"yZ`"@.ae8Pz>Ä_^9(3.lK0]} Ŧ`C&7H?PT?e;ŔyJ_6]OUxڈ|lkCk1nj}8, ,G22qcw$F72vn>kAd5sXÏAÏAÏAÏAÌ9t#9Bog!",Kfۜ?[yv>B.\0J6x c1c[luq${۰,PaLbYƎs0OAÏAڤ"vM؈ӡcgZV:C /Zٜ [ljMbp6 ާ!& ZlaW٥<y|YO_%voQd"yȷ@"Vb(ǩ'>SȭQzճsCKIUl|,W:@1H4 [b0##GcIU\ BK vWC\o%?o|'aC [r_W/w-Oh8֗|(K2'9l1c1c'_/j2`ϟO!-'P+Qqclzb yut,rBˤv<^}H^M`<\? Rsֽ]*%>UgO^ `8fsZV*弜X_raG )cq(O_?cWnWG x_ՊƴPocEu}j,o.RqEy $iDQcr+X93jn)g{ufrNe^ \nF{eqObCָ,1QmOt-nn(owZfJ\{&^q _N} uځ.ڷ|fͮ/f*T.6 dдXe>9 &sd3,<>xVoݶlK֑0$vꞚc\;jbs)m587?8޺ԹDF=.=l&(+2Ilٕ|ƭڹ*4ADܿ]ʾT5ustFZЯTu.P謝͆Q9 ZMftGI6gsֹ!ӱV/~/(֨)-X)h[z8;9FmM{fu:-|a,潲l@ԭűUz mV']ka5W۬MЭ9/ȰqjhY鷀rlcʝt"Ѷ`]wXJZcT3{5nߘJaݬ* YbSZ_J=1=`OXԫZ{6H3jhp؞n̲C#H4Vc[8 )ώI[W916;Fv_ɧ\XGɱz{kӉ)% FUzrVPMq)ň1eQZxbj=Flװ{ե}YSE5keg*5o/Yyİq g*f6pT*),*kTXυ=-9$C<$P)p;/OifqWֳMik2ۛ^F[7Fh?ZfWfeyK K;жCuffONzͩw;*^+߰0s1 6j ogԜYi79l[u^1 h(@Fظr ܳVo"Gj ("0C 9Y{y|T)IX c8[ms8z]jiiĺӬczR{ EAUґxVoOP ɢ5'j_$%b'Y&ñMo%ՌeuhO]^Y   |/8 pp̤b=9,|5\C sk}r|Y˹*%p(}f4Dam UQY 36JB+Ga]#YI4 9' iA0#$QVKEfc1zFg"kB3&%wDg w:ǫ=Elv%u YW["H+ (aFdt^6J*+PFY;״Mb4`LݭU: bXVZ<^5N9W₦2gsb8(RG+C-*+:@"!xvO|<\j:Pq'`>@Q 39B68thcjͯ{d?;.UZu8p)J'fW`-&[S(+\AFꯀpYظ,QX ŔC8~G 8R9*t(ҵ07KVv/d`#WU_ER9oXk2g,-eЮOĻ8.۽J^%M}A@L༞\IFp p%4}80dCk+:36\;TfC[-3](JnjļWkg9ruF2K[*Pv=bu.FVg1MR[bil*ʙ^[ßS[ . 4f5 sVQVT"`D81(^US4TTIFS~^ mH 7\EY)H1Gk! 
O{DK^ %5`6UaEβY9fV3!nKy&bI +c_5e-BaT:6рb ºS0nR0rW C_eրw/:` QWu wT$|gC} mܬZhڽÇug8{'qgg:uSBTcS32#๹J.gxCARNG6m<*ѹ ,'-YBINrqi,K'$!;`x 6(%bKZI/u!]bx=cbu(02UUN|q{b X l[O dB9`CliiE}R2&DLL.IwU0< K+o_)VxYln|XoqW{*[O[3c><&h31 Ŋ,U~bNj_{g5bc<`<3cOox&m8׹[Kh$?B\x~xRY\jeo˿prwo, LcĀpFyZAb hKE{q~vta89@9iA:cf\s4, g=A>;~kHRu.3Yz}x4=K^=UۡmǶQm㼵\{bl8&hZAV5 dhcՒ +RCr6zw8 c\a.xJ{ǏZxC]؏W(ؒ|n1$BG|}S{\g"hoU5Xa#fa-BĿP-6qO fz f\p{2ٞl]>lˌ/w<0O_OSqKGep~Mku}a;/y8rRר 3,k**թ`Ǔ7|?.l\x)o#RH #PliAt D!B>9q81|Xupo/?)F̥c>r3>v1T:QeF9'NJu 66N< T 95;P{Ng c11g Y[Xmڹ`!qqqqqqq␖ aۇkm+|? grh)%Y4+dG[=y<_ipw2q*;N'4/w2_w.E'g=>G??*J1e,9,cB30R]J:oZ MZIADK(P8UX9"1 窬{&^`WG/?)F1̥c>s:~~Uΰ\0H!I0؞ݬHm06Zcg5mjAC>8$ǩ%=I(}7qIC齣RJMzPohԒ{GgWX̘6[3OԚ[G>8&ǩ5=I}5qMC魣RjMmzPkhԚ[G>8&ǩ5=I}5qMC魣RjMmzPkhԚ[G/')Rk@쀚ug̓5\NS٧;jk8GڒB8ccZff&y)I x1zptb}Nv%yoĈ#8#;sܿYCYV{s@jw#Y$EGAީrg?V2^??* [j۲o9g.5JHYc`^۞e:lAD E>U ͓]p̥B@?hl< d$B9nݙg&>xcY٧h|v_=c834 )9 C|t oTRI.8 c8;+8Pz>ÏAÏAÏAÏA0m+V{(n3,[o D8nj\\SqÙa/ hYYllc11c ԮLv`` !KޜY5N3,XY 6Ϛ)L#G2#fhf'9lgxif'h:Z7ȃ>Oϊ]뻸Dze";.l,d|t oTRI;'!-'%q7̭gx2)V|yi^kG̔sZ2 qDq!b11yЦ.+V&`ի \ . @BF8TT8AxQՎpFe4r$@PZ!,>0@\6 ϑ7SJ'Ї7ȷ@V?2G>KޜYG2qcΗҎF.? ލOduLn6Q2%>y]J*xeCc1sc,6>GOߖO+? 3M12d裞;?{O^S׸Tu;?{O^S׸Tu;?{O^S׸Tu;?{O^S׸Tu;?{O^ẕF爧q[r_Wxw+X/zrg۴:ӆyڂȄ0qB8c&e"ϟqg961Ə2+}\g쎮O;tzF#>:\xv Ѕ96UhA>H.u&mrzk,)̼KϛV)1`'jDX-ֳu7PaN*!`y(Qw&ϴB *0>^厙'݇=nTQlbaj[VTy84){#0KU3~N6P˥dLF5S@/L(4kk%xo9VV3|t oTRI;'! 
2Q[J2qj]]qG ʻoXZXn4|A/G SMF5LwYܫW=saOZgiWCP56ZL´$!Y52ju-9k^ˤjKO\^R#hJ9TTVpO^Gqi:֙`z p}YWZ#dHUAj׺Wa0V,Ծn?ru yg.?X{Ut7vɖ( K\R\B7̭gx2)V|R !I,Fe)i1.hbh'1,K6Y߽ lU) stUP@Ǽ/6}Zـ͑q JXlS8δ$iz&YLT0R?w7SJ'ЇݜsWMW5\6U;|bwi(1\VÔ&)% o}\~ +g4̭cdU.=9fY jk!0q1\W>跹Ng ,_WقIUu<1c1`\CީrgvOhClzZTOެɸsV&׍ͅƟsq\9ѯ3(Z{~/Wy?~[?ԮRN}|]-_K$;uMR!`Ǽ/^E9>M_/ej ^/_gaۧˏ>La(~?yoQ5*!|t oTRI;'!Ur6% xlØ׬Hb uWV9Gh1간֢[EЊ{;MFR[ ' й?yqtŗpl55!-)e=ME.8']#4 `..щvv\%NP--'%q7̭gx2)V|BIF6y̳ΎS|s#5CaxoߕN=ZM.ox6QId2SMDOl_?O}kg^!R?y?~[?ԮRN}|>>(',!*dB1OF&ʦ~}oGଳFO t0WPR6Xj$%[UA>~<[N+}Ko[?ekc%NSsIs1cKޜYɜ3Hg8p/.Plhp?5L;1*-\ >'ϯd;&=~Wc|t oTRI;'!eQUr /i^ĺX*j˜z±VwD[eUW5ZH+!e\EAɋ-ZL| V_/_vlZm`9=5gZ9C/ݳkU9]LD9M$z>B#VGQAvI:*Y@!9?!-'%q7̭gx2)V|}#|-W)?'d>9RTɬ° W9_V4VA5ĿPQl["5-ѱ6é`*Cdu09=ZZq |`kmi{4[(qٶ b6x>vM](b"yG"q[r_Wxw+X/zrg˚ћ1+<{›=n_7۵OޟRi|kº1uuQ y-1|t oTRI;'!qJ3,pr'HVv#d[&֨Z<Ѐ?Y^x3Z٪Z wgc,OיK %a`8BAkFTT`ReCf2Oo89/~elc;Ə}9Oʳx \ϯ',(L.S{?@- r^oS;.P`ayP_k"7SJ'Їױ\^kIһd#r|pMWu&:(fSZ+JIX9:{b뜨yni{3p% 8ws*̝]Y*7'cY+]Tfmz63LJV ~3wc]zg@(yxeujqaVij/*莓^zk(V9'(ѱ=*dfByc"q[r_Wxw+X/zrgme"3Ɛ%k?z~x _%=k%?( }~a}KPa_=|t oTRI;'!{GSh GWƪ"&!A I>M+Pe+jZ] 6;LB1h 9k)_*C $I>9gspƌYٖ :›XU+g) Slb9״[UH{5Uݝ)eo0<ȷ@V?2G>KޜYylf #?SjIӹ뿳=~P@/?ÛI9cSG Ny#9΅HIǚlOߖO+?{B_wfլF<@91saz2.ssL]n$8 ;@L?E侮??V2^??*ϖX79iϱ6_DxBځ??e {u<Ӷ۳\x )q>:\xj\A[76&OkfXqkEy\Y4k: mʪ"q[r_Wxw+X/zrg{yfϚm\(V'@#)s 5wvgGOߖO+?{B_-6\]U,-~BYxXTD`8fsvZ!`"bѳ5pFXbԍvkd ϛ o}\~ +g4̭cdU-쟔ش~Z2o?s[%s>3H{w~(L@?'ϟ3?yZ nTQt oTRI;'!( U\\ousOSV! 
<XLέ~d)+!q@AZB[wQG>%G84":U I,ȀJ K~/axP=\W3(uc?AyE;6ylloOOߖO+?{B_ȵy;NK;liQV=0ضEU,{>v:jce-c[/BKxi?Z_??e_?m?kϱ*Ayagؘ43u>L9OM鵫wnjL ikOKYX?kQ@>:\x:pQq%&5 Ak R d#L( Mhf ȴZ9GNH\˥ZֹZ&D-rb1##yȷ@V?2G>KޜY}~P>WrT0p< _s{`Cy{@V 9?^m$Ly??~[?ԮRN}|0ssީrgvOhC[%fXW8δM%?JpOKO"vV :T4ౚ)eYfee#agmc {e g 93\GU([[ ҸV1F?4sV3hZ{Ӕ|>[?{pvW1|,sg(uOC?3w= OC?Zp[Svبo=ZgOC?3w= OC?k F,)gyU@&?J%u_]\Fى>OOߖO+?{B_KFTc=}vοvUaF,X|WQYWtaM-fV9M0)+vfZLIqy卩=H[9l"1X:J|&.֕*cVַuvI ̿E侮??V2^??*ϖBl5KW岑&+ j/ژXf8?~^JRA]qc55dXI%4Uy nuD<rokȺN㜐9GKfy8bQϞQ`ki-Fh1a(JSWC * GީrgvOhCSv0ş'!eNq[qeeCx4c!ͰxG2IHє::+7jHFdYF9ї&3UC +sssc3j,'>;K.;K. ( cc8:6V1B=YqYqYpF8M n dj`9 O}yZg?=㰟ҖvRÎݽѐ9'a?,8'㰟Җ GU|s/<|pJk7#3{ )aa?,8'1 biF{ )aa?,8'ג3$<rozO=9Vaqv3ldn!saqe(:HԢ`be^kIB.E( 5u8uqf)kMuQUp\.E&ik6%)`UY{o}\|[(Wg!_V1#m)Rȑ+EFRDWʶ;&J&k8BL{=zN9qdWpխ'Akj.MKx!kvZ/ow[sf '3\++8T-_?N7=DPGR``ՌȘzC{;5 h)Up˳#}]0K5zWzMsjToigW,wH+m4Wr֝ fSu]z2C=M""Eb=-7Ш*@j]S(Aʴ5h*wXȚp%DICX)e`J,oa&-R,'JA oYhA76uP|a{qGW;QkpK0[E ]bL?>9CY{0THO۲޸j4YP7w1AEg>>:\x*޳췫2nsQkجNb܉POz-{Ō̒P%]PM@qͼ]6ŠmL{Mb$TjHkǮZz_M)ڪIv vMS٩k1l4i,xU!Ug,i\ez|RM=?E侮??V2^??*ϕ/ȇ8i,mӡfVٸsU3Ms3Uҹ-3cs" JODZ>j %.r5 :CkCMRWlJ[\P2JMdshfU6jm N1դq9{LKk*.N:]jՙV0,b\`&"s[٬cY]-{(+0IMʺX+*tlcZԌ,Ҋ [6-jlЩ5Ś'c6oS9%h-\(+t&HVSe:hUѓ', }gjTlO%g BM[fٜ-VM ;Q a fx4r4mWnAR2:ޥ(g&5lܩ{k& v2PSվXgf .-S*h}wg5oPˡuyC,!'בQ,FRy9n۹u<݌ yc*⮢ʶʭu%xY<gv :g-E %^dDGB1abKSSmi]_uEXJZnkBӉ$ Y3UWvo^M0dy ޹5ҕC&∺޶/q:_@[km}m^㭵 u{/qӊgT2EC-W)?'d>7EjZgKl*OrJXaVw{n"HlzjsO/EsO:X6 S7Cܒ-qΧ5jIm=sR㫵s9M[֔TR;4 l70ٶp[ f(D;5i)sV3hZ{Ӕ|>[oSʽ0uqQgd}x#)īc1V dNKҋVeRɲVTcK(H-s$tAN: bP2]rbNhJ Xfb(~-СQ: ǭvU)GgjHϸ ,b=)[ VVx]mi((ZUHD!Sa[ܢ({` kh18ͮrwxz=u߳Ψ>5N 81gqg?-W)?'d>h& 4FK{Slx̜YqWshn2Eh&D<[N+}Ko[?ekc%NSiLyfRKTkIt[s3uvK<Ӱ*,>PB=aj[, %JeOZ0aU.h̎,D'vթ::sVYtKʘJvW(l?*xj@8<\I2*Yu5f'#l@jъ!Q]38,%-'%q7̭gx2)V|oPSut"ˎQȞc Λ+X=S mMuҒJQdw:QWYۢ,wXŖ^Bvabw5zM+ylFJozO=YXl馾{\z-p Vm"y+a|՞# cUz)Uy|0`,׆(!tz{f9WU:z`iZ&. 
ޱeܤ8W0޳e%q!sV3hZ{Ӕ|>[k,6sֳnVz8{XpR >f~:\xYֺJm!бjF4*=Z7m!z.s~0a-ݡ`dT(@r>Ȓ^ʶA Z^<ݻ6υ+m%NdBse),,[ Ȯ(i0Zp,C]TPl *-UvXc؞:pMfZ65K]MsHDO##^'5-f8ZjHBݍSqx5"fQ>"hy%UAT@={FT:o$BErڵQ~, ~}4 э@Y_ێ@$ eX9dK[N+}Ko[?ekc%NSice]z^+?/l[i87<3]Eb!,Fe, tiY[oLA~xI(`L|7SJ'Ї>_v16ê.QUG\w4αGS۱"+uj(\LM/vCcܮI^${UhZ[ MXQnlܖdR賈>kLF+1e2.97u5vؘalcE˔WJ| 7\΅T @Y^xOxwKg[Y2'h qFH \1D&q:LuQعJVʌ̵4nSpa?E侮??V2^??*ϖvLPR;a1ŮTF4fNdb3-sCr|!~NLAiPPYFFǚ!4YT( .iZɧ)TozO=>N{GUl ioY_ggfr81[6ib'cE7y;q[V|26vT] Z0Lv2P{GkF5{h*eakjx/1N{H^2OZ #ngGmKu鐱Uر`; ­Zw4n0ɿCG:ǟ1٥s{O?y| i <-rADXd [N+}Ko[?ekc%NSeboH> &x]_Bcn9Cyاv^7$%j̺ȟ?~[?ԮRN}|<=hlfe8ogG68eΆPk[ᡍVctUVfu12&]n1g>y-'%q7̭gx2)V|K 2HYi$tYdžs9ysOx҃9d}kgeG:Ac\8\fɬcod韓3~ȟ?~[?ԮRN}|Ojl(z @ sE_"q[r_Wxw+X/zrg\{ъfC1yƋL+k:@5Cqqk1,>L׹ٶgad!/bb8?~[?ԮRN}|>.Jڮ.LKϿE侮??V2^??*ϖ5_?^ #z}>+1wZ?bzm 8 6L3aC;A+M>:\xpXZ {d'iRc\NE].-Z{y o}\~ +g4̭cdU+2>Fyf*ETU စ7tդWS&~K[-vv=`AWצ%a# 9MXozOɁ%sEg8i;?{O㾓i;?{O㾓i;?{O㾓i;?{O㾓i;?{"B#sB1o}\~ +g4̭cdU+>\{Z=ŋ,Qke_of]-ꑲ <>K.SaNx~KoԍX-'OߖO+?{B_j 0ǛNڦ9A,5#'&eg Dp.C6%b޽VK.G'%\%o}\~ +g4̭cdU+_[$G1u9Q3>1"_G.|;0u87aHΓ&,lY,e,ɟ#m_3} |7SJ'Ї+HTT~k8SETE.떖iQoշoL=\4=c-n,7e}~,MGcgjiYO?{+KV6)ε%y:j9OBnJ4ʂ.+vMX bdh1WoLLdkSwBmYl11$lrlͻH&`1\v*WG8+& N bLXMc!qCg 6O3̂HvchWsV3hZ{Ӕ|>V.vezͿ!O R /T01aU%[}q̝'jULk>Pc1cEp;xS ptX"#0bO+@QL-OߖO+?{B_.'&yH'iގMPe\ƠpNw 7B<>VlWZrLJ(pv5SѾFx&,w͹eBR:ؑHJes;@ij`#Ju;m+6+JZJpOY+B6(̥ 9,#^VlemTS= q]3VK&>ݝ |_ǁ-'%q7̭gx2)V|<4 R`\W$EjuǢV{cPT[=}?c T~Fˎqj;:If2_h6mوȽhsj[9׻>CީrgvOhCe|d$ebmػ'B;wzeRh_vb)u\VƒMŬ%:}PB!z"ŨlQ̓N 9 }SrtjM=j¹./e ثTV}rT^8lzч 'a3# U#F09cLJa>q ?X411r4lƽfMUu̜J.f ,Db9i*ڭqbøǓ~JpWs,|7SJ'ЇŽeRvɘ8aqסS;bQ-;fa$p&g[L/7uIn+B1CF5PrR,|Q1oY,H419›{%Ul9BCiܵ́Zf^JQD)ģ5Bk)52_h.O7(tQAi!4coY}9įS,Z7G%)i u] |tKdSmםro 9AmʭOU9í.W`<8?2G>KޜYՠ))޳4s(,O+KKFp޿)NI`q"l.AO)?Q2onqMb^js|EIX<`51c1lcɧ嵇g?-W)?'d>7:7{µx2jH/]Gl*"5gb-_PͻUJ QG'\]ziU&N…pŽtgh*kVe!OhZ9',`,KiSٯ:Dsνֻt^4KvO֣Q41i-uP"Du Mg]c]QD5z/g6Ҳɭv'W6GuA)Δe(ҝ]i)1TNh6 T,3֮OTkeĄ(yWvhA5:8ڡӆ/)7m+ tձ*Ve{o tj hsYU׵Vpvݶ^iW(aG>$m0Gȷ@V?2G>KޜY򅽚5N1ϑ x+ \Nm=/)eyCƈ kc/YOֿmpxZʝF!n5?piK#c1c[ 3ƈy3%\O/-W)?'aEt>/(\1NYr}Q^rv5&;Xtdy(KYMѶňi 
`6J~86J h 2枻Mٮi 44!QSU[}nP[ XXf &wVрܹ} Ytb OkFpv"y&X x3oĕLslZg,LHdS1ú@@5QB `볬bƪBT5f j5q.K 9)_o8BYzXq7̭gx2)V|{ [PG"fYfh5-t8PME֏D(\9AŅᴬNֵ! VѦ=}IZ, 2 |և, Xs|7SJ'䶢/5؇J3K]=yyyyyyyyX6%~_?~[?ԮR?2G>KޜY~9zKp~^یH=c}E9|1g3όN,&2G:H2p{skM r c`lDU:|~emc;Ə}9Oʳ\dϮy7u|ܟkX*'m3{[ Ɩhoo @vH$Z u*|([ml}U#^>髋:{]ݴcmXަ__________________________ Vb;[XACp!{ϔ|>NOHɔ$s5h(ѫ^YGg\38T7+#yaU=8B>%3e2bYhQbMf23SG(5M x o_)V|\bty(Vv3&lyu7o$ 8da8R2mak6'>)1or[noBWU&پEkMˢ3:-!M;Ζnlu?W0 |Ϳ2{߱Wy yVAt>l]=Fi2IV.L u2z-EFn,?hna8 Z8Ǵߋ!|t +??*ϒKG,sĞ2xGqX(Ƶ!`+Ԍq/!29hs@9}2xGqْ[\ ųq(=anj}bs#?!`Lc0N6K9)\JsxGqG22q<]3M1& l*πPz>ÏAÏA+pݟ86i@1at(=anj}2xGq(=aoHDM(=anj}2W٩c`8$&<sϢ8cYЅ2xGq(=ag;JFs0m+ÏAֽ]OS&~N6ABHߜVڍǩVVkUL:OuۘgVhڰʛMV\g:ڶIu™E ñ+$@vv4IwQӄRuUlVWxŒJxGbocJ :R-cvV[ 5-K=`$ouSV E]&m7]ΏTL>YYm5ʩBЛ,)M1]Hlc7]QT>J9/K]2_ XfMFN" 55u-Mu-Ս6v߼laEkt]mH Q!lMgSV6>PocEuwE*`' @/S9b+"Ԥ_z|~_k'>4ˣn@-_۱ݲBP=6!R C4 Y.CWm{E6{+3X -_O/Z;[j6.BP-}y˴bm 7eS77v;г.T '=tZL6_}t +??*ϒl#^1rp>LAF$8KHPS\ZK0KV4SiYdivYxw("`֞꺠숬"Z鞽YDp`W'0nusCal쭁DTň‘SWHR,<'ǥk4K2 6T`6K3͝3|ep'msu&Udڲ`]Ƅ 㔁,V=}YGTÄtrKWK=Ǒ3U2r[$ - L)vfx.6c,ETfɨjΧԻAR Elܼ}u.-t]n6;D64{5%*:> Mׇ^vV93qw%U1q+ڽZL *-GN+Ut|ys=S+W*UB@2Ѱ˜pgflmsjJv2XOZ&lvfN:ˣ}7|>JηM ta{bNj_"9^  !10A "@Qaq2PRbr#3B`sSct$d4DTCp5U? 
,JG.f5}v뵿Wy$/SnHU"83DPN =@QDJA":]K?խ6'QY䰘pG?:_]ُR[ƶ5lkc[ƶ5lkc[ƶ5lkc[)FVLkz:0m'lpa:OW\5^^cnq;Y%9e}k F⢊9co5 +V7ln2^Oo&Ð*(a˙b#1i?ƉhqN4I\2Gj,&Qĩhvts#DbQF UZsm^/SGm$ٺt=Y^;C@# *q9!Hww^^NkjZڭmV[UkjZڭmV[UkjZڭmVHWߊ-NG<"έ6Q"dȭHNMs>Y׳$cj+WHuG>R+Sf%л+͐}ZeA@pO5YY X!$xMCl`Ci$8/W\/t.@+\sGW4}\sGW4}\sGW4}\sGW4}\sGW4}\sGTq4x7.^ZϪWS@iƌ8*k"b@PVl H2KHR>m 3Q6ZP̀iR eX<ƒiD<4!SwT6\(居*MEg 6BtrE!;> $2ʰpi5auJ@.Ĩ$o`WQ ar2i,\fgtP"NGPG|A럝~F<@f(&dc#|Шyd`2BGf8#|5Β8S!"]`xli%3͌S܉"(a"9s.!ݣ.eBβ 8)#ydB fcN/7,Eɦx` 3*`c3V;ϑ%f8h+.8|Gt-eFWwgՏ+b#+ ad5Z_"M:"K"FO,j)(`A# .-Z HfN`NWRcjO”1x˔5,nKQb"惠1Ճulw]$| CQ*"*P"`=v{ulw]`=v{ulw]`=v{W<-ڰ3~u}; fc*b'N̤i'䱊7-,*&mܤsY E[^ Ck{sn *d6 'n!k$bq.'tRY h_sKEcc\ɦkG)(bLSLM΂F 1Kq1wxn@r=@/E ۋ84h ډ>])25j`k=ET{˜ :I,"hQ r]_ t=+E6fCtX8jA9RqVN+Ϊ ӸRbб 5*'΍>\zcoP,&Jv|l6_?Mku$Ճ#%qV[LJLhn3Zax T8Bn"^dvsUlZT]0c[:r Zq?Cyj8:G2F`qW+leʠOdHLPW|1FeBC{DԶqē$F3$}5(R,QΎib +Dѩ\C38@&,ȖC qrҀITa+Wce(1K)iĄM,.Dr8U! W;][$\$.đ.NWInki Je$Ze;VqxG|A럝~GlBI)YH _Cfm.xɖ'늍v#VMM OtjR4m@0&8u1*$O p6-U=`$N(ìPF [,@"e=婢IM@5k\?.@8 s4]2Źz-hBE\Msu66kB\pǂu"Hp\[h`wc+W5kPD ,B .I 27l \OPe*gCo ##x οoN#"YFUjNZ>i%x b4U.*+v[<7gH]@* ~1nէRiE (n2QAt UqSA.$4lI5 DG]^V*ǔgD{9AQ&\84/f-7rxRϵe1hLJP*k$HgwM"tez;8KB!x\zCyiZ,lv )o-v8i7{VHhL,'+Kn meDaIS\). rE iF]$-l'(2:ĒoFW5O3[pKI,Y94*#*knwkLWou%6ݔ115/,$Fr/PRDaR, 91 { h;-̑2!SG0GHָZZy,&R lƭ{cv*݌x#PdTdsJ%{aʌ<;,eW\?@R\L &u2f *1)˨7{dRKxX.S̏9cQ4UIWŲtWDIwqo c$Dp>CJ XcV 8I>Ha@IY60L>; @bs Wqb&<29䁝y.7//[ *"TzĬT Y٦\pØ*H-^F,bi4#YSjSCm"ō@hV Q!p T{-.AecX 3FFUF\X?:BI8+`el쭂}O V>'[+`el쭂}O V>XPt*YmP dS!b8bdS6>uĒy/!@ctQȓhNRc+';(gLH!Bi}pmyWPN$PUБ̺-sMt}#SA$2'('mz&g,ANzUE*Ľ˕#Vb>Ji vR:Ʀ; S+>MK? 
zNC$;eFZEX;Cyjp(8R p[>Z͚Tpw𘶲rc0kBڻhek *.J[GUP8%KD8 6,JD_MC-EiYtB p#Âdc0z[#C [WPr]5Xbhw20Filf-tj02jo"s.b:jkiȹ >5JnHV ]$W*B:R>+% 7u|A럝~F]Cl E=!yyN2S,FQe6p:j9ʄGRDBt WTA#h̎IW1MES+6IҜAm0H|KH:C4Iތd9.S} uYPJ0WVV"8e og ,'Qgߨ^vݸ☍;(X'TYu3pNJI 5ӯY\(H#gQbED2etj]@qFj#A$F\W.tՀNYFj.:i% j]w ռ^W9v uaY\(H#gQ2a"05Df|VFA5$ntHP\{CyhD^hfLuH `[cF,xVD0 hW$\2/I"(Gh f#0dPs\ԑ)2FX#ԐڑȄ+TT% ) u F M~P @qάTjrYrIyɿQjIM=@j)#CGsXIl6BmhD,KJp*&H.˒ BK9qR[>XOJD12htw2|x οow(X"()w|?.̕mʌ]]iRAޒ(_bG.09QmnGOvPI_8QMepNRu*"-D >tوS9if$;lgC*7.$ػɒIs*qۿdUU$TLN5 NIU$-䫍LfN]gԢqBOXUK.ΈA t,$ֆ'qR4v%.I *5!l$%\o}x gN*bX.27J*$AscQM # l54hh\'\^Zqm3#>e!qɩM"X\ܺuh݇Rt$*72"XV*QhiYLꫮRWfA&x!~Tjה㺝'*-mCVqSJtBe xwHi&)``tҡ|چ}34G5jpQ >qCJ / ՟)E}"YI046<-1.Ϝl[mˡ4jה㺝'*-Kt#vh_g+FԊ^GٻjFY%4/#jE`FK m0@!#όTr'^ݗ#3Rܘ"T2q4n0Fk6>uP-ᡡL0!޹~"wg`f Wj5si΄cQXKc-Yq ƔB$ݦbCM\bI L1f l.ru驗DK 7# Q ďX dB9 Ɏ@g> U:E47: 9F0A]fB̲XqH]o[F [%5VsϴBLINgI!'H=5vSFdHÔt,z"h-uk}KIZ47.$̀4 )Wͱ5G.'NY> G<2r 1&2yF'H UIFd}j^3K$AEHaut~Os /  )z䇛P pC:LPr4'uJaOѷyQp$7@lNjx-.Pđb= Tk/"dȟwImioۿ::@ 0ZI2]dus43/ 9- T;;⬹MgxѢITDdbRW8eeItNT2#3Zme#VxZF eCASB)v0b<*73 ZC/3*夶7mߝN K3%HK⦥y\9ҀI8*i9*Y+d"y}tN +Mmdh9ߌS[M2Η@ޤW [מqPYEun"y Zey6mob2M럝~G#DUpaިcFlIȱvy5rk#p:MXACN@&=cQC$ЫHHh`#CӗDE44yR \3QicD7pT)Xn>[H}o d2o")nϠ|ФV$$qfK9WT"gO**ɏM-$y֛m=KUiN W@>DHW@K^THFt($HaGe#aWƮsKi#ЅL~QPO2+1'Y4aC,IDn+@%w\eP^Srb&C&vN:ڥR%@ъEW:)YBHW“Q[,qIQJ#y 6n#:WG}a:G8NlVЄ5,rjFrxbiD<4!SwTr1B|*";i\|$"Ӷ3>lp((Y:9݆8) u@Tw[98֫2e򊵷va %cHfJ6`߂= YA'HڄR\(wi8lV&%ʙ{yڢ C"()Kkk%K.s3=^/=s BrI$xe27XEv';!9Q`1".#"r xDf0Z;meƥ|4ӇЦb_́IJA^ a$riDJA)&T :~MObKwF^JT!-My &cZI(nd-44RkA*&:mHՐK זH T-S6&nd'kR~&P{iQ.G]K2hrԦĘReiEmX)S-FbEt :^XW{` hKE`S4QOAcW`V7O$ziu~Z5^ZC`_.7]_<\6Fӏ bҖ2:ݶ4b5s};F=usRFvBqtP4]ZȂQ+D"YkGz,IL a2daԍѢKqPz"g0+@Hu:W uAN9a.^ 2*nL7]:q1'YԢ2 Z1nvBXZ=ÉR\AcĎ.h"ڪ`AWI &QHLL(Xb>"-0\kB%<$wI ym2 n ZccZ=?JƘ.hoW\?@v-m9ƍrfum~/ZV^yƎz>+7i͕!뫘% $D(H@ &ȁ"2I#, "rp> gHN$Γ7r2Ǡ|Э,u@/ P=JrF921s,wyGZ1uFA7gG%|Ob- Y [58dlk;`Z=c+ #x婢IM@5k\?bpx|72c+W5kF.ꨐ"|z_z_d\("<jY  1.N,k|\SH ':W]8Xpt`pk΀*G @(} 
!Qwzѻ9v$䍔R#-6ܱ&L=O1xѬR+Ĝ"Ƣ0֫k721xʱCzpLZ!Uٹ)kl{(xhxJJ&xQiTn`HާuO1x,R+Dt_-K3pĉ-őGu "t#t:O糈 ס#:W^/=sX9%xVlsQx*DyJ"0)geΞoixය(w)>x}<(+,Fq4Ƌ#0T.+B%`T? Ta$blwfǖ$Xh Pj8=@b].c311'ݾ.bVC 2 x U)-!q]5s/VH.D ^O@MYܕ̬fC,/nl#Iv5`-vf2Pad-S 1:5b2!%ƨmj9%yxbCOQEl~r.RZFEDgo{hZ\Oʅa# gIو]HwVpM2\H"T/y:=ZJ =~u};Q VJdZ$9q5M ٵb.m&w,U1^u@ΈzUԱ\DZ > RŎ54]\8#*doV23Npw ٿ:|6ڵ;ckfMӺנ(t6[ceAwcM <vːA\$*@£T-K !i"Bl'-gTUD%iRB`7epi!ukd٤6Tp  DbUo;X8YٔXXBf7]$UD<[3Lf.6# ?:2"0tHURMgC LA5z-_B5MNIQŴoA4* lK*kg`@(:C! Pj6RJA#CܝfrJ08 <(྅Zb#"\kF>3,byTdFW!oq28DHeH)#Qpn"״6m&Q5ď AGYΰ*u ,2YFEZ"w슪$D͗|z!wY "gqh{8ɇ ]AcVq b˺ I(B-Xl)⸤ RVPGH6:-PGdӴ;IPWut/_z_ i+rY09(&Vy.VPdTKjcDvFPcmP |!O78n> U̍K F8tI U~àfGld PfzVFS Umob Cc% bM]Hk+N_žZ>2ԅDF6:l(byFXŪkTn]9"Ƹ$Cahu( < i!*YB 1M.I4KwZ? [X.OAK":xn4tBr-'iH1#}~c^_ x οo"* fc]Twi.)"-F!04'¬ s!ӧ lgԳQ33Z]>ZL{7ބBUȦچs'S} Hie`2BGfoP)ᨌ&CŌъM{}dɧRG*XX؍@F*%Gxm^Thɡ`Q} 4 qe(pt1<$)mƼjy<[ᑙW&XVlMY\1Ƶش5#>sVy^KXVxЫ!j$[euhD.duCz?EmR+EۼqYr$KxUE\Է7BCyTTrɠkc%Cd){1fhdm0YMv q8b3#x5ۋfa+ӊ$6S4TjI9>MQiZяd$UFy<`G&yصLᛩ/PB 7R]:ڌu q*hI7aBSIXaْ"7A|A럝~Gh]v[pK#xg*sm/2%tn \o_Kچu.BUibb8EƢ&+;#9:2 Z1nvBXZ6y*<,X cAA11­ DcLW4ݏ@/Hr5uS /}ͤu Esx)25j85@b(v&wr4I($깅&PƠ Z<5h8t{CyjWԙ%y{s4NT" ߤ2yA{VURIk2,. 
<[R5r:H/_DRufX.8!)EY54PmcrgU/o#'i)@d:=s؇;NiNqNζukC=ºo[=n*% i1I/ 2P9dHC$yJE# 3+)C&ܘhVv~ЎǠ|ТTC.둸"p,4WZ#"+ ΕlNbّX!%V4UFm4gN\> To)BA3Hn98"B6 Jh;4V6M uniC䖗յYfBFDX["UFȩ&e Ռ[:$I $ ~PRRr}5k`iĂF''mA!<1qn;$u&6<%Bw1l q5^x⣗\Y.>FBֻsrs2Vw/n JFI1I(@@:O3[pKI,Y95Ԑ2s]}(@l*r#I#uЖ~W~'5r[ɰ1*򂊈gz̰ڤywX%eltQFQ.D05f})h .koY%.E]Ie.\ru*]RɬAmhЙ:Rr5~W$_**{n\|3]ܒxMsk'\> 8O}pmsk'\> 8O}qJr=`?:bKhn,ŀID)1ц(H!RD[ 3^MlcXĪJmD8 :C/Vw*hD0 7i탅((w5K=lTp+.F`]T"E0!hqz~*kk#c`vBF,OW #U; h4MN@_)?'IC4ilLppsI>M=4+B0(DadS"jvNI7o@D"0F#PB 4cS-o*kf+v}Lcec?Rta*B&Ur.M[‘koh&9hs>hQ2N"q?yJY u# 0XUX6> Nk`el쭂}O V>'[+`el쭂}O V>'[+`el쭂}O V>'QƩ&yj|o3H$CQKk :KC2j7u $,$y2rK\f+t$ 0LB0f$%uF-݈) n䫆S EKm(x0uؙefMj&XCcj&2s{TP>cl1swf$-+31jް#*yM~2L6G^: #REYܽg3#DC6uE]u,B`HPaCvB?i}^.hLʀd_@/5i ]bw8aw`)`4x}pj+egcri//m+Vqol^]ypZ&My9H+nM]T$|PruܦPsAim3TGQpT\6Kf-ڄ7F(6D1``9/5ۂ4bY.[YL2H]U2A yByH%"]D9w3C1(OS6iriI^ Yt1 |,x լors:*8?:%Pu*š࠾MY$FRWFEvrkQ+܅YP+Oes;YǙ M>]F4⠶KiL@|(NT( b!s6:Nj.J[GUP`qqq]Y냛4=@/WHG61kyp#r76$!bB0i*S V}9l4A&rгH2;7M@.I] cCGSqEzRG,*JIqp*t\2##8aBic)Hf1| ;ь%wU:/Cyj ,2@t$AW2}'B_M|(TF5q,m.0UZɭ`Ldp@U\xh _ZCZ]Q`g ,'Qgߨd*V"OwiO\q({Nn"ʒ)+͌ѓ\ bڷ^w ƆWXQ6#H#* Ngd5E4T#]Z[FTM8Tr:2PֺqFHM/"|jDmA F2q$AV)E CG5~HY@:ZfEƮƜH2 kkZsF:0" h42Gd]tg 1.Z3-YR r=c V-Z2 Qю+PU9("Yr1j#Bu(s(B#J O[nNU%Ś6 6y ʓ\<O2,ů0pchq2MFA(bT&iq"2FHBl'-D3*N(ͽ1d,77䰩&G'KFNy[P*0@{Frݟ@/V5t"Uu*QsT0p$jdoJ4LhѕO ;*",z$RF9 i6P{U2[0Jʙ$%xFUQ"Z[KT63Z %4nolf0S!`мzJ;TdaIuuG#|籴K3cKi^YZgHMGu[48%cG:Ԍh }VkfHA8IS,JĐ 1 띍rĶ\+stnҹW7J\+stnҹW7Jh "מ6=!UTo$ %AOxNW/& RD$XP̒F4)#uGq>[*HrFї9 ʯJ.rG t#%i"J*KI_V wM2щvwG@D)bWǂ5bN!J͠G+iR#ѷ_ ʅ5˪I4;8 rp)-0:gӳ$c52T$F5gN /Į5| lx~& |@-K?l tykgm'BUЊZYcf 1$r&$DR~5$O +TErC jHNxΘ dLH*#v঒c/%IiSSlsV.ii4O2_"3`(Q8'Vʸ8=X wVm$Xy@&#HgbG) 2NpT`" [7{y8k.2<38/`ι̇M46t!$=Hp'}SF[e{us8߷~4iãFT'w\lRsxsU&@FBʶ\qTjL/'$8@%.vV8͠s8mU\䗵 97Ӹҍ*- xr<QS:W2Z֖Vq$ O r~ ߪ/n4va,:IBF8g5ŜF03!IsPGVOo5BHˬ2yA* 7-[tn|]1+i昩RTQsFp k"`s%3+SBeQK<)z &5E/ 5K`"J.sBQ!x#%f~!5ang $$2cd2UmV$yGZ']hXI-? 
TG'_Wvpмb ajce;OY*$3mUIsJΗ.2FƒkB0Ch#( T6:@/V+.BFI4qJBF+W XQ%*di"g:8F*$E8Ql<}$#s%"F.#2U8AUiHwrGaD.ApI]f-4ԑFf{}ZLe`XA-rü dBsH8ayf 9r!Fy 1l#p%>b"f((5pGAt-\LcK&ZlR7 1@*PJpCy9N븘IfLy MD G&7rCrAVAldž~PAo (6p*[MLIR)Cf$੦-4SFĂ@ 469aXlِrˆ՚aYWPᇌ6֪tz/Wy"y\9ҀĜ5M f;e;+}-D2󦔐 Ypn1"1M⡆LdStp&h/kZqB&JLBh& ԓQsKm\& ĄG1 ) ,x5K ]$D\Qc64F |EK3\\FiU[; 4J/k5V:rXKVm?C+omQ$%62 H;a+VuZmiUVuZmiUVuZmiUVuZmiUVuR9#U?!ABI [ h8sSI 5ʳ럝~G:InR3 W:LF%ڞ ym2 n ZKy$a(@U#H. ncE!1BDž]rX MF@]M(Bh.R(33R!5BO ƒElȧ7hM Rʫ%xNiA$2.IcIlw *bLre JBGTȸG+llK vhոr|ҽ?ن~#:bD2Өdta" *оs:|Ǹ} or;?sp9%zA:H"#yT& 'd~Th_g4YdDsxqԳvD8d c8],AHh°*ijNBvs\j+F_NHNFNM.QbŹ[{bOPbptmF?:{,EQ#؂ymsHY}FyJ0~dfBpI (BcAs?__s;2(de~NTZ (D76,b SI1nuž' -$72+I!fGl Vi~N)bȺ U͓ʂnN)E!jcSO 4}R|?Ʒ®UmkM Vay9 )s귩m۝!n׎$aƔiQlĬ滑^ZͪY2Uw-C.pʂ3; %Eif.&u*;`&0dd8 n tbpՂ4lQԆ >wLlw`1n~s\U 71*JQcF@,*P hM00(\O!ARRAGrW1n~sMhX{}#3z_pÆ2pd\C*rP{LOC;C^tNwgl,Fԉq9!HwvbXs)s9 )e. &GQ)FUFdȹ:T8=E~Y8#\"If4l:ɩyZ[XcvgP|j4X aer='繺wqV3W)t-,t4zxxjBvFS;~XHE'{'142m/!kxZ_'x_6w!đN?#伔B߆~CsE_ 2SO[(뉒S>ճL ڶqkXOaQ2+H$*2uW 2{ 8t>_B|Bˣh B%[E^S?`Tv|bhdmDcZEy2@G $:@gTqG,-#O׊{hR÷Hq+#^SOq,O"A[(-Ԝk"> _g$? 4-gDu@B3¢dVHTd#}uȮd$pP$^èԶ񼫎r AML AbWIt ^*)Լ$#!|vB2dyJ'ꭊi\q3FO4-I⇬T# ,o؟Ui3j'5͑'e$jS[Fdb2d"%d%"-RD8:p&B:`SLLGv=d]'Bb8<&&[DZ\SKXzhH;828sK$Vrjr %!9. Y}dHR\,QˤN1g GGE+S ` V̂]yG|8Һ8+0r]ZveI K#?GW\42|T VwI;<7/(#HgcuyElN-ɫ.`Sl&TYR;=btPs2/NeWY&'=(~rWpΒ YCuLiK| 3q( MHfF\q]Y,m؅kv\K0[i'{j9ݛvK7ihz?WQe!#"^iX hs/%efCg+`d Ici`򑨖=x?:@;1![ Q0Vpn#ysN nߔW,_F 3Qe'$'؋QmeXvmڲ /\ݦOa=྅]+[`u+8sNqĸm Ghhn@\ۋi6\Ǖ#FCGĩ4#G)\z#j05 =#iwP|> R<2SrnJ1|3GLN'+"@x05ŷuK彑?t~͡)C.r&.c;= 4hDQJ V4mUM֝k#A[D'WjB"Hyx>leI8$dVy ŒF3[-YÐC@5ĭ ;WY]2qӓSK|aK4G948bF3[-YÐC@4HK攇~\FLDcY,m DC6/&Mb0DK[GG>bd_xOu+_1n)$t޹gt 6}:k X.TE00Okq#u1:H#gLFu.ŭ`KA ^(nQ"(ђXhv@`0Iެۂ3ƌTm;x$#DOxxf[s3\en=`WȹKh/ff/il4; gPW##HP[2[z!Cr@d ѽ&FQ[':WXuFQ̀l@^'v$qNSC)K:S1$i\1w%I&[q^aIdq剹;dq9(dYrwX5U)*imbo#ȚG9 #2EP;PEO;Na@P]_CΙd\!Ktolv1km7T4@ Z)fW!əh* ;,Ӂ)!4ѕlj^Sgh .$_υ˟hKCz]'ɚqG&?: 6`U5m "#\DdPԶ֐!dS c`_IlYBFv cI`XOa^"G> REVV]U` hյ-%9DH$ X qX6ldBLdӬB̤M_g$? 
5cYV5};BKSO[(뉒S>1[47"H8"+hΎ4VFm" /5Fg)&-_'d:?Cyj%!cĄb 4qCyvp-iqW)O\TɄC^¦$tnV TI=!gyhH4 +i QB(Coi/U5AE-8 H4"P:A)DTvͫ\ ־΄?]CH^H/u_n?:s9ϖ4ՆGP8T%:+ 5w,'Ř3iCuGl ~C59|ЭL4 Q\M rG9QO4Lc BG9sy"|f0evwnILjقs [JNKCl'rʽeT.MkmY+zZ4KuzYN#Jz]۬k)d7~u}*o"s.b:kg&{{f>#9wJty8y>X3fځc&& !0#Lάlj`ʙ1́*F^9LC} GD-X0TdPrEsc,΁MC%2N F 0 |MPۘ9VbDWqr |j V# Rbnuu]1NB&IL"1p|. wDiWQ$z(H,I=+ ,:?KDL+{m97"c59%5 VH=>"|@[:W aa"LG3ʔ!N)3p'揫N(ɠϧv?EjMj򸐄:HE[jB,Q#F7. ˩C$:pGV!y\ۿFtפppf G!%OI>t8k\N}=7~u})e܀$sFm. 7EZMR r=c VL6ɽbe$RpNpc&vbK 9*F1 N\2&te(\>ww)HE ځMo,b]gM}zHW {VW /yTv(c1l TJWb 1 .3$RK`8ԺA4ilg2?8ZsVPq"dEkx$ VC"l+jȫyc+ow mVehW$y1 :`>Ux5 :W[@ϪYHmrjnSH8q7.zZ7ab 2G((üa SyeW))f0\~Eԁ1%YWD3I]Ta9Y$˶t.Yfpj;=g' ٿj8},x*lX#*ÔRqdJGo7VhPb&.HJJ1*Ô'> *⌌RQʎ#u<@t^Gkٙ Z_4ӹΐYpeXPgNwT"r|$ kIې犚TߙT04U18#J#քivm &`~_B6zm4RsX|1FeL͌`J-3i2iaFiDcI:{1"KʒHoг,zdY|ؔHb˸=h߀.r3ƦHΜ$N4B! *^dBeTWyX.%>.P&4ˎ P ol9 )sk΢1,j K$<0Y B+ :S>P;cR ' j4Dё-C`!G|lŤh`̒\ɨm 1D#&6 ĉ듹)iDFf\c*璮@!_qHe6 jGwF߇Qk-Ԝ!ϛ 8,LHߍ` xeFnek廿#Z>j}s4:/P ~~Lc"`3b/_B*$mX{dS)FK][\iO^82T$F5gNchV};2F8VϽHo+Ď0\rdPibpbdz":L"IP5! L::Ien^ZfhO80YZc "I"U*yN6ۘ|V7Imqk$pfgz-] Dnȁ085sx׳30[&ruVAӔ"gEtlA[I fk`8)ﳦ'JZatneK11"DrMf\0OMt_9_@͸)\~#nai:8Z{yg0O2":79 T@jE-dAA10ps0:b>h8<'dLC(KZ L?),1"T2H1w:OAӼE jFj90J +մ`r}Zew-G9ӔT\ۉ<.C95g}m8qHWQtΐԺ ?etOӨm. 3ǹ}3V%Q*0 *2Ǫu3ؒ92J"pgb+.T2`T] C-/~u};T!U_cZto:Sx@/ZI!xKU2QA!(А^GPH_9 (8^Z-grB0O.QC)$(ظSPrfkDb4MqXV$0E,SK:7i$*!(hN 7.t()k;|wTb(' $F\dx;tKAbGl. 
|uT@e : ^IDۧx/% L:-;οo<3>y9:VsKsw>F|ke;fNLq͒7+)yx#}m&0ѫd*0(%[0 ch X yW'0svz+L4"Xgspu#xhy5۝4Hы,QSB H -<-h,'YMos6SdFqBCLQ 1V X"V2U *f*DPP%²a٭gFe4, "VHomQJx:WXcO8QŨ67ʰ-U3a$r_)pG%y[15ŜF03@.QKpX*CFH`*Kt^#}9??C[g&?: 9`)cY*,Mo # Yhw;uw>_CiY" <|(9S2jֲk=`0$|%\("Fs 7tx2$(b / ,OF D⍎ :cХIwTëyh]Ql VyZ[y^є"w`2HY3/hPQo^K tIcWlgInGI\˯^#:Fj墖lNY Mc#G-X򙰸d.1@PJxj "C>LbI:UOZRR(A1gCnD6Q)@@!+NrL,{vJo KdE-ݺQlcb"8 (Nbiśk1,p o=e ՃL2Lr KANQ!T `l< ţgApMi4iSH e]F(߈dUǔ2yb ;0,]C]rHeJ}྅,i$mBP˨2Im)SctЁK9"4cb("$IJ6TPdT1q&x $e1v'MMDT%)s."TSg SO+g(~nRK5L*+9̨M_Gmq $E )#ajjUswEd%p$2ظ%xĄ5iki->d .9³Ք6+<3ythThP(w 7j8mQ]Q{҃©$p`P˼QEj'\<hZB<1R{* YjXGhՔتv ߼Y[=7W, #$#c9э4 G1H\“>0X0ND`BכI$9]VJ@ `N$F5"&A(?$1)IK8 'y&kȕap6yS#&̡:̎z'x/\;a?t~.gi~C(ۂN_EEq41InE=Jau} :շ(;è 9$u4exjkt۽ݸVE$#ƤˬF0Os=yF)ߤ2¬z!w>_BGXDU4.fYf΢&$D'Y9C 8TS$77B$ Zn,%󒒣ώhd'9buuUܖ m.PIA8ݣ *inf<m5 yFtKYVHD"J,9L$rrc,iʼnx .8n 2*imd>#iQd`A>|s |\RM,r89/DL$ߝ&pNvԍM4<Ȭ;]To&$I^i!s! yiyV`G iuk[ R7E̹`t`魜iIV2,SH 5xN`5dս*9#ySp;T[S:ȭuĒxjroER)äjrJj[y7 C3iqP$FXMR>I&A,fB蒉9*2S;9\8 bet])cK1=dt^C W4 ו z_TI (6l\!`hц H,*[klL\2P MqjMYEDGr{e *l(h +A*k`1Ќӯ$b6f"UrPX^QZf h[z)]Ǖu ^L(PFɃ%pGl!˱%5{b;T(a**?ֳODKʲ QT2A4Ghhn@\ۋh2WĄ!Ӏ2+\\݌D2+Q|4s1N+W/ #ЊxP+hݼh2C!$ [ڤxpQ]HlC"$R+jy7QeΜMwn9<,1>N a#5$I9'xp,t~-vsȊ`4I.?)#JJ t?6^B^?:v,<@lp+M6{mM6{mM6{mM6bXjp|ШL! yB46\ [,H5 )W#$cv|dhZkf jc3iժWAL4qHѿ9w@"$Jy x)%`ˀ@ Woe#TKy|G XX dB9 Ɏ@It-\LcK&-3k܅;3 #WUltUѡP7x@ i-XֿXo-2m+*8i9R<}^.#h?t_Uԯ H$ \rqS[T;%<UeupX{i @# oaW#`~ V̑<Yd ! 
5-ȍn#\9 {Uմ K6Р4g26Iޥ?]mSοoB8Ub$:79*nӒ7BqgK;óֈ>S*DXv9%ѡ <2i0i]g5^[ Rlem5k; WS6<ǡ#_-QKmdVm'xuBDa| xZp۪"IqqlȧT-S6'Nd'Jh^Bl'DЮ[VLp<"c":LdUԑ\^-@, fe&_H+OxngII럝~r}س:vM0 H$M4>35$qh* bV'{idu$ uE# It UAq=lֺ |2oEܭBIeFMesg8!)#7jkٙgyƭ/rxGMZoi^G|1fm !kO1cɆ=tZPM0nփ\o2sRO i( OXj* rWS$$ٶ;&JS4lf3p^^9jX8[BlcQ'\*jЊKHA4Fg :Gz]萵yQ=ֿzO\/VUb dѫrU…j!$5+Y6}g9^ZKL]H8 KXsMDWcSmݕ i I 33lз ԣ"ԈaBD5xUń_B\d "T=g滑x"'I:O'~uޯ/rɠe'},XEp wN(Fȑ.QTwX- IyVlmd$dt~A-?}87G럝~淫Q>?B{$6/6 \իƒ%۸M!ybnLA)e2!72oCpAw=6*LW<KAFq$ۑnx" %ơ0{XG,SQ 3.$菿H:*k׸Yĥ&'H$\D=Ȋ0:# 1 -t~>d!AF#Cr){oSx ;+u|՞~xNz_`?:cjE*T!j?( OK6sF`Q:Amw-&H$pVN'GFEkC\RYROvf_ ?6.!Ѻ7H5x/doOG럝~6NA@ƣSy2$R pkI:yղ:'POn0 4yֽDL/#^2N+2 $}=7mοoN k g9PGZpb_#7t@T&2tH۩(+DNDS!n=r?:r0yxmVJo ~MF@nϕ| G&_$~nƗ!$VŞ1ɏx!ې>+n:+u8U@;llll:h`o͟o>!?pV,]/𯒢@-Me"1:E-w>'T0ie#s#R0GQ,gAx-s>խκ0NT2G3v۫ .V6QgI]5clq*[a ,PmhmfL ~l1HH5:'iד̺ED|t^;k.P_Q펟4z( 6KGt^W"a:>f1̨F14^u*,؍ \4 1W tKEjI)`g|:=rZܛn툖2L Jm$yǷh.xNBwhRYplAku=7Ϲ8*+t^z1mq'Qwdqm2~u Tgo)?kv>f wRƺyubܟ>mŬ6:/\s{p_d)*">t/24Iyfo,K9"$QGT=7ז?)MbR=OE~o}o521G]4$wIE,k#g#=בz\sy6[H+J%~ _W+J%~ _T,d 8cAi|¤ -Ȼjf2L~|>zB~Ot[|h:\s@wSYX 'nNq ܠ'Q=wɍ,dq"ޑy'Ù%40c#+qn)>W#%ԏ|HVBGC|OʂD7 #B$/ù n1<۝Fǜiƽ:r-VkR ڼ>i7ոyP[`m2ӫr78|_3L*[DJKXC-&@Ɔ"cJUhq8 :qi~(n{o7y]{'W.fq~ P)˴>|Ig=t^;Lq>ۧ[tknmm}O>ڂkvpsL.(Tfbl>ۧ[tknmG"K{}ocTq,z;V^y1jx<~j>!\D1x- 2z9 2I'e# 8W.5.8doc^:u;{}oyX(x\ Wl$g`,K@Ğ;)}H3at5C_: ausEF9 (QS"!ѵ,3Z kgQ4r%Yo*6Vຓ#4!]oͲ6Pq\ˢ'OTm4ܡ;٫HVɤLdM"xb!2ѫ'FxfmL4(3IHH`i :@>@gե{GFD#] @ |M] .I&?Nȃ&̘mZC|O IhH$LBkzsZqÜgZΥLz73C,M.K7W9D.a$d4ET]!$ 9:z>)(Ն7r> bڻlYf$f.&  N7XZyBUʂWl.axե{3AKf<s\s5 *FI!ZEϽ+`0"UDQ#P ø0BW:V7$nvokvok;hکMXP~w5;s\s5;s\7\cy6h +w W;s\s5;sF"&/$W;s\s5;s\$DtbFB7v=sωVFƖn+oVz/T2 m಩RE\۴\gO-d?Vq=*luwd0Qiy4)*w1 k%}l$U;XɧN *BqunɮWtHkہ*|B4t.a1+Q#D.#$@XeN5e#nw"48 eTGyѻo].&)|2\Juuy5۝4G4{YH*:9qitCjD$L^%($PJLWHgpd5\hAhOCI)`3dxЃHIVomp g )Ѣk'R6+9~`&Uy9vN&^TL,3:z4V m˛|_$Qfĭ#'׊Üo?4I0?'ӍAp-|J n|O 8&jˌ iTjƜhDm . 
qf$WPF N`52F!1#x 8 2!"HP8` }5HCv)Y IBGwQ=t2|xB  Vl.5VJr "o3_Fcu}cVW9VZjVZjV +PBu.rKo ZDcsøc̎ |+PB +PB )N$ )NpzE6Hg!;i$Vg Q-0ug8Jn2aq;w4(Dȉ &,aѫFxf:yK#DG]FƘ9Sz{}o)K k*&,N q# ƚ! b(aR 8i~(M9ȶ22͹l$ |}:_,3c%y?),R+oxOt^?(rj[Y؇UdIA F>h x{y 6ŀp4+e%Bb 䱊7-,*&$IyH1m桵x'I@Ѹq\ c6B4TqRB$>2¯1.-&RI +QM6SZ9M1DmUug,"żj?ai~(ŎG4n3VW7jQ39XmmuDnT6ҬbRVɵcNn~j,>ʴAx#eOJ"Ĩd8f Yv%*x DG@}sωLダEw鲺cɫ݇!UUFI$28ZX_x63^:s=ǫKG&dee<* p~KykziInR#q3^]xד_ؕ_"4yb%+x$fRO:ɓy*.|'hH)s}C|RI"or[:kxkeƥp]~r)/GnWVZ޴O5kl4 }t˱1>^2*%~2@ƠGE)f3)TFؘtvP~Ri*h|jkq/cXi\'"OxOHb_4F?bB6_ۃqqC=r/C>fV#CPyg|o5:|N[W$:W$AX"$_U[g{|#m؏Qݛ2`OD(:# L_tk:S)r63:ο9 :VeӲUp](#` ?X[j7Z~]-"lbI?wdCn`֑075sMͬQUM S<1Π6`xԐ# 1$c@0gե{~k7X>'t"OJjL%>}#I! WR$-Gv߻Iavj5O`Ij;xZa$T౯@FF*/sRD9V+hȅ,D]Dd C?NqC~o~+RKo(#x0zUnPx4]:!#5cU̶CHۘ rmڳ~CƯOޑ>DE?ZVRlOwnft;+0>|O em%TD0I$_zƳF$mYM*R3(X GAo($T[6 / lS;%cFK.l U-VHi5(Bi7ُܾ٘+a p$#q Ȧ&Qddݽ02/,on"a?~X)  N\g $`>"jC3i:y3WvI^3z 72J x }o-էo. ͊<$Uޯl0~M xŪ5A)6k涿Nr k@R1$|xP7o _DIOH4;C qFbbqMx|)x28=׊hƸϴ|0G 7he?N2 Gt^?*+բT`9߾;bA+_#55;2fIqĐ`fPqinY.2 0{]u3^sR3`I vmI2"j)c_6&̀lvw$'!a8_%o'- Vެ82*èϼ}s+\>ϼy;3(\):%8"ϼ}s+\>ϼ}s*Yd(߄+l#ς jF 6+1}2i@UP8fgx͍`2ԕɀGirҷ2(!XOs g\V?ϝ?c5 2x)=gN#>y1pw<Dl8~II^ʰ7g݅itBF_ep8q+_=dL Xձ"L 9|P:V$Ob/YsrGi@E5tq?W̸ԬTyI @'jv2MJ0pO)K-ğ)Q ʐ?qKXX{>7#$Q~x'g*OԇGr뒬82*GQ&'Ay>iUx)8[3[\~\Y%ZT2(|N>CP^nHJ0(2EGfXd-$@@?t^l$EGu&ʓ rwDM0*#T6i*XBTh"ϱtHLoڧx9xX27qǺuΖO#*CQy>i∦֜VABGNtǥ5ʉe4H<ۭl@H*|GaMׂM <1" ~`::#;3 |K=q"m..aTZ!椎9U)j+kܐX?*VwҸ_$QS"9FgwB(T-2rHa0>yοi[ıj+{he {tp~eJhИVXYs² -m{cU-I(o[&P4W5'j(V/mה"R%~ɐ oOP[ߧHd>^r}'N} '%[O4EKfS9.쯵lrvt>=؝sH5 9bCƍf"4c{(̓I\o.l|^68QVOeo*&j5P#TVdpi5!,’Fi" &#o8?27g.Zx;EO3񤋨wʺq7X8sSq~EΖOGQ! 
CsO@|x)N8ʄfEX̆6| ҜX#t>0U\n789hdxѸuFLRcnR_*ҜVtZ&|O^g x|1{$>y%rff`~hXmb?>uwQZD b4PAxl~IK![D>sΗ!E^h'Sef7+ +O+c\Lw.`Wua<=b@~vuO{yQQ+;ѾKo!O J??va_6b5[IjTrl.ϛ35L<*S5XR/>67?kϱW쑉YL|v%Oi,+KW A?qVwVƉ?xRFp|G}o ˏ!^HI߫SpPm8(>:7({BԊTxGY'ps |@eswXԖj:#^&y]Yݎf;=|#[|J&R;0;?>mH<ΜenLrP~E OS+$;U(w@|x)N8ʄ5bBog5`v7x NUJ \t30NSTm!cߒq~UAy\iOcƢ #}s Uk\i- \zحĭ\b^ɳ;5M~3rm1Y1ayʕzviK|/>/zy%~,'Ny$q92ӐYo뮨\}GFAէO;yrz{Bч 3IJ`jXcL\fylaS\te'73\ `Q`|SA If{wG4CGBp|ws’85cw=A%rW*>V/o'V_㩭XǞ[c* U8wM=LCn>./e-ɳPĴOU_ $<N/y;cQq߻o+$$y'y/–W:R5CoE~ GW- ו?KHt9GNgˉ%6# G5^JʾT'Xa>8aAS%ϙ.Tz7dϐQnjRq7Z9 жq7Qpg&~pdG ۷3|:/\g`Q5Z܏Zq\|jIOWy˚2O'8oUi.gope"ˏ "57Ϩ('q[?5(D8Dm笞u@~b;g-G+MMǦǺ T.B`.SAa#ٹ>60)[$P 9\pcBP&>d!5lOrl'rwMѴcjE|<1g%e_Fֲ1&z?ofU6@{̿'qS:FU$nC4FABn1%{#xze;ZBk{"xH&E(XdJ3JpZ+rze9d#F8G0gsKVK3wŎܯžy_91Pu(.qʘdƴ9VVys<|ߨ5ac=Ip)?\Q{:8ne@|e9]q Jg1S6iwxhxT"!G+\tQ8qp#tclk0~ ?)`[ yPMB.u9uSRK#7[OD l72FX]Z-X475<\ ¢^.5H .g:>`N|~"2(qݨɌ*~)4'nrlFFB\7;Dr?" IG +L>99GUrrRz)\" )+C*pͷNQ0v Ci|>2xwg Ȃ#3'`*I>_'P9~WA!j<-OuhB4)(B>6d4u ã!agXʉNWD mE0>Xx%U9YGpy:;ov9Zbt21ۃ@$Nh Q7q%P#GQ2k%?r}c5by{KQtat=HI:Q2c-ݸ-zi>azJ*dZ=} F,h>f.|^yb8'WSw}&%Q1J(|o>y=m8,CȀvc8li ߒ)UQU eq"9#sHpmĎSJQDQqkv{-'?1*CM3Hʌ=u~7p/h?a?{w'g5 zE1^5q1!sN(h 'FjǥCG3\!. }Rj#㉏ȾT$x&[|;' Es&L ^T8a0dqc݆%>Ra39ʬq_7Z \BD?QJIEG[UA-7𯖮`F@FCݒ2,N^׬N,{;OC+ERj3qLy]6"wdZ»qsmLr19gsNX8k:{g< f;4DJ#OɈnۙwPcLryAw`d:|SHc< |xR`Ԡȣ|,|iSVlqky{(ocoPK1I8'sP{ pgQw/4s?X6s&oYp&(;ogڞ? ~*Uǝ:/\`x5Ĝ"zV܎8a'M#ґgbrpXPWV\@eM"n Tuu"v~L`m]5I"0@?(QLMo4?z :Ww}@nO'K|8GS0#xl?ߞ#;ՇX;CԣuO3CyId+y$}ȩgqC|L2as;;omҹW8J V5Ę'g$d|bP:ȱI2vW{39 3N(%sp^rGg#C IH9'~{Bd7 \+%ssu <'?/ _!YJw]Zl!db>eH^1nW8J Fu:Ź2(]` \(\%8+!*qAN ^ʐ ,I,@\+%<-G*7[U߁:7u7q8FI0Xq=}|W8J \(Wdg=IБ ~p= t6*@ sQ\M]zIW8J \)$ AʲE9Mi ǭ6%H%5i;A뤕cyxqpW8JI-a{?*傹?l?{}逢a) rs[E唸|WgIVqWAFEFYJeE7H=ci:u;^ǬkMtNwfE.FJrNAAcbҳ_25d;>5@z|`! 
K"L&0>AM"dUʄF D⍎ :if.&eF{N0c;LUīg pE27d EwJ=9 2I'!N R=n=sm1Ɗ0 ]K@ HFK3hr}::R1I22CQ!/gW{s2:s22 >bAҏ:k#Z {\Y9TcZR+ Au <{$0\#Rt`rw_&w r1} iI(e)9m=xaʺy9-:9k?*9{M[U؅C%7Э,u@/ ~vOGHŤcLݟo|~1XV&-ߠ=ѺA'e:p0-Ԏ-#uAw,y3FCj/IB?7qV؅OF >sԫi2kbO~@ PI$y)/&WamvŽ*k$HgwM"tezx:pDSҬ4@p{0#f5!= ark0rpmu-JLtLEO iN؋o,IcTFAߎlEo/ImMxӤ(/ 8GWų&hɑϣGf4C&{VW$mJFCx̼_ JfyzIblۂլ9'Nb?2{mpՀV }Ƿj:G&5i" >#+q$jMWhe5oDd?<Dz[5oDJg,cQ5=Lj $ʹs2PD.# ]xyu-ݠG/4)#DRI"Ĉ"jrLj $ʹs20zb-@'8k߆L`$( QSB4gP$RD`˩#21;Ul?guDq[%1ãbÎjKh8BZNFd0 27R[FXc 3*;h$d9  iMr7cOk Hǧ`I5qʙr 0!ƪ6|jmyW0J<Y:SUL5 PN7e XO' ljHv[gPV<WX\XyHDz0\cq4m&5S[FgpĄj9dcap"+T UCqE a֪9#\tnF5-2 !VC G6PȡR38 8l4lR(Fqljbέ:s"(ô 1nxօ $GATJ|aUUr3|[N{n#ލ<u"ɂx>;qȨ" GQ\ [FXckVAD Aob@zH{xf`!MƉ 4"P:A)DS[d3.5G$` q8 b$El3@`nR[Bw*춎,W @&3R*# PH5,Jn$b"fSJAm b(q6_qyAFj(4B"53#!#'A5r>  5F:s )WY T=ϖUJ ~q$J;T)b@h )*%)KtR`% TqfI5 .q]Ϸ RQeshAwpshʊF#ŕǎ9TJ\?eshͣ*4=x\?4%#.~5o~W6mQE~e/'$@׃\?eshͣ+GW60CKshͻc6m\?eshͣ+GW6줁ݰH=Dͣ+GW6m\?eM;\~ͣ+GW6쨡D8=YP;:y shͣ+GR@@:նaи ]$ĨW6m\?eshͣ+GW6m\?eshͣ+GW6쨡DbIqJEw,x )Qvܶӱ<"sO/†hθ_*DgRx~({ 9@mSýsΗ{ 7cRƼSe<ɝ#;10=?n^o|jь~l?dtFϷsZc$Αн24 p5#sҭp!pbLˑF\.*`! 
<[;#m7^vk(C lĊ{7Fr$#?PHIw4BHE5Ja4+OPԸ[Ka㹏tLD'Pm{ʀ c;džE/d{]?&f w_xMi7͹d'q_@7EsO"fAE?NRpE.Jx7U,SXgVCyIv\щ\f4[JRCOg5}efEս`PŸ˯j_mIso1v2qN ^CCƧ ;7Do7ۏ==)ތ8ެ:~zK!yr=u8/]71&/鼆"{>c}rNE^dJ)Grd8~Vo ]N ]~/1VVR;eI6'r800#hF#z*ƭ[D>I<>k*[^VJBXvd윟63\wi&OatO\ۮQ6gjF Gq4K'VKeӲT}{tw*[W7 r}7L`j?xˣ VUOa oVDogˆ#'/sh9Y" |$:έZRo ΋:_)zo4xnX2toyx!Fʣ>c}z^^SptF2+đ#M}|Bw|" ?9!"L1)w9 ZB$=g5!o.̸lNz}OoOL:ua#O8ϘM̏;{*_fOa؈$gؔE:a/6O*(?ػ躀ҤITh|WA4VwWEVJrԑGD[ʴ6 W<յI?k饳QiQVRod W4RXrJC~$eyKu2=3_Z$BHX9a#.s74(p 3d` sJS9ؒ{>;9(Ğp oVDo=='aXì㻒0](<ِG\| y5Xa+$M7;͢YV9u9t?P%9@AQdzLVjHDϚK(WiSB64iq\ Y/Ԍ$eW'HmI>6XL!CTa8΋:_<7F/^õt߮#׳30Ϸ?(OV:c)M+%|#?\'Ek[Ien n[6&ֿW_bj>(ǥu A*g7=H%A9PVB;8EЄ t#szKZM-cҟ(Y5_O(BDeK0ݟo>7sS^xY$iO3t~EORN4VNx8*dUV|SiK#jq8})CP ?Iuu*VPi]%XqGS:$ p rbx$ lIUSz:irѕK 2}uQK*C\F7n#jCOF7pYNc}zn^T)麶Vv>2e1XA.EqqS"1 x;ؗ&F:R4l@i#P{.!a$M0xNAuEp Hr{BN52M;wTO_{@Tt?='a@IFN>xdqц aX$o=* ہ\uOe}#Qdܱ;c?BD !9A3j6 X#^]x<Ɣ; pdCFTߋAd`Gst5.$5z;9[t( MJ&?[Gkhm31 A5?[Gkhm,hm$O=FM~)so;o>{{?tFpb}'8ns(阑oi pJ!ÉxGQqD>H(Y=d'NF$z F勁UqFS;G"WSk|v+8/VS鶼=ge/]L4o'qF8t$dz/hNt=ʞVLkJSFAǮt^Y yCt6V}=6gwCvewEEm)ijQĪXuι][a(tl*Fm,7BFd*jdV#sgSAV+CS-uYV# V(#wlQ eh0<TUV+Ce20<5:´?ZRO C?BXSqŽZV+Cea-Ϻ/jl Tn$rO h?ZQF9F#Sep`czw>ljn`gӇ<`w^BǦCٞMԒ}8zWy P :8 >z!q)zo=e*So-ijȘ`]i/eʶΗJ }Y7V)*1DYKW_3]:\-O-Mx@?+ql.PsiT :럳lA>6m?)l]q O&WVHsr"D4.T,}S27#&NwW[G}ȉf|ި=VgKU@!K别˦K|Su(Ebkfܼu~QLu8r%XˇׄpsW+#Is0\0FFPȷ #vQGM$iiFv%/ (agvI H!g$k-%Xnd.@|W*<ۉehfp_6s1"C?N$C36:%x!THWK}Y$t)g{l̜z r c2DHAQ兽Ο7&;DsЋ+P 4NZfcAiĆHI0VCBI&Ma)MQ29%_#Yܷk/ wTKk_mICv+Qβ%>C!xf&'ijWY=gU\)42,4d4uOg=j;\sk;=9 O Tܟy3= pJ8Ia׎a B9uղ4)4rŴPʾA^&[ Ҧ0C5n&tTߊXi\'y58P$AC1'*heFj"ehZ\ߤфOF˚e nBM-Y^@[0S{bjˀrKn%$HNxШ!7hϲ@Y)\H-mYv!YAwBz&6h.M6sYӳf!ڪhqΡ 'sۘ.@D1J 2o;PU:[uwX _0 EI\2< BMaw=:H s8 Oay?P$Y ҼgMijՍ 5TI Ut Ku:ihszAAyw%HD3et5%_4)ncS ąs(UxBECay;d&xu$&`)@4֗<!؇ %Xb{c!VI1Iv/SVMEW1IU q84ʦ<,u2rwi)YxdL0D jF[xnF HNtμ&B'n T0%i ON#X[̩F6tk};o=e*@ëFp§]T](OR8),N-ۈ#;SnŲl Z;P۴@[E.y\HB8"Wb p3 #ψI#cQIR@*G723Ȥ! ~/Q ʓb4 9mTW-D|XeeUۑhUEN@6CTbC($BqZ~IV#|F8[RX(qLE4(&8QeΜPN L2W8=VTdʒ. 
:8 WI8a&uT6i* )da.p bqFt8-ǝM7UFnPFb!u 7aWIbF5 X@P5s6NwR5`w5yV9bף7 tBca"8žo*Ve 'u[@g5hbAWHPXE n˭&.8#&B˷~鯯Ijmm]*q42~}lW$Fܶ#+x̒cQ$~%7Oqt~C"DIJB΢LE#Yj&CFr>mM#I 6nlЂ'{ JG9SHM[>x0AT֢Q' HT Qa{bLQ6N:NH(QEhI 1FcQʜts1lBtK0ǡ^}``Xl^:s5eheQuBuRw;Lj][,:qeW֌A0iNB&djhbᰥiz$UMC &S<@8C XjgBTHnR^I#NK*X/+wΚHG6&t0ԵnJ1|3GD8ܸ<31 wn$I!Q\D+gI$w c crF*g;!",C9YJ$'91p鑜0V4 #LpR D|e4@,Xd}j_|<#* HrcSzc)+)Ԭȭ6{mMJ!3p@'˽aEPK 8EiUVouZmiUVouZmiUVouZmJWQ + <p 8mM6{m] #T'46 G Iǔ֛ouZmiUVouZmiUT2rB'-b(?UӨƥÆ"{mM6{mM6ـI`"D6,\8F5VouZmiUVouZmKCcVjqx8mM6{mM6 Dze ,FVouZmiUVouZm9'q## 6{mM6{mmB #T Qq5VouZmiUVouNru39=VyP|ݟҟZg=sΌp5VouZmiUT065`FOCn{C`TpdyMiUVouR%]^P ҋsi+?k09QM9gw:Ш&6<%Bw1l e3.rXyn m4CM5T.$Imyeu^A #cp yЧ9rEVa|vü܊KcD#P=jwU+w,д/ clɢ$ Gq؉!}5/+Kka lFnS+FT)95,\D^Y`o&ZC;:q \lH8KWLud؀*`,Gȭ6{m]P+h8={&PrmZmiUVouZmiUVouXCT`NYNe>0FEiUVouZmiU6fK&N7}ǷumRef>Ş)e̿ce|CNY[ͩ; pTdM#p?F%P{\s½~aѺF:x(z$MEM*xT$upEF3 M%jLPՆ;-Y[W9{xgEhMuhNwJDθi&i +8i-W`zά1QƼDkeE  @DBgzKx4bI%TsQ@IbY&\ʲ'+1#Y䗋LI{}'2o$+<f pڢǤ~hLW '60d>zyBniB2XQ@:;~|A= XI4h8Q2d :$e{8`eh^XDB:*'EiB'Y= hͬЂYSMDYlM:GErj[\tIy,NMD@$ 8 J6!1/&u Oogr$;2*(8Vv=FP"V*aL6ɨk0L+j3;.w H4A&\Ҳ}ǷxU*kqnLRmOCID[hFqN,NY=gUFI>AN0YψKsxWg(Hw6>.}Ф&#eeAsIVՐ:&9&Tt`qH#eq%@쓍XF{Da :/o2h4~]FCx{~Ĉ2|ML@v7ƷTD\;=F>1[y#/uxWVsHNGX&_8H:eX+.>Q\=n>sۏ\=;^>!vsۏ\=n>sۏFҌP08aĞQ̭sn2Ƿ{YH@8=\͓~eMrtOxA12Er†hmZ1IKtTl}g6(s(n@zE/]63nuNL38{}oQF㓃R_:˱CHȞU~"$?ސ_݂XǛ\vPXۖ J0c'~w7G2';??wxWӊ9?{lL_N1|p#X]yșYeJ28R:Hrcgр{81"T?%)UFpOo224X/g o4RJWbOUrj)/[>,[Z'<بb?Y|>XNpH03O:/\eG6v1Kt= `|9.0r|g$>CK=ϳ FUFI$a'&B:|+_{Wy,6H_83qP-[Gy3.yCSD&jj 41Y-Qؼ |uZF&j.F\/s=uQ<&21G>ST~_S''MFUTp΋<+/L^o-`=5 HܸpANvcv'19c~(߀]1,r@G 8,"Oja+ cN=9{ ~Ra~6梷w BaOgX_ <|lz\xo٫2Kx`:/\Ϳ'U˟ǏRx}~S3Hi~G<1~)߀|)GOFY"^DcV8GWy.QXRsV d Wp@LI<ŚVoj…ѝTCۅ>'tH~ O˼y"C=^Hg#ǑxWф20A4' 2m=iQ1ª$N0a__ǷF* 8m?[Gkhmm.~:?[Gkhmm?[Gkhmmgg uoZwm%'&q%rIZoַk[mkZoַk[mkZoַk[]UÕ'p \䚷G$TJsI׍3<?2Ws݃ #@8 mkZoַk[mkZoַk[m;3.H`zUG^z~ 5CHS$H4KcG{uGw~cRnn.IrR[hMo<146r%ᎣN"rk OȪmc&"D15͂5U,qA4W.\݃WqƮ$iF\GOO' 2pͿV[okUV[ڔi$ yV2jok6Fd5ͶFePElF$:΀{jbѱ.cy] Q-(|LR3ć:B|wW9#RvТjξT sG- \XfFT HBkDBInJIM)O 
7DMAC!9PU1mR sVcLC} CSY!K;3\tLI !5m3Cj }FU}4W ⠉c]DƂFMr:`L.'ɧļ `Ѯc*f@0[UA[kxc#H I5gAXtrR @I|5$Hm!> K!)Gj$$$$W@g'vp$CޖP lC0qNdP`٩l6r; V V2|+SӍ_8r:j(#Dq$*ObN%\9 ŊDY:1P6b \9P86M$q@pqU:h8dp xZ@ # ߎ[okUV[okUV#bӻEN!j֫{Zj֫{ZK pcgzD U,W1KJ0 2 B#iZ'@Hs'R 4E@8lArs_xܥ"a*M+WG s)N$@$Dn:4Jud_5LrH i,#2](܃42g I˴,] q銸bL Ĉf\ى`q'Zj1ƠG\xI5F+Ccf[NucNs՜=3z&MHvUp2k Z!M &:\V*˲ [ h3b, ~)Ufi!m޲ fҵu$@C'MD9:Q$g*DX|ucq82s#dbMruw5a$'DHܒ{L&rS,c_ lg\dC#kL`#ZޠFCn0mAԛWm4.pC,W-!`Z.C)I= {[1ΓN!լKA 56\>ѣ D]$P2ژ3&;Oq|_FrTE7EB, t9iEé󊵼I={tȠ͡)C$ᨳXdΞ8%nN"-8 hk5#8x/nۜ)*TuU2GpA]bErRhdt8p<:/\& *$xVQxB{%1$|@|uO"ĥۂ EFLaAؐ38e꭛ @ ƶnd}\{*IriV 9Hd522 z'&.cINxa <'2dL$S^$x+` kvokvokvokvokvokvokvokvC ƿT=1ScFlyƍ[VϢ%tPzv ČQpR<"0厞;4b\K"\'(c ̫OmRfnZҐ͵'mqC b̀cZ7KR*J{eeh\N-\ܼ,̮g?"㘜Hb3z&ԑā y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'2kT.^6=&y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'y'^F%?\sPe k25Y"5qҝBXCG7ShMDsД#2D@EOrNPAMrj MgK43"MRJp ġ=9vg$H`8=\ܼ,̮g?"ۘPc,"5`Q=h4¥imbY"dڢ:M@av56b:hM 3uQtumᕆA$]Nf#y>b ٩uUD\* $R$QtG"s s3]`} i$Ycm' R!)fԷ(<4#UI% YФQC[p涫~PΒG1B29梔JоFJG` QjVrP:tsBxG4 N\=!pfV &1!4'&MӭԓJ ɩH G(\&߆]]t'#yaYb'V9NEA09%#\CO$zcM\D|xpE?,-J'$wnADd?8ou oTjx 2㉛P [xô9`2Xj841s1;:MsoƠX I4H(Uext^N H b@)21BIb2pTF30#TbRfuj8%$U@$NèԖXэN >Pv4S|I$Qw+BF# ۪דinfMKxF,Fjkx}SSthƤ?3gƝxiT#@#:i#U@FEZA3o&HT2~j6c[D 8d x՞-c x+fޘfHB!N+.&I.jshAw:rsch#|fyHJ 4Ek XD# vXZo& +&&x%VDDОl =MJÊUᅸVq#q 'P(FV?V IFaKo3Ap*r6\&8, jT%b'_ҝtK>V5 ֯m%~WLLm1op5mHn~^`j{'2n2-rO,IIqW+jiH=c]Xe0bt`HI&60z% 4̠hR1BfENU'G:^ : U8YZb]5WܚcKq1wxn N IöYj-(#_jQEyHWCȂ+w`;dxK4շV_[y~mշV_[y~mշSM# PZGSrDBbomcIx:۵g#[c B7c܇j]L[eTE/& S]uMe2TC!fRRRR꭭.>VT$UսԿԿԿԿ?ԿԿԿԿԿԿl I_v%u*r < PPF5e"ܮet5$K dSZ\%bo%hqOap )cs+7?r%'tZA7W1n~s\W1n~s\W1n~sR$+aQL++ A [İƺI IԹ*Vs\W3|IMs\W1'HzB5n~s\W1n~s\W1n~sOo$(beUP?\s{}W.rON<8H'ƷQq"4 )%N/4~ 91qtCBsW'p"F/x qZTh7Ҷ4^r5wk *~N$ؼ#k*-p~ q\HW"$pji Oݣ{V&)aܬjVcꖯ)\?Qu#Gz_Y{*<=Rj'p|R}@ @ےz|omQWZ5T/̭`Ñg5 jh< !GT@GR0cil?}+\RigW\Hd~[ /;ȃj }F_MyvqV %'W*2٢rT- [Cۆvah PD/P(&(,, ސ˩<:f>sIscC8GP+'ge(cnuNL3`r;LS9SQ!QIi3j"A3y ~u-H27ÀGD'n>r,ղrGyI.+9A1~x~u}*|(-|yR* 
<`<'@Kʃf>Qul$q\lͪc3`^rk"3'yE]%P{f%~GA0\jdvG嚠sɇi"!c1IOX2z_ դ-QX 3얯2>ewGrEha͑I/OgFԽTfrjؤ*éBbܥteL6(-IPBTvdljcWOPyn\x?'W/\_^8Is iyypU)awsaSfX>0x;2(:Xpeb¹6Om~zy(>0r|fU=2*!5y'?/Hh&B X" j3;hr[|*GpcUğRnJ0T`$g1KOhWP[љ"fU?k*[ܟ58cXP;V |ڈ{c?T&(jKkDe^wzaVF_I*K+b@"`:Įx5~jt !@;8=AT,pȳD#2Wgpx#w*JK|L27E"No!P4 I>8B^I|σu1\Emccc7ʎES[ J!꼄 c"xU]䟩RxKoSz_ȱ1<ڨx*,z(8! a;LH+%{ R<@Ţ5@ m\r-xLQUQw!Z[lH -m0%fA|,@ y楉,M Ms-h:8`?85w}<*2~EglO&&7R-uh`'6&y6Ql y,pdv) ZzH弽s(n{]Ǎa~lvXd2 Ӝ2mIwHH"Dzv!c'yGPǃJrAGdc%w+xێgChF )W2۱KWοon 2M7 A)|| *&>4:b[TogZ{fso`#fcϤ˰!"qcdڧa~nMj5{eO//䯍 Vrxg/DjeϐQHQ)OM"?k\m5VZT?R5R޿aQI}\K^8 E ܌WMrtrQ[W(\$նyV^/jFnjF8.o:s=;\[E,?ğ:5G 0#HOtY|Kpv|lWR3!۸C!3w]Cl,m`s)I'x ⇬qSPdŞ2ُ拁9 e8](2:_Bt x1bnD:OE럝~f$glkc[ƶ4b 3{y+c[ƶ4*w"0GXѮ_3\L?JZiTdBհ#5lkc[؉n c6O#giiNI w0X!#8'p lkc[,I44*QBvR[2INEZ`+³fJW:9:H7c4<0-}Qԝa3FMLL(ѽ{&IN8:و5Eޑuj4rhP&AN/f_(]eR mē}7Q*1kэR)@J:9LVRG43:xu 5mo`0@3$q,h[e4Q1hycbUKrU'MFpr8[o :rP>O3\𹩢T":EsY\F$X05]JZ Qa"v-dyEJD%`(ÜL ]ǧ LI6q[> E#k,\]LYQPiJ0hIH;pSh >@gK"!p)X1f;YF̛¸Im*o<1 Th$sqF ev-lF.PYâD%IzF(d!sFr{>$j&Ka^(/>u=s캆VV!.g!4J쩸ngtb 3E 1R\,QˤNaكQwݎ"ri.bR%g.14"ӣSN$۷9`#N~٨cI3.|"-W T qԤb,c%9Ƞ[ @dhyseĸy&:j[ؤHĮSX<WHPAhʨ!|Uж6NѺM$(0DxsJc㡞BWi @ps_=!Z%Ӝq$[ѕΜy"|f0evwnn&1 bRsS\3i^Gf8Dⶎ6cZtB3w˼ݳ*k(gfSKr&gchЂ*7N :S1+{p@LeV V$y`''ע;LOPA KF#R^ܢG* oc1W7f 7#fr2GP\Ml"Y6d6Pixus8\+H`2kSf27q4XRvsRM$١fmNs嫋|Ȳǩ}ۊL ;j.jinh;9t$:)MK 2#'!~?8PMnw>2LlĬdOP{v(׌(z w1$FKq{s;Ž^7Fh34;8iRwj{?:KJɤ]$b[5Mypd1Ԅ iiDH&-:$)wD.3@=8jN` Q'QDX,ӧYdYDڵvLVsRHҾK1،jy[:rqv2WR A&W$قD/lN\*+$'sT*qƐ. 
hK,i2!ʬȌP{QTd|BaUuSOa~3Fu*U4"8p>i⧭Hsq,!p1>ϼ}s+\>ϼ}s+\>ϼ}s+\>򤻝ч1K$-*H}s+\>,O$Oq FAW>Ϯ}I#/ZB<; `+\>ϼ}s+\>ϼ}s*[]Jrq,!p1>ϼ}s*KxY sBA ce8sy\W>Ϯ}sy\W>Ϯ}Ks4NA#p{,#0]\p >q"VM[X30唃ՠ @H@G Tݷ]kr_?.gp`bW?Gz:x\]h6}600}0q餅)E=ml ?B5j v#:[?XpsmPu;)PE¿ørοo@;# x$,,NNFt8B5TUrQTdMH4wʟg pqc?#"826NK1!p?g <.^n>@K]<=C&R1|QPtC25 |02=]|k_]Wle/e_0x-HCJGF83=Q[SN-#y=i#s Ip_ Pu]BFf|K@`8<ƒ5-C|7tǁ^y# :22;4xۜFn>;-4^fÐ\tRI)D]ȫ}eq6d{rpIL4< (zrRI)D]ȧ tɓJ'q@,Ic\I5lkc[ƶ5lkc[ƶ5<JӜZR `u=oFD?ȷa\yŸR,5#tpQ$ʑ+:so>>;cz#ԯ{lpUlp?ǛyQA_ M \ƧG|gc# x̶9Ҕ?CzYc K*1Ǯ|w=Gc-__?FضCR<47('k*FpUA|!"OI <]+PdG!<,JώXG^~+ǀxT-K&-zt?|HT}C8:V$s?w߫IS<߮{q}~߮{q}~߮{q}~߮{q}~߮{qauGc-__>b%jx-9x.B@?t?5t6#@_qT,:QUI&G#2k2](I=G8\\Kd`W?f)]Dт*Y "C)h'3 B0Xe/)܉װe׌A; PH^t6)J$,wԌ)ؙDtĻ6 M, AcYk'HWTeż.ˏ8-Rܘ )Vb%')E-2nF o9"6+bZ[M OQ^R t)LV7#qqP]!qbTǐKWW=o1#k.iXx%qך* 65,k"IѺ,k '1{=sV=Ԩ8'Bh\mS#]& ыɳ E RA,"X׋eU(9 &ĩ ~Lj< gFvE!Tqz5ZR z8lJG=FIf_|OS|R6xUQ߫Pb0fZkWWp7 I"lnБ23[wfLq򨤸E[H!D%o}@bmLw24t!=HsZѯDv?ROo;NmLQݠ ?̵z]Ǯ|wǺi6b d$43$D'< ]$z~{A X mw=g%]_\cO>Ɩ\%N-:9jkXObI]2~hF:FKWW=<-,zo\6YGq^ O(_=sV=лk.ي95]]٦Q޴HFm1qF0+H!HŝƟ&y}π!_,$/N>;cOL0H)z;WQ,!"A&"Z]‘R.eSMKs2Hb@S%Ns}kԕr$2H!0$I]LT&t9U'#kԵ~|[[n7ў><|ó5bO'=sV=3%kPr{LAAM=#CGrXQI6f9f!иwV4[!6s"$ ⭥T"L IB 0"Xˌ g5!˜~z#ԯ{|._Gc3[(е}Ǯ|wǺYXiC R5F+BܘT()zk[ ZrW%Rt@6gqIu& OB@]_b9RjkRj,'?xTܭm;Q*6BTR ʾI䴆$m,dT7h'ZLJS2.vXkԵ~|ȃ5`Gq'O8wOqjײV]F3lž<{k}aPc?FWky1kc&P1Hr)dui[< [-;nw:i֑ɸht5)L AA-PrSN bq)AsIBG\̠x4$q5#IGc-__;!E"tޝ5/;(53=sV=ֆ}R%TN4fqPMp?p42ĬQz#ԯ{%O^_^澕|&!_H{F0;\Տu#_-V<%+ GI379Fv\p!UUFI$Zc/CM2H<rqP >{u =Gc-__;S/H.g4'OO%n>;cX6LWԄ1odPet^5T8۴;-h2)As434DMr}So4ǥ26xH]B:79 T@jC45#8WRz#ԯ{DZ:ҷ P`/s#pz|[ۈoiYMif+B8ѯDv?ROo+Mj:5RQ_Im)H{\Տuo!&G,-̒K srSQ@bLF#Cr){oRI1*.k .F&':)9f~SII#lW7MdV%SkԵ~|NtK8:8yv+m)AxHώXNHx01fU@7VѠhߊeOX PH @ _: rrjR8ή9UomJpj@^~+~y]?ӱ~G~V|0n|uIUV $yOuͤE}Fb%AF]$T!jw2pM %k#Wjf"tiG9QΈYrƯ6FLLAJ+67RXOz5ZR w?̅d?KM_y_)w^߫ݵ9$[=s[0kF\T)[P[CuL|)Ӎ9Z$$ Gaɐf7LBT dݼ݌t~z#ԯ{ x̻͙Ц!(t NV[okUVA\61-j֫{ZjԺp6qGyo=/mkDZ\ՏuiJDH'ejgU]rVr2 4[ OtmBG!ɫ @(̐F8Լ,o2ȑoނ{2UT{FvՇ(2G2"B&WF |^z#ԯ{ xk"{38p3` bjN;$ O ϡ|ȿ#=?h[O瓼~"{5 9sV=֍Z@F34m5ʎ 
ٳƚӜ4"ǡF05d=yኚvIuhĈ|5|R 6XUcu$GRPTс5hPEע;jJ'ʱ [oVJ[+omʤYPVJ[+om. Ժ h!I!bqr,ۑ`[jT0 )gի G6M:4i$*jk V%2Ou,$7`rc'HU2erc-so~s4- 2hIz=5Ć*t(/W7&^a\L6x /g$n&THТ8b;FKWW==f:~ShdinF } 1ke60A\!> Eܱª[yFrD#wE#TRC#|Pio.9i\p+RETp@mYPeb] d`Vb89Ƭwx!#}c|?e"gRG1Q4 ]1TפFܠmɄ]1ԝ5.ZY@8;;'0&FCvlA*) 4Fa_2ю/sTVO)Ve_)opjBW&TRS4<MđԐw*XjJU#ս9 L(kfk}jկwc>;cjLQ mv,WH4dU19c2iّ ( T@,p0YPOPr4LY'Kw/!sD oW&-iYE cp輻5cX°s&Xw VdRK\13"$6E`q5Y~R I].49vcaRtiE2] 1Pr("=Ө QU=J3I&O1]dёGkUp5FuΉt'hfNh (L_6 J/5!#bFխeu|p&&43܉dQ o/ޭzo/ޭzo/ޭzgg85X{О& IBs,S$ET>ex0c;x]K) <6v]!IFFF( V<%+;c[12>y!xk]99upWvqI pjd_IaDSKN8&L5Gbo'bu3'|wǺu EsxэNiщ>P,t bחI'Ix@LP~FeiJ{T#BxiB01[9c%j9MJ& |`85-n-nѪH8kԵ~|u[T8}9%/jqϕᢿO&ľ߫PfX?~HOm:]M 9,S3Xj \`wr_8rU )SzFzӔO'dݜNiYQTVc4[XG1㎌9$]#PhSFB@xF PI Bе$' Lc"CP=DLưڴ7&gQde%NkԵ~|qd(a]JF~+lkq--1b5%}sV=3$@ w ΝGԲv(c1VmlNPFXդud&bKV.۪$WF7uhFG3$sk]j[6v6@02j#Η2Gl!AvTEPU@ F,B\UlRw;k@ף^~+]Fy1%*y(3JW̿$uo@ =3~83'|wǺs =i%KnppRdähYq hZX<I!pk|ؼ PE+n\@ A0Yfք0`u%O>$ :w狀H 8Gv0 lTR8pC+{dXO ss )!W=ެ;pdg^1K| Lb`PQ WBBgY(YIF\8ǣ^~+?) c&Cw$͂l˛x"׈(# W~$ώXZ5k4qxVeI>xm(sp"%mGZǝi3dDXXEa9}DK.q*[qt[E0WRaӨűyRXHʼnIuЀ`5}ӢIKE6ʂ4FYTG8ʀ"F xQB:O9xfԮu-DaCP#e Yba=Mm!82P#>Ez5ZR sn%v|S=.5⏔"hO22E?z~{%]rnt\άT"ęGAe0 JU"¹PFIGc-__8<j$Gq^ǖ֎'|[H߳w>$ώXAT@ʅ]X|h?2X)%jii^vrpDNTsBo5s oV*!8zoFKWW=5W?Cgҍiy#PxĞ߫vxk\}Wrܮeq+\}Wrܮeq+\}Wrܮeq+\}Wrܮeq+\}WruY(Dv?ROo~u8;z ~"d/c@kƲ(a_̲q |I$p5 ' GeudoT$4ARUF)@iN:GvF@-pK Gc-__6eWbn&c$=,"ω&~lY)/_JO=_Ğ߫'ټq‡#]T,СD'r]5;KvQ6pUCRE$Vț6 !tAXQC$qV(aLNaCT\f+t$ mGiPBFw;GF @Lg'5f!XTHb}lS)炲UHf˹_A͸劾p5* b!] 
oCOo+6,M|ꮹ*H"35ZR j:}O?KqCըgc\_4+_N0~#ώXP>`23E;Ʒ0ʣH(Qi] į R… 4Aq&qNb}_LWѢeQ]լ.4_Wk 8|N`h]L6 Xč9 M NjYlJFOW5psgXM\WVMPO‹hu?\ՏtGBg9ҊI5 9Y@Ij9ì䮇]# ڭJH)ƺ$p6*:pu ` 8LkqRk"'FŗI&ia"'١pv̧B{`-ȸb9@T =b8(`0Vz#ԯ{xv4k>A^%Aw-Z@w2_ _Ϙ9oz&Ϸ17r: |G 4H "11bU#U-Ue)qk8PGQ&Y3mcߔCCd"0 k`H(q&[A'.C6wqT!13!FC!l8yc24@y*4s$ЪGi !yh}ԖzP#xN %.m"KU:JT6|a bdEl|s#*3v}Gc-__4k˕ !{,d?\U?G#MŌ#**4x/Ok |wǺ gsUUf=@TE) 9c )r@$lÈmJ3&("cw,7*#y ,k2gV"2ZA]PW<*x%qTaFs =3ɳ E O "6Uu\cI$hDI-MH~PȩmemʊcSΐexxf+t`WV1J"Y?"{p4L Ev@ 0U2< ((VSSA-S28AX<ތZ# Y;jJ'#TV;˹p;g{$T5.Gqr$?TzK?\Տtn'-8וO<:b23 qK8͐ѩwV8]?2*疥Gdvʢ NHT:yw$EBDKtX"' g-GP&h4JD"v&&&l~ˮ GsV=()k;|wTb(' $FͶyݯTs]}Zecr%zȮPD0#%aI_57q!# &t H&V(ƉsuVڋvƢXMrM6rz 6}:k d$k0?&(޽k{>z#ԯ{ɳ?Xdiw]a&9S<i"1JI '_!dU>~ҋ~{r$T*)O1mM5b B\n8zә[KlJx*^VLMNgDaISI-}ew%2 /"幹AX#x Tn0PK3(LZ4ЄTo MRu5U؈Cn0AB` )o-v8i7mC4xSrk$%xS+# Cm$/d/qע;jJ's[̙|1d+}!(~$m>1COv}yo>;cLDaV5yUt[‘koh&6!ʁ8c^ սq3"fgSu 5 ec@=lN1"]0 )c FwF;}#kvLr P0H"5b1Ɍ7=U Ji!~hnѯDv?ROo{t~r:~!g3B£ zNhn~X)_! $팁R?z~{o3H,r11 ơPE"C.P$lA `B| xc[]O`jkmV&Ƭ9 TyPI,"K<A:Ԩ2tXqR(*'!dCqr |j U:!i4 *dw#̵%_4)ncS ą}pmyWPNBiI:h"54]c擽U1tF'JkԵ~|q:Ft)ηȹ<(|3w2nRM[L|ϝ~"P97Ca |wǺ,2@tp$AE$[cTWU82HDj&xFNⲸPlFw=r%#qPnaB6\ɤ/ i$| :J&IifNC Vf2B$׳ `5,m  t$-F&dVB˷~鯯I„b6{U$]؂ÉB)󍥩;ZY y$^z#ԯ{|F~:c!kȃߍf}x샃A# *!y!H}=ԑO^߫9Essssssssssssssssss=ԥXxj;?jJ'y8QⶑnEq>evc薡炪GG:/qS/\Տ+-@p>q c[,GҀɃ QE+=-#^߫ KWW={6r t?c[:55;8F2Aj tOu5"9%/\#A(Mv+_]Wle/ev+_]Wle/ev+_]Wle/ev+_SlN} (UQOoW3P''G;O|KrEڏ7s._h#ޠ+Lz?~"οoytPyE )qK"w,2X`iuYXK6żg5z'ӇkK/\-҃'ZM9S lg*ܧ*r7ޤp4F#G/RO<(TAh1m5krdGXIDgƨB[0)B;.I}T0ƎrWSƌ:TEeFPh}1XH-kFPQ%ٮӿPpNWQ(&L@f#ai"ƒm1*iѤec J" 5-(KlBҊ PAs;4c*DvBrj]h8 g{֥Z;'Ȑ2Zr PB U/*>9IzjHG5Zm\c!I ר X |4mȄ*byv?3#ӎO[(뉒S>-`nO3¢tVd*2uSFk, UOI2 -qs;`LN H!pdkm,ВiKݖ~6k3#왆 &{݀dCA#$iP#U>5R: 5uHw#h&~"/762J ] G撻'#'UYO Yɨc9HY4 FI38UG{[4˼aSe6) Ş Q v)BP(ݥCTDyJG<435!܍( l+3h5r 2kfF沼CឹS$#tu yEMq4-=He 6W@ċ"oל˶H˒ti'} LPu(2k}0ggoifŜڳhOss9?Li$V UlqgR =|ied|I%gvn%R{섀C Aus5ܑ拾vul+RO$T&w.ǎFQb{Щ=&%~"=(LbbK2$Q+>тPj)扌qDa@H'"(&aكT3I&Atܢ6 T3\lШȌL}բ4ou1 V Tَh%HF&W+4]ヤI42+Fg }ka܇ ZtnⶲZd+BfdLFia(6`bF4ѸH4ܪR;%9 
$AW7S:1I%vt#jkw10d̒;>*)}BвM&c0AB*Ii\<39[ 9'Z˦h wgB' Z3 b3LK Di1Itq;Oxg~uGwp\PϮ}sy\W>5ij$c*$f=WSD1>Ϯ}sy\W>Ϯ}Ks4NA#p{n%1"6PN3\W>%ϼ}s+\ʖi&!Ìq$Spղesy\W>Ϯ}sy\W>Ϯ}syN#Y$>Ϯ}sy\W>Ϯ}sy\W>Ϯ} οo>X7\u5/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wle/ev+_]Wlepn&Gp?+!R"13A`aQqB?5[M*}J}E +U]UWQTkcw9NX %\F1cVlu=rX %[hĀSԧ cr8 9GHT0?]r7;#$uD@#E f7vSÇDcK`+fozzO@V =Z+TjSƙ l1!0`?D"` vU}.Uz]}t././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/_static/sql_railroad.html0000644000000000000000000012321214412577542016412 0ustar00

Forward

'select' '*' column name ',' Suppress column name 'from' table name ',' Suppress table name 'where' 'or' term

column name

W:(A-Za-z, $0-9A-Z_a-z) identifier '.' W:(A-Za-z, $0-9A-Z_a-z) identifier

table name

W:(A-Za-z, $0-9A-Z_a-z) identifier '.' W:(A-Za-z, $0-9A-Z_a-z) identifier

'or' term

'and' term 'or' 'and' term _FB 'and' term 'or' 'and' term 'and' term

'and' term

'not' term 'and' 'not' term _FB 'not' term 'and' 'not' term 'not' term

'not' term

'not' 'not' term _FB 'not' 'not' term column name '=' '!=' '<=' '<' '>=' '>' 'EQ' 'NE' 'LT' 'LE' 'GT' 'GE' Unnamed 2 column name 'in' '(' Unnamed 2 ',' Suppress Unnamed 2 ')' column name 'in' '(' Forward ')' column name 'is' 'null' 'not' 'null' '(' Suppress 'or' term ')' Suppress

Unnamed 2

Re:('[+-]?(?:\d+\.\d*|\.\d+)') real number Re:('[+-]?\d+') signed integer Re:('"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') '"' Re:("'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") "'" quotedString using single or double quotes column name
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/conf.py0000644000000000000000000001252714412577542012727 0ustar00# # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath("..")) from pyparsing import __version__ as pyparsing_version # -- Project information ----------------------------------------------------- project = "PyParsing" copyright = "2018-2022, Paul T. McGuire" author = "Paul T. McGuire" # The short X.Y version version = pyparsing_version # The full version, including alpha/beta/rc tags release = pyparsing_version # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = "PyParsingdoc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "PyParsing.tex", "PyParsing Documentation", "Paul T. McGuire", "manual", ), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "pyparsing", "PyParsing Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "PyParsing", "PyParsing Documentation", author, "PyParsing", "Python PEG parsing library.", "Miscellaneous", ), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ["search.html"] # -- Extension configuration ------------------------------------------------- ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/index.rst0000644000000000000000000000103114412577542013255 0ustar00.. PyParsing documentation master file, created by sphinx-quickstart on Mon Nov 19 15:06:52 2018. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. 
Welcome to PyParsing's documentation! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Release v\ |version| .. toctree:: :maxdepth: 2 :caption: Contents: whats_new_in_3_0_0 HowToUsePyparsing modules CODE_OF_CONDUCT Indices and tables ~~~~~~~~~~~~~~~~~~ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/docs/make_sphinx_docs.bat0000644000000000000000000000004214443622312015411 0ustar00sphinx-build.exe -M html . _build ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/modules.rst0000644000000000000000000000010014412577542013612 0ustar00pyparsing ========= .. toctree:: :maxdepth: 4 pyparsing ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/docs/pyparsing.rst0000644000000000000000000000017014412577542014165 0ustar00pyparsing module ================ .. 
automodule:: pyparsing :members: :special-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/docs/pyparsing_class_diagram.puml0000644000000000000000000001362714443622312017204 0ustar00@startuml 'https://plantuml.com/class-diagram top to bottom direction hide circle hide empty members 'hide empty methods skinparam groupInheritance 3 note as N1 Class Diagram --- pyparsing 3.0.9 May, 2022 end note N1 <-[hidden]- unicode package core { class globals { quoted_string sgl_quoted_string dbl_quoted_string counted_array() match_previous_literal() match_previous_expr() one_of() dict_of() original_text_for() ungroup() nested_expr() make_html_tags() make_xml_tags() common_html_entity replace_html_entity() class OpAssoc infix_notation() class IndentedBlock c_style_comment html_comment rest_of_line dbl_slash_comment cpp_style_comment java_style_comment python_style_comment match_only_at_col() replace_with() remove_quotes() with_attribute() with_class() trace_parse_action() condition_as_parse_action() srange() token_map() autoname_elements() } class ParseResults { class List {static}from_dict() __getitem__() __setitem__() __contains__() __len__() __bool__() __iter__() __reversed__() __getattr__() __add__() __getstate__() __setstate__() __getnewargs__() __dir__() as_dict() as_list() dump() get_name() items() keys() values() haskeys() pop() get() insert() append() extend() clear() copy() get_name() pprint() } class ParseBaseException #ffffff { {static} explain_exception() explain() mark_input_line() line lineno column parser_element } class ParseException class ParseFatalException class ParseSyntaxException ParseBaseException <|-- ParseException ParseBaseException <|-- ParseFatalException ParseFatalException <|-- ParseSyntaxException class ParserElement { name: str results_name: str --- {classifier} enable_packrat() {classifier} enable_left_recursion() {classifier} disable_memoization() 
{classifier} set_default_whitespace_chars() {classifier} inline_literals_using() {classifier} reset_cache() {static} verbose_stacktrace suppress_warning() operator + () -> And operator - () -> And.ErrorStop operator | () -> MatchFirst operator ^ () -> Or operator & () -> Each operator ~ () -> NotAny operator [] () -> _MultipleMatch operator () () [set_results_name()] add_condition() add_parse_action() set_parse_action() copy() ignore(expr) leave_whitespace() parse_with_tabs() suppress() set_break() set_debug() set_debug_actions() set_name() set_results_name() parse_string() scan_string() search_string() transform_string() split() run_tests() recurse() create_diagram() } class Token #ffffff class ParseExpression #ffffff { exprs: list[ParserElement] } class ParseElementEnhance #ffffff { expr: ParserElement } class _PositionToken #ffffff class Char class White class Word { 'Word(init_chars: str, body_chars: str, min: int, \nmax: int, exact: int, as_keyword: bool, exclude_chars: str) } class Keyword { {static} set_default_keyword_chars(chars: str) } class CaselessKeyword class Empty class Literal class Regex class NoMatch class CharsNotIn class QuotedString class And class Or class MatchFirst class Each class OneOrMore class ZeroOrMore class DelimitedList class SkipTo class Group class Forward { operator <<= () } class LineStart class LineEnd class StringStart class StringEnd class WordStart class WordEnd class _MultipleMatch #ffffff class FollowedBy class PrecededBy class AtLineStart class AtStringStart class TokenConverter #ffffff class Located class Opt class Combine class Group class Dict class Suppress ParserElement <|-- Token ParserElement <|----- ParseExpression Token <|-- _PositionToken ParserElement <|----- ParseElementEnhance 'ParseElementEnhance ---> ParserElement 'ParseExpression ---> "*" ParserElement Token <|-- Empty Token <|-- CloseMatch Token <|-- NoMatch Token <|-- Literal Token <|-- Word Token <|---- Keyword Token <|--- Regex Token <|--- CharsNotIn 
Token <|-- White Token <|---- QuotedString Word <|-- Char Literal <|-- CaselessLiteral Keyword <|-- CaselessKeyword ParseExpression <|-- And ParseExpression <|-- Or ParseExpression <|-- MatchFirst ParseExpression <|-- Each ParseElementEnhance <|-- SkipTo ParseElementEnhance <|--- Forward ParseElementEnhance <|-- Located ParseElementEnhance <|--- _MultipleMatch _MultipleMatch <|-- OneOrMore _MultipleMatch <|-- ZeroOrMore ParseElementEnhance <|-- DelimitedList ParseElementEnhance <|--- NotAny ParseElementEnhance <|--- FollowedBy ParseElementEnhance <|--- PrecededBy ParseElementEnhance <|-- Opt ParseElementEnhance <|--- TokenConverter ParseElementEnhance <|-- AtStringStart ParseElementEnhance <|-- AtLineStart TokenConverter <|-- Group TokenConverter <|-- Dict TokenConverter <|-- Suppress TokenConverter <|-- Combine _PositionToken <|-- LineStart _PositionToken <|-- LineEnd _PositionToken <|-- WordStart _PositionToken <|-- WordEnd _PositionToken <|-- StringStart _PositionToken <|-- StringEnd } package common { class " " { comma_separated_list convert_to_integer() convert_to_float() integer hex_integer signed_integer fraction mixed_integer real sci_real number fnumber identifier ipv4_address ipv6_address mac_address convert_to_date() convert_to_datetime() iso8601_date iso8601_datetime uuid strip_html_tags() upcase_tokens() downcase_tokens() url } } package unicode { class unicode_set { printables: str alphas: str nums: str alphanums: str identchars: str identbodychars: str } class Latin1 class LatinA class LatinB class BasicMultilingualPlane class Chinese class Thai class Japanese { class Kanji class Hiragana class Katakana } class Greek class Hangul class Arabic class Devanagari class Hebrew class Cyrillic unicode_set <|-- Latin1 unicode_set <|--- LatinA unicode_set <|-- LatinB unicode_set <|---- BasicMultilingualPlane unicode_set <|-- Greek unicode_set <|--- Cyrillic unicode_set <|--- Chinese unicode_set <|--- Japanese unicode_set <|--- Hangul Chinese <|-- CJK Japanese 
<|-- CJK Hangul <|-- CJK unicode_set <|-- Thai unicode_set <|-- Arabic unicode_set <|-- Hebrew unicode_set <|--- Devanagari } ParserElement <-[hidden] ParseBaseException 'ParseBaseException <-[hidden] globals 'globals <-[hidden] ParserElement CJK <-[hidden]-- common @enduml././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/docs/whats_new_in_3_0_0.rst0000644000000000000000000007551614443622312015524 0ustar00============================= What's New in Pyparsing 3.0.0 ============================= :author: Paul McGuire :date: May, 2022 :abstract: This document summarizes the changes made in the 3.0.0 release of pyparsing. (Updated to reflect changes up to 3.0.10) .. sectnum:: :depth: 4 .. contents:: :depth: 4 New Features ============ PEP-8 naming ------------ This release of pyparsing will (finally!) include PEP-8 compatible names and arguments. Backward-compatibility is maintained by defining synonyms using the old camelCase names pointing to the new snake_case names. This code written using non-PEP8 names:: wd = pp.Word(pp.printables, excludeChars="$") wd_list = pp.delimitedList(wd, delim="$") print(wd_list.parseString("dkls$134lkjk$lsd$$").asList()) can now be written as:: wd = pp.Word(pp.printables, exclude_chars="$") wd_list = pp.delimited_list(wd, delim="$") print(wd_list.parse_string("dkls$134lkjk$lsd$$").as_list()) Pyparsing 3.0 will run both versions of this example. New code should be written using the PEP-8 compatible names. The compatibility synonyms will be removed in a future version of pyparsing. Railroad diagramming -------------------- An excellent new enhancement is the new railroad diagram generator for documenting pyparsing parsers.:: import pyparsing as pp # define a simple grammar for parsing street addresses such # as "123 Main Street" # number word... number = pp.Word(pp.nums).set_name("number") name = pp.Word(pp.alphas).set_name("word")[1, ...] 
parser = number("house_number") + name("street") parser.set_name("street address") # construct railroad track diagram for this parser and # save as HTML parser.create_diagram('parser_rr_diag.html') ``create_diagram`` accepts these named arguments: - ``vertical`` (int) - threshold for formatting multiple alternatives vertically instead of horizontally (default=3) - ``show_results_names`` - bool flag whether diagram should show annotations for defined results names - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box - ``embed`` - bool flag whether generated HTML should omit ``<HTML>``, ``<HEAD>``, and ``<BODY>`` tags to embed the resulting HTML in an enclosing HTML source (new in 3.0.10) - ``head`` - str containing additional HTML to insert into the ``<HEAD>`` section of the generated code; can be used to insert custom CSS styling - ``body`` - str containing additional HTML to insert at the beginning of the ``<BODY>`` section of the generated code To use this new feature, install the supporting diagramming packages using:: pip install pyparsing[diagrams] See more in the examples directory: ``make_diagram.py`` and ``railroad_diagram_demo.py``. (Railroad diagram enhancement contributed by Michael Milton) Support for left-recursive parsers ---------------------------------- Another significant enhancement in 3.0 is support for left-recursive (LR) parsers. Previously, given a left-recursive parser, pyparsing would recurse repeatedly until hitting the Python recursion limit.
Following the methods of the Python PEG parser, pyparsing uses a variation of packrat parsing to detect and handle left-recursion during parsing.:: import pyparsing as pp pp.ParserElement.enable_left_recursion() # a common left-recursion definition # define a list of items as 'list + item | item' # BNF: # item_list := item_list item | item # item := word of alphas item_list = pp.Forward() item = pp.Word(pp.alphas) item_list <<= item_list + item | item item_list.run_tests("""\ To parse or not to parse that is the question """) Prints:: ['To', 'parse', 'or', 'not', 'to', 'parse', 'that', 'is', 'the', 'question'] See more examples in ``left_recursion.py`` in the pyparsing examples directory. (LR parsing support contributed by Max Fischer) Packrat/memoization enable and disable methods ---------------------------------------------- As part of the implementation of left-recursion support, new methods have been added to enable and disable packrat parsing. ====================== ======================================================= Name Description ---------------------- ------------------------------------------------------- enable_packrat Enable packrat parsing (with specified cache size) enable_left_recursion Enable left-recursion cache disable_memoization Disable all internal parsing caches ====================== ======================================================= Type annotations on all public methods -------------------------------------- Python 3.6 and upward compatible type annotations have been added to most of the public methods in pyparsing. This should facilitate developing pyparsing-based applications using IDEs for development-time type checking. 
New string constants ``identchars`` and ``identbodychars`` to help in defining identifier Word expressions ---------------------------------------------------------------------------------------------------------- Two new module-level strings have been added to help when defining identifiers, ``identchars`` and ``identbodychars``. Instead of writing:: import pyparsing as pp identifier = pp.Word(pp.alphas + "_", pp.alphanums + "_") you will be able to write:: identifier = pp.Word(pp.identchars, pp.identbodychars) Those constants have also been added to all the Unicode string classes:: import pyparsing as pp ppu = pp.pyparsing_unicode cjk_identifier = pp.Word(ppu.CJK.identchars, ppu.CJK.identbodychars) greek_identifier = pp.Word(ppu.Greek.identchars, ppu.Greek.identbodychars) Refactored/added diagnostic flags --------------------------------- Expanded ``__diag__`` and ``__compat__`` to actual classes instead of just namespaces, to add some helpful behavior: - ``pyparsing.enable_diag()`` and ``pyparsing.disable_diag()`` methods to give extra help when setting or clearing flags (detects invalid flag names, detects when trying to set a ``__compat__`` flag that is no longer settable). Use these methods now to set or clear flags, instead of directly setting to ``True`` or ``False``:: import pyparsing as pp pp.enable_diag(pp.Diagnostics.warn_multiple_tokens_in_named_alternation) - ``pyparsing.enable_all_warnings()`` is another helper that sets all "warn*" diagnostics to ``True``:: pp.enable_all_warnings() - added support for calling ``enable_all_warnings()`` if warnings are enabled using the Python ``-W`` switch, or setting a non-empty value to the environment variable ``PYPARSINGENABLEALLWARNINGS``. (If using ``-Wd`` for testing, but wishing to disable pyparsing warnings, add ``-Wi:::pyparsing``.) 
- added new warning, ``warn_on_match_first_with_lshift_operator`` to warn when using ``'<<'`` with a ``'|'`` ``MatchFirst`` operator, which will create an unintended expression due to precedence of operations. Example: This statement will erroneously define the ``fwd`` expression as just ``expr_a``, even though ``expr_a | expr_b`` was intended, since ``'<<'`` operator has precedence over ``'|'``:: fwd << expr_a | expr_b To correct this, use the ``'<<='`` operator (preferred) or parentheses to override operator precedence:: fwd <<= expr_a | expr_b or:: fwd << (expr_a | expr_b) - ``warn_on_parse_using_empty_Forward`` - warns that a ``Forward`` has been included in a grammar, but no expression was attached to it using ``'<<='`` or ``'<<'`` - ``warn_on_assignment_to_Forward`` - warns that a ``Forward`` has been created, but was probably later overwritten by erroneously using ``'='`` instead of ``'<<='`` (this is a common mistake when using Forwards) (**currently not working on PyPy**) Support for yielding native Python ``list`` and ``dict`` types in place of ``ParseResults`` ------------------------------------------------------------------------------------------- To support parsers that are intended to generate native Python collection types such as lists and dicts, the ``Group`` and ``Dict`` classes now accept an additional boolean keyword argument ``aslist`` and ``asdict`` respectively. See the ``jsonParser.py`` example in the ``pyparsing/examples`` source directory for how to return types as ``ParseResults`` and as Python collection types, and the distinctions in working with the different types. 
In addition parse actions that must return a value of list type (which would normally be converted internally to a ``ParseResults``) can override this default behavior by returning their list wrapped in the new ``ParseResults.List`` class:: # this parse action tries to return a list, but pyparsing # will convert to a ParseResults def return_as_list_but_still_get_parse_results(tokens): return tokens.asList() # this parse action returns the tokens as a list, and pyparsing will # maintain its list type in the final parsing results def return_as_list(tokens): return ParseResults.List(tokens.asList()) This is the mechanism used internally by the ``Group`` class when defined using ``aslist=True``. New Located class to replace ``locatedExpr`` helper method ---------------------------------------------------------- The new ``Located`` class will replace the current ``locatedExpr`` method for marking parsed results with the start and end locations of the parsed data in the input string. ``locatedExpr`` had several bugs, and returned its results in a hard-to-use format (location data and results names were mixed in with the located expression's parsed results, and wrapped in an unnecessary extra nesting level). For this code:: wd = Word(alphas) for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): print(match) the docs for ``locatedExpr`` show this output:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] The parsed values and the start and end locations are merged into a single nested ``ParseResults`` (and any results names in the parsed values are also merged in with the start and end location names). Using ``Located``, the output is:: [0, ['ljsdf'], 5] [8, ['lksdjjf'], 15] [18, ['lkkjj'], 23] With ``Located``, the parsed expression values and results names are kept separate in the second parsed value, and there is no extra grouping level on the whole result. 
The existing ``locatedExpr`` is retained for backward-compatibility, but will be deprecated in a future release. New ``AtLineStart`` and ``AtStringStart`` classes ------------------------------------------------- As part of fixing some matching behavior in ``LineStart`` and ``StringStart``, two new classes have been added: ``AtLineStart`` and ``AtStringStart``. ``LineStart`` and ``StringStart`` can be treated as separate elements, including whitespace skipping. ``AtLineStart`` and ``AtStringStart`` enforce that an expression starts exactly at column 1, with no leading whitespace.:: (LineStart() + Word(alphas)).parseString("ABC") # passes (LineStart() + Word(alphas)).parseString(" ABC") # passes AtLineStart(Word(alphas)).parseString(" ABC") # fails [This is a fix to behavior that was added in 3.0.0, but was actually a regression from 2.4.x.] New ``IndentedBlock`` class to replace ``indentedBlock`` helper method ---------------------------------------------------------------------- The new ``IndentedBlock`` class will replace the current ``indentedBlock`` method for defining indented blocks of text, similar to Python source code. Using ``IndentedBlock``, the expression instance itself keeps track of the indent stack, so a separate external ``indentStack`` variable is no longer required. Here is a simple example of an expression containing an alphabetic key, followed by an indented list of integers:: integer = pp.Word(pp.nums) group = pp.Group(pp.Char(pp.alphas) + pp.IndentedBlock(integer)) parses:: A 100 101 B 200 201 as:: [['A', [100, 101]], ['B', [200, 201]]] By default, the results returned from the ``IndentedBlock`` are grouped. ``IndentedBlock`` may also be used to define a recursive indented block (containing nested indented blocks). The existing ``indentedBlock`` is retained for backward-compatibility, but will be deprecated in a future release. 
Shortened tracebacks -------------------- Cleaned up default tracebacks when getting a ``ParseException`` when calling ``parse_string``. Exception traces should now stop at the call in ``parse_string``, and not include the internal pyparsing traceback frames. (If the full traceback is desired, then set ``ParserElement.verbose_traceback`` to ``True``.) Improved debug logging ---------------------- Debug logging has been improved by: - Including ``try/match/fail`` logging when getting results from the packrat cache (previously cache hits did not show debug logging). Values returned from the packrat cache are marked with an '*'. - Improved fail logging, showing the failed expression, text line, and marker where the failure occurred. - Adding ``with_line_numbers`` to ``pyparsing_testing``. Use ``with_line_numbers`` to visualize the data being parsed, with line and column numbers corresponding to the values output when enabling ``set_debug()`` on an expression:: data = """\ A 100""" expr = pp.Word(pp.alphanums).set_name("word").set_debug() print(ppt.with_line_numbers(data)) expr[...].parseString(data) prints:: . 1 1234567890 1: A 2: 100 Match word at loc 3(1,4) A ^ Matched word -> ['A'] Match word at loc 11(2,7) 100 ^ Matched word -> ['100'] New / improved examples ----------------------- - ``number_words.py`` includes a parser/evaluator to parse ``"forty-two"`` and return ``42``. Also includes example code to generate a railroad diagram for this parser. - ``BigQueryViewParser.py`` added to examples directory, submitted by Michael Smedberg. - ``booleansearchparser.py`` added to examples directory, submitted by xecgr. Builds on searchparser.py, adding support for '*' wildcards and non-Western alphabets. - Improvements in ``select_parser.py``, to include new SQL syntax from SQLite, submitted by Robert Coup. - Off-by-one bug found in the ``roman_numerals.py`` example, a bug that has been there for about 14 years! Submitted by Jay Pedersen. 
- A simplified Lua parser has been added to the examples (``lua_parser.py``). - Demonstration of defining a custom Unicode set for cuneiform symbols, as well as simple Cuneiform->Python conversion is included in ``cuneiform_python.py``. - Fixed bug in ``delta_time.py`` example, when using a quantity of seconds/minutes/hours/days > 999. Other new features ------------------ - ``url`` expression added to ``pyparsing_common``, with named fields for common fields in URLs. See the updated ``urlExtractorNew.py`` file in the ``examples`` directory. Submitted by Wolfgang Fahl. - ``DelimitedList`` now supports an additional flag ``allow_trailing_delim``, to optionally parse an additional delimiter at the end of the list. Submitted by Kazantcev Andrey. - Added global method ``autoname_elements()`` to call ``set_name()`` on all locally defined ``ParserElements`` that haven't been explicitly named using ``set_name()``, using their local variable name. Useful for setting names on multiple elements when creating a railroad diagram:: a = pp.Literal("a") b = pp.Literal("b").set_name("bbb") pp.autoname_elements() ``a`` will get named "a", while ``b`` will keep its name "bbb". - Enhanced default strings created for ``Word`` expressions, now showing string ranges if possible. ``Word(alphas)`` would formerly print as ``W:(ABCD...)``, now prints as ``W:(A-Za-z)``. - Better exception messages to show full word where an exception occurred.:: Word(alphas)[...].parse_string("abc 123", parse_all=True) Was:: pyparsing.ParseException: Expected end of text, found '1' (at char 4), (line:1, col:5) Now:: pyparsing.exceptions.ParseException: Expected end of text, found '123' (at char 4), (line:1, col:5) - Using ``...`` for ``SkipTo`` can now be wrapped in ``Suppress`` to suppress the skipped text from the returned parse results.:: source = "lead in START relevant text END trailing text" start_marker = Keyword("START") end_marker = Keyword("END") find_body = Suppress(...) + start_marker + ... 
+ end_marker print(find_body.parse_string(source).dump()) Prints:: ['START', 'relevant text ', 'END'] - _skipped: ['relevant text '] - Added ``ignore_whitespace(recurse:bool = True)`` and added a ``recurse`` argument to ``leave_whitespace``, both added to provide finer control over pyparsing's whitespace skipping. Contributed by Michael Milton. - Added ``ParserElement.recurse()`` method to make it simpler for grammar utilities to navigate through the tree of expressions in a pyparsing grammar. - The ``repr()`` string for ``ParseResults`` is now of the form:: ParseResults([tokens], {named_results}) The previous form omitted the leading ``ParseResults`` class name, and was easily misinterpreted as a ``tuple`` containing a ``list`` and a ``dict``. - Minor reformatting of output from ``run_tests`` to make embedded comments more visible. - New ``pyparsing_test`` namespace, assert methods and classes added to support writing unit tests. - ``assertParseResultsEquals`` - ``assertParseAndCheckList`` - ``assertParseAndCheckDict`` - ``assertRunTestResults`` - ``assertRaisesParseException`` - ``reset_pyparsing_context`` context manager, to restore pyparsing config settings - Enhanced error messages and error locations when parsing fails on the ``Keyword`` or ``CaselessKeyword`` classes due to the presence of a preceding or trailing keyword character. - Enhanced the ``Regex`` class to be compatible with re's compiled with the re-equivalent ``regex`` module. Individual expressions can be built with regex compiled expressions using:: import pyparsing as pp import regex # would use regex for this expression integer_parser = pp.Regex(regex.compile(r'\d+')) - Fixed handling of ``ParseSyntaxExceptions`` raised as part of ``Each`` expressions, when sub-expressions contain ``'-'`` backtrack suppression. - Potential performance enhancement when parsing ``Word`` expressions built from ``pyparsing_unicode`` character sets. 
``Word`` now internally converts ranges of consecutive characters to regex character ranges (converting ``"0123456789"`` to ``"0-9"`` for instance). - Added a caseless parameter to the ``CloseMatch`` class to allow for casing to be ignored when checking for close matches. Contributed by Adrian Edwards. API Changes =========== - [Note added in pyparsing 3.0.7, reflecting a change in 3.0.0] Fixed a bug in the ``ParseResults`` class implementation of ``__bool__``, which would formerly return ``False`` if the ``ParseResults`` item list was empty, even if it contained named results. Now ``ParseResults`` will return ``True`` if either the item list is not empty *or* if the named results list is not empty:: # generate an empty ParseResults by parsing a blank string with a ZeroOrMore result = Word(alphas)[...].parse_string("") print(result.as_list()) print(result.as_dict()) print(bool(result)) # add a results name to the result result["name"] = "empty result" print(result.as_list()) print(result.as_dict()) print(bool(result)) Prints:: [] {} False [] {'name': 'empty result'} True In previous versions, the second call to ``bool()`` would return ``False``. - [Note added in pyparsing 3.0.4, reflecting a change in 3.0.0] The ``ParseResults`` class now uses ``__slots__`` to pre-define instance attributes. This means that code written like this (which was allowed in pyparsing 2.4.7):: result = Word(alphas).parseString("abc") result.xyz = 100 now raises this Python exception:: AttributeError: 'ParseResults' object has no attribute 'xyz' To add new attribute values to ParseResults object in 3.0.0 and later, you must assign them using indexed notation:: result["xyz"] = 100 You will still be able to access this new value as an attribute or as an indexed item. - ``enable_diag()`` and ``disable_diag()`` methods to enable specific diagnostic values (instead of setting them to ``True`` or ``False``). ``enable_all_warnings()`` has also been added. 
- ``counted_array`` formerly returned its list of items nested within another list, so that accessing the items required indexing the 0'th element to get the actual list. This extra nesting has been removed. In addition, if there are other metadata fields parsed between the count and the list items, they can be preserved in the resulting list if given results names. - ``ParseException.explain()`` is now an instance method of ``ParseException``:: expr = pp.Word(pp.nums) * 3 try: expr.parse_string("123 456 A789") except pp.ParseException as pe: print(pe.explain(depth=0)) prints:: 123 456 A789 ^ ParseException: Expected W:(0-9), found 'A789' (at char 8), (line:1, col:9) To run explain against other exceptions, use ``ParseException.explain_exception()``. - Debug actions now take an added keyword argument ``cache_hit``. Now that debug actions are called for expressions matched in the packrat parsing cache, debug actions are now called with this extra flag, set to ``True``. For custom debug actions, it is necessary to add support for this new argument. - ``ZeroOrMore`` expressions that have results names will now include empty lists for their name if no matches are found. Previously, no named result would be present. Code that tested for the presence of any expressions using ``"if name in results:"`` will now always return ``True``. This code will need to change to ``"if name in results and results[name]:"`` or just ``"if results[name]:"``. Also, any parser unit tests that check the ``as_dict()`` contents will now see additional entries for parsers having named ``ZeroOrMore`` expressions, whose values will be ``[]``. - ``ParserElement.set_default_whitespace_chars`` will now update whitespace characters on all built-in expressions defined in the pyparsing module. - ``camelCase`` names have been converted to PEP-8 ``snake_case`` names. 
Method names and arguments that were camel case (such as ``parseString``) have been replaced with PEP-8 snake case versions (``parse_string``). Backward-compatibility synonyms for all names and arguments have been included, to allow parsers written using the old names to run without change. The synonyms will be removed in a future release. New parser code should be written using the new PEP-8 snake case names. ============================== ================================ Name Previous name ------------------------------ -------------------------------- ParserElement - parse_string parseString - scan_string scanString - search_string searchString - transform_string transformString - add_condition addCondition - add_parse_action addParseAction - can_parse_next canParseNext - default_name defaultName - enable_left_recursion enableLeftRecursion - enable_packrat enablePackrat - ignore_whitespace ignoreWhitespace - inline_literals_using inlineLiteralsUsing - parse_file parseFile - leave_whitespace leaveWhitespace - parse_string parseString - parse_with_tabs parseWithTabs - reset_cache resetCache - run_tests runTests - scan_string scanString - search_string searchString - set_break setBreak - set_debug setDebug - set_debug_actions setDebugActions - set_default_whitespace_chars setDefaultWhitespaceChars - set_fail_action setFailAction - set_name setName - set_parse_action setParseAction - set_results_name setResultsName - set_whitespace_chars setWhitespaceChars - transform_string transformString - try_parse tryParse ParseResults - as_list asList - as_dict asDict - get_name getName ParseBaseException - parser_element parserElement any_open_tag anyOpenTag any_close_tag anyCloseTag c_style_comment cStyleComment common_html_entity commonHTMLEntity condition_as_parse_action conditionAsParseAction counted_array countedArray cpp_style_comment cppStyleComment dbl_quoted_string dblQuotedString dbl_slash_comment dblSlashComment DelimitedList delimitedList DelimitedList 
delimited_list dict_of dictOf html_comment htmlComment infix_notation infixNotation java_style_comment javaStyleComment line_end lineEnd line_start lineStart make_html_tags makeHTMLTags make_xml_tags makeXMLTags match_only_at_col matchOnlyAtCol match_previous_expr matchPreviousExpr match_previous_literal matchPreviousLiteral nested_expr nestedExpr null_debug_action nullDebugAction one_of oneOf OpAssoc opAssoc original_text_for originalTextFor python_style_comment pythonStyleComment quoted_string quotedString remove_quotes removeQuotes replace_html_entity replaceHTMLEntity replace_with replaceWith rest_of_line restOfLine sgl_quoted_string sglQuotedString string_end stringEnd string_start stringStart token_map tokenMap trace_parse_action traceParseAction unicode_string unicodeString with_attribute withAttribute with_class withClass ============================== ================================ Discontinued Features ===================== Python 2.x no longer supported ------------------------------ Removed Py2.x support and other deprecated features. Pyparsing now requires Python 3.6.8 or later. If you are using an earlier version of Python, you must use a Pyparsing 2.4.x version. 
Other discontinued features --------------------------- - ``ParseResults.asXML()`` - if used for debugging, switch to using ``ParseResults.dump()``; if used for data transfer, use ``ParseResults.as_dict()`` to convert to a nested Python dict, which can then be converted to XML or JSON or other transfer format - ``operatorPrecedence`` synonym for ``infixNotation`` - convert to calling ``infix_notation`` - ``commaSeparatedList`` - convert to using ``pyparsing_common.comma_separated_list`` - ``upcaseTokens`` and ``downcaseTokens`` - convert to using ``pyparsing_common.upcase_tokens`` and ``downcase_tokens`` - ``__compat__.collect_all_And_tokens`` will not be settable to ``False`` to revert to pre-2.3.1 results name behavior - review use of names for ``MatchFirst`` and Or expressions containing ``And`` expressions, as they will return the complete list of parsed tokens, not just the first one. Use ``pyparsing.enable_diag(pyparsing.Diagnostics.warn_multiple_tokens_in_named_alternation)`` to help identify those expressions in your parsers that will have changed as a result. - Removed support for running ``python setup.py test``. The setuptools maintainers consider the ``test`` command deprecated (see https://github.com/pypa/setuptools/issues/1684). To run the Pyparsing tests, use the command ``tox``. Fixed Bugs ========== - [Reverted in 3.0.2] Fixed issue when ``LineStart()`` expressions would match input text that was not necessarily at the beginning of a line. [The previous behavior was the correct behavior, since it represents the ``LineStart`` as its own matching expression. ``ParserElements`` that must start in column 1 can be wrapped in the new ``AtLineStart`` class.] - Fixed bug in regex definitions for ``real`` and ``sci_real`` expressions in ``pyparsing_common``. - Fixed ``FutureWarning`` raised beginning in Python 3.7 for ``Regex`` expressions containing '[' within a regex set. - Fixed bug in ``PrecededBy`` which caused infinite recursion.
- Fixed bug in ``CloseMatch`` where end location was incorrectly computed; and updated ``partial_gene_match.py`` example. - Fixed bug in ``indentedBlock`` with a parser using two different types of nested indented blocks with different indent values, but sharing the same indent stack. - Fixed bug in ``Each`` when using ``Regex``, when ``Regex`` expression would get parsed twice. - Fixed bugs in ``Each`` when passed ``OneOrMore`` or ``ZeroOrMore`` expressions: . first expression match could be enclosed in an extra nesting level . out-of-order expressions now handled correctly if mixed with required expressions . results names are maintained correctly for these expression - Fixed ``FutureWarning`` that sometimes is raised when ``'['`` passed as a character to ``Word``. - Fixed debug logging to show failure location after whitespace skipping. - Fixed ``ParseFatalExceptions`` failing to override normal exceptions or expression matches in ``MatchFirst`` expressions. - Fixed bug in which ``ParseResults`` replaces a collection type value with an invalid type annotation (as a result of changed behavior in Python 3.9). - Fixed bug in ``ParseResults`` when calling ``__getattr__`` for special double-underscored methods. Now raises ``AttributeError`` for non-existent results when accessing a name starting with '__'. - Fixed bug in ``Located`` class when used with a results name. - Fixed bug in ``QuotedString`` class when the escaped quote string is not a repeated character. Acknowledgments =============== And finally, many thanks to those who helped in the restructuring of the pyparsing code base as part of this release. Pyparsing now has more standard package structure, more standard unit tests, and more standard code formatting (using black). Special thanks to jdufresne, klahnakoski, mattcarmody, ckeygusuz, tmiguelt, and toonarmycaptain to name just a few. 
Thanks also to Michael Milton and Max Fischer, who added some significant new features to pyparsing.././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/0README.html0000644000000000000000000002420714443622312014205 0ustar00 pyparsing Examples

pyparsing Examples

This directory contains a number of Python scripts that can get you started in learning to use pyparsing.

  • greeting.py
    Parse "Hello, World!".
  • greetingInKorean.py ~ submission by June Kim
    Unicode example to parse "Hello, World!" in Korean.
  • greetingInGreek.py ~ submission by ???
    Unicode example to parse "Hello, World!" in Greek.
  • hola_mundo.py ~ submission by Marco Alfonso
    "Hello, World!" example translated to Spanish, from Marco Alfonso's blog.
  • chemical_formulas.py
    Simple example to demonstrate the use of ParseResults returned from parseString(). Parses a chemical formula (such as "H2O" or "C6H5OH"), and walks the returned list of tokens to calculate the molecular weight.
  • wordsToNum.py
    A sample program that reads a number in words (such as "fifteen hundred and sixty four"), and returns the actual number (1564). Also demonstrates some processing of ParseExceptions, including marking where the parse failure was found.
  • pythonGrammarparser.py ~ suggested by JH Stovall
    A sample program that parses the EBNF used in the Python source code to define the Python grammar. From this parser, one can generate Python grammar documentation tools, such as railroad track diagrams. Also demonstrates use of Dict class.
  • commasep.py
    Demonstration of the use of the commaSeparatedList helper. Shows examples of proper handling of commas within quotes, trimming of whitespace around delimited entries, and handling of consecutive commas (null arguments). Includes comparison with simple string.split(',').
  • dictExample.py
    A demonstration of using the Dict class, to parse a table of ASCII tabulated data.
  • dictExample2.py ~ submission by Mike Kelly
    An extended version of dictExample.py, in which Mike Kelly also parses the column headers, and generates a transposed version of the original table!
  • scanExamples.py
    Some examples of using scanString and transformString, as alternative parsing methods to parseString, to do macro substitution, and selection and/or removal of matching strings within a source file.
  • urlExtractorNew.py
    A sample program showing sample definitions and applications of HTML tag expressions created using makeHTMLTags helper function. Very useful for scraping data from HTML pages.
  • fourFn.py
    A simple algebraic expression parser, that performs +,-,*,/, and ^ arithmetic operations. (With suggestions and bug-fixes graciously offered by Andrea Griffini.)
  • SimpleCalc.py ~ submission by Steven Siew
    An interactive version of fourFn.py, with support for variables.
  • LAParser.py ~ submission by Mike Ellis
    An interactive Linear Algebra Parser, an extension of SimpleCalc.py. Supports linear algebra (LA) notation for vectors, matrices, and scalars, including matrix operations such as inversion and determinants. Converts LA expressions to C code - uses a separate C library for runtime evaluation of results.
  • configParse.py
    A simple alternative to Python's ConfigParser module, demonstrating the use of the Dict class to return nested dictionary access to configuration values.
  • getNTPserversNew.py
    Yet another scanString example, to read/extract the list of NTP servers from NIST's web site. Uses the new makeHTMLTags() method.
  • httpServerLogParser.py
    Parser for Apache server log files.
  • idlParse.py
    Parser for CORBA IDL files.
  • mozillaCalendarParser.py ~ submission by Petri Savolainen
    Parser for Mozilla calendar (*.ics) files.
  • pgn.py ~ submission by Alberto Santini
    Parser for PGN (Portable Game Notation) files, the standard form for documenting the moves in chess games.
  • simpleSQL.py
    A simple parser that will extract table and column names from SQL SELECT statements.
  • dfmparse.py ~ submission by Dan Griffith
    Parser for Delphi forms.
  • ebnf.py / ebnftest.py ~ submission by Seo Sanghyeon
    An EBNF-compiler that reads EBNF and generates a pyparsing grammar! Including a test that compiles... EBNF itself!
  • searchparser.py ~ submission by Steven Mooij and Rudolph Froger
    An expression parser that parses search strings, with special keyword and expression operations using (), not, and, or, and quoted strings.
  • romanNumerals.py
    A Roman numeral generator and parser example, showing the power of parse actions to compile Roman numerals into their integer values.
  • removeLineBreaks.py
    A string transformer that converts text files with hard line-breaks into one with line breaks only between paragraphs. Useful when converting downloads from Project Gutenberg to import to word processing apps that can reformat paragraphs once hard line-breaks are removed, or for loading into your Palm Pilot for portable perusal.

    See Successful Methods of Public Speaking.txt and Successful Methods of Public Speaking(2).txt for a sample before and after (text file courtesy of Project Gutenberg).

  • listAllMatches.py
    An example program showing the utility of the listAllMatches option when specifying results naming.
  • linenoExample.py
    An example program showing how to use the string location to extract line and column numbers, or the source line of text.
  • parseListString.py
    An example program showing a progression of steps, how to parse a string representation of a Python list back into a true list.
  • parsePythonValue.py
    An extension of parseListString.py to parse tuples and dicts, including nested values, returning a Python value of the original type.
  • indentedGrammarExample.py
    An example program showing how to parse a grammar using indentation for grouping, such as is done in Python.
  • simpleArith.py
    An example program showing how to use the new operatorPrecedence helper method to define a 6-function (+, -, *, /, ^, and !) arithmetic expression parser, with unary plus and minus signs.
  • simpleBool.py
    An example program showing how to use the new operatorPrecedence helper method to define a boolean expression parser, with parse actions associated with each operator to "compile" the expression into a data structure that will evaluate the expression's boolean value.
  • simpleWiki.py
    An example program showing how to use transformString to implement a simple Wiki markup parser.
  • sql2dot.py ~ submission by EnErGy [CSDX]
    A nice graphing program that generates schema diagrams from SQL table definition statements.
  • htmlStripper.py
    An example implementation of a common application, removing HTML markup tags from an HTML page, leaving just the text content.
  • macroExpansion.py
    An example implementation of a simple preprocessor, that will read embedded macro definitions and replace macro references with the defined substitution string.
  • sexpParser.py
    A parser that uses a recursive grammar to parse S-expressions.
  • nested.py
    An example using nestedExpr, a helper method to simplify definitions of expressions of nested lists.
  • withAttribute.py
    An example using withAttribute, a helper method to define parse actions to validate matched HTML tags using additional attributes. Especially helpful for matching common tags such as <DIV> and <TD>.
  • stackish.py
    A parser for the data representation format, Stackish.
  • builtin_parse_action_demo.py
    Demonstration of using builtins (min, max, sum, len, etc.) as parse actions.
  • antlr_grammar.py ~ submission by Luca DellOlio
    Pyparsing example parsing ANTLR .a files and generating a working pyparsing parser.
  • shapes.py
    Parse actions example simple shape definition syntax, and returning the matched tokens as domain objects instead of just strings.
  • datetimeParseActions.py
    Parse actions example showing a parse action returning a datetime object instead of string tokens, and doing validation of the tokens, raising a ParseException if the given YYYY/MM/DD string does not represent a valid date.
  • position.py
    Demonstration of a couple of different ways to capture the location a particular expression was found within the overall input string.
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/AcManForm.dfm0000644000000000000000000011434214412577542014607 0ustar00object Form1: TForm1 Left = 193 Top = 105 Width = 696 Height = 480 Caption = 'AcManTest' Color = clBtnFace Font.Charset = DEFAULT_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'MS Sans Serif' Font.Style = [] OldCreateOrder = False OnCreate = FormCreate PixelsPerInch = 96 TextHeight = 13 object RichEdit1: TRichEdit Left = 0 Top = 107 Width = 688 Height = 346 Align = alClient Lines.Strings = ( 'RichEdit1') TabOrder = 0 end object ActionToolBar1: TActionToolBar Left = 0 Top = 25 Width = 688 Height = 28 ActionManager = ActionManager1 Caption = 'ActionToolBar1' ColorMap.HighlightColor = 14410210 ColorMap.BtnSelectedColor = clBtnFace ColorMap.UnusedColor = 14410210 EdgeBorders = [ebTop, ebBottom] Font.Charset = DEFAULT_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'MS Sans Serif' Font.Style = [] ParentFont = False ParentShowHint = False ShowHint = True Spacing = 0 end object ActionMainMenuBar1: TActionMainMenuBar Left = 0 Top = 0 Width = 688 Height = 25 UseSystemFont = False ActionManager = ActionManager1 AnimationStyle = asSlide Caption = 'ActionMainMenuBar1' ColorMap.HighlightColor = 14410210 ColorMap.BtnSelectedColor = clBtnFace ColorMap.UnusedColor = 14410210 EdgeBorders = [ebTop, ebBottom] EdgeOuter = esNone Font.Charset = ANSI_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'Tahoma' Font.Style = [] ParentShowHint = False ShowHint = True Spacing = 0 end object ActionToolBar2: TActionToolBar Left = 0 Top = 53 Width = 688 Height = 28 ActionManager = ActionManager1 Caption = 'ActionToolBar2' ColorMap.HighlightColor = 14410210 ColorMap.BtnSelectedColor = clBtnFace ColorMap.UnusedColor = 14410210 EdgeBorders = [ebTop, ebBottom] Font.Charset = DEFAULT_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'MS Sans 
Serif' Font.Style = [] ParentFont = False ParentShowHint = False ShowHint = True Spacing = 0 end object ActionToolBar3: TActionToolBar Left = 0 Top = 81 Width = 688 Height = 26 ActionManager = ActionManager1 Caption = 'ActionToolBar3' ColorMap.HighlightColor = 14410210 ColorMap.BtnSelectedColor = clBtnFace ColorMap.UnusedColor = 14410210 Font.Charset = DEFAULT_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'MS Sans Serif' Font.Style = [] ParentFont = False Spacing = 0 end object ActionManager1: TActionManager FileName = 'settings' ActionBars.SessionCount = 4 ActionBars = < item Items = < item Action = EditUndo1 ImageIndex = 3 ShortCut = 16474 end item Action = EditCut1 ImageIndex = 0 ShortCut = 16472 end item Action = EditCopy1 ImageIndex = 1 ShortCut = 16451 end item Action = EditPaste1 ImageIndex = 2 ShortCut = 16470 end item Action = SearchFind1 ImageIndex = 15 ShortCut = 16454 end item Action = SearchReplace1 ImageIndex = 17 end> ActionBar = ActionToolBar1 AutoSize = False end item Items = < item Items = < item Action = FileOpen1 ImageIndex = 12 ShortCut = 16463 end item Action = FileSaveAs1 ImageIndex = 13 end item Action = FilePrintSetup1 end item Action = FileRun1 end item Action = FileExit1 ImageIndex = 14 LastSession = -1 UsageCount = -1 end> Caption = '&File' end item Items = < item Action = EditCut1 ImageIndex = 0 ShortCut = 16472 end item Action = EditCopy1 ImageIndex = 1 ShortCut = 16451 end item Action = EditPaste1 ImageIndex = 2 ShortCut = 16470 end item Action = EditSelectAll1 ShortCut = 16449 end item Action = EditUndo1 ImageIndex = 3 ShortCut = 16474 end item Action = EditDelete1 ImageIndex = 4 ShortCut = 46 end> Caption = '&Edit' end item Items = < item Action = RichEditBold1 ImageIndex = 5 ShortCut = 16450 end item Action = RichEditItalic1 ImageIndex = 6 ShortCut = 16457 end item Action = RichEditUnderline1 ImageIndex = 7 ShortCut = 16469 end item Action = RichEditStrikeOut1 end item Action = RichEditBullets1 ImageIndex = 8 end 
item Action = RichEditAlignLeft1 ImageIndex = 9 end item Action = RichEditAlignRight1 ImageIndex = 10 end item Action = RichEditAlignCenter1 ImageIndex = 11 end> Caption = 'F&ormat' end item Items = < item Action = SearchFind1 ImageIndex = 15 ShortCut = 16454 end item Action = SearchFindNext1 ImageIndex = 16 ShortCut = 114 end item Action = SearchReplace1 ImageIndex = 17 end item Action = SearchFindFirst1 end> Caption = '&Search' end item Items = < item Action = CustomizeActionBars1 end> Caption = '&Tools' end item Items = < item Action = HelpContents1 ImageIndex = 18 end> Caption = '&Help' end> ActionBar = ActionMainMenuBar1 AutoSize = False end item Items = < item Action = RichEditBold1 ImageIndex = 5 ShortCut = 16450 end item Action = RichEditItalic1 ImageIndex = 6 ShortCut = 16457 end item Action = RichEditUnderline1 ImageIndex = 7 ShortCut = 16469 end item Action = RichEditBullets1 Caption = 'Bull&ets' ImageIndex = 8 end item Action = RichEditAlignLeft1 ImageIndex = 9 end item Action = RichEditAlignRight1 ImageIndex = 10 end item Action = RichEditAlignCenter1 ImageIndex = 11 end> ActionBar = ActionToolBar2 AutoSize = False end item AutoSize = False end item AutoSize = False end item Items = < item Action = FileSaveAs1 ImageIndex = 13 LastSession = 2 end item Action = CustomizeActionBars1 end item Action = FileExit1 ImageIndex = 14 end item Action = HelpContents1 Caption = 'C&ontents' ImageIndex = 18 end item Action = ActionShowStatus Caption = '&ShowStatus' end> ActionBar = ActionToolBar3 AutoSize = False end> Images = ImageList1 Left = 88 Top = 136 StyleName = 'XP Style' object EditCut1: TEditCut Category = 'Edit' Caption = 'Cu&t' Hint = 'Cut|Cuts the selection and puts it on the Clipboard' ImageIndex = 0 ShortCut = 16472 end object EditCopy1: TEditCopy Category = 'Edit' Caption = '&Copy' Hint = 'Copy|Copies the selection and puts it on the Clipboard' ImageIndex = 1 ShortCut = 16451 end object EditPaste1: TEditPaste Category = 'Edit' Caption = '&Paste' Hint = 
'Paste|Inserts Clipboard contents' ImageIndex = 2 ShortCut = 16470 end object EditSelectAll1: TEditSelectAll Category = 'Edit' Caption = 'Select &All' Hint = 'Select All|Selects the entire document' ShortCut = 16449 end object EditUndo1: TEditUndo Category = 'Edit' Caption = '&Undo' Hint = 'Undo|Reverts the last action' ImageIndex = 3 ShortCut = 16474 end object EditDelete1: TEditDelete Category = 'Edit' Caption = '&Delete' Hint = 'Delete|Erases the selection' ImageIndex = 4 ShortCut = 46 end object RichEditBold1: TRichEditBold Category = 'Format' AutoCheck = True Caption = '&Bold' Hint = 'Bold' ImageIndex = 5 ShortCut = 16450 end object RichEditItalic1: TRichEditItalic Category = 'Format' AutoCheck = True Caption = '&Italic' Hint = 'Italic' ImageIndex = 6 ShortCut = 16457 end object RichEditUnderline1: TRichEditUnderline Category = 'Format' AutoCheck = True Caption = '&Underline' Hint = 'Underline' ImageIndex = 7 ShortCut = 16469 end object RichEditStrikeOut1: TRichEditStrikeOut Category = 'Format' AutoCheck = True Caption = '&Strikeout' Hint = 'Strikeout' end object RichEditBullets1: TRichEditBullets Category = 'Format' AutoCheck = True Caption = '&Bullets' Hint = 'Bullets|Inserts a bullet on the current line' ImageIndex = 8 end object RichEditAlignLeft1: TRichEditAlignLeft Category = 'Format' AutoCheck = True Caption = 'Align &Left' Hint = 'Align Left|Aligns text at the left indent' ImageIndex = 9 end object RichEditAlignRight1: TRichEditAlignRight Category = 'Format' AutoCheck = True Caption = 'Align &Right' Hint = 'Align Right|Aligns text at the right indent' ImageIndex = 10 end object RichEditAlignCenter1: TRichEditAlignCenter Category = 'Format' AutoCheck = True Caption = '&Center' Hint = 'Center|Centers text between margins' ImageIndex = 11 end object FileOpen1: TFileOpen Category = 'File' Caption = '&Open...' Hint = 'Open|Opens an existing file' ImageIndex = 12 ShortCut = 16463 end object FileSaveAs1: TFileSaveAs Category = 'File' Caption = 'Save &As...' 
Hint = 'Save As|Saves the active file with a new name' ImageIndex = 13 end object FilePrintSetup1: TFilePrintSetup Category = 'File' Caption = 'Print Set&up...' Hint = 'Print Setup' end object FileRun1: TFileRun Category = 'File' Browse = False BrowseDlg.Title = 'Run' Caption = '&Run...' Hint = 'Run|Runs an application' Operation = 'open' ShowCmd = scShowNormal end object FileExit1: TFileExit Category = 'File' Caption = 'E&xit' Hint = 'Exit|Quits the application' ImageIndex = 14 end object SearchFind1: TSearchFind Category = 'Search' Caption = '&Find...' Hint = 'Find|Finds the specified text' ImageIndex = 15 ShortCut = 16454 end object SearchFindNext1: TSearchFindNext Category = 'Search' Caption = 'Find &Next' Enabled = False Hint = 'Find Next|Repeats the last find' ImageIndex = 16 ShortCut = 114 end object SearchReplace1: TSearchReplace Category = 'Search' Caption = '&Replace' Hint = 'Replace|Replaces specific text with different text' ImageIndex = 17 end object SearchFindFirst1: TSearchFindFirst Category = 'Search' Caption = 'F&ind First' Hint = 'Find First|Finds the first occurrence of specified text' end object CustomizeActionBars1: TCustomizeActionBars Category = 'Tools' Caption = '&Customize' CustomizeDlg.StayOnTop = False end object HelpContents1: THelpContents Category = 'Help' Caption = '&Contents' Enabled = False Hint = 'Help Contents' ImageIndex = 18 end object ActionShowStatus: TAction Category = 'Tools' Caption = 'ShowStatus' OnExecute = ActionShowStatusExecute end end object ImageList1: TImageList Left = 168 Top = 136 Bitmap = { 494C010113001400040010001000FFFFFFFFFF10FFFFFFFFFFFFFFFF424D3600 0000000000003600000028000000400000005000000001001000000000000028 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 1040104010420000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000010401040 FF7FFF7F18631042000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000010401040FF7FFF7F 0000000018631863104200000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000104210401040FF7FFF7F00000000 1040104000001863186310420000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000001863000000000000 0000000000000000186300000000000000000000000000000000000000000000 00000000000000000000000000000000000010421040FF7F0000000010401040 1040104010400000186318631042000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000010420000 0000000010420000000000000000000000000000000000001863000000000000 0000000000000000186300000000000000001042000000001040104010400042 E07F104010401040000018631863104200000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000010420000 
0000000010420000000000000000000000001042104010401040104010401040 0042104010401040104000001863000000000000000000000000000000000000 0000000000000000000000000000000000000000000000001863000000000000 0000186300000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000001040FF7F1040104010401040 1040E07FE07F1040104010400000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000001863000000000000 0000186300000000000000000000000000000000000000001863000000000000 000018630000000000000000000000000000000000001040FF7F104010401040 104010400042E07FE07F10401040000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000001863000000000000 0000186300000000000000000000000000000000000000001040FF7F10401040 104000421040E07FE07F10401040104000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 1042000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000001040FF7F1040 1040E07FE07FE07F104010401040000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 104200000000000000000000000000000000000000000000000000001040FF7F 1040104010401040104000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000001040 FF7F104010400000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 1040104000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000001042104210421042104210421042 104210421042FF7F186310421863FF7F18630000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000001042104210421042104210421042 1042104210421042FF7F1042FF7F104210420000000000000000000000000000 0000000000000000000000000000000000000000000000420042000000000000 0000000000000000000000000042000000000000000000000000000000000000 0000000000000000000000000000000000001000100010001000000000001042 10421042FF7FFF7FFF7F10001000100010000000000000000000000000000000 0000000000000000000000000000000000000000000000420042000000000000 0000000000000000000000000042000000000000000000000000004200420000 00000000000018630000004200000000000000000000000010001F0010000000 00001042FF7FFF7FFF7F10000000000000000000FF7F00000000000000000000 0000000000000000FF7F00000000000000000000000000420042000000000000 0000000000000000000000000042000000000000000000000000004200420000 000000000000186300000042000000000000000000000000100010001F001000 0000FF7FFF7FFF7FFF7F10000000000000000000FF7F00000000000000000000 0000000000000000FF7F00000000000000000000000000420042000000000000 0000000000000000000000000042000000000000000000000000004200420000 00000000000000000000004200000000000000000000000010001F0010001F00 0000FF7FFF7FFF7FFF7F10000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000420042004200420042 0042004200420042004200420042000000000000000000000000004200420042 004200420042004200420042000000000000000000000000100010001F001000 0000FF7FFF03FF7FFF03100000000000000000000000FF7F0000000000000000 00000000FF7F0000000000000000000000000000000000420042000000000000 0000000000000000000000420042000000000000000000000000004200420000 
00000000000000000042004200000000000000000000000010001F0010001F00 0000FF03FF7FFF03FF7F100000000000000000000000FF7F0000000000001863 00000000FF7F0000000000000000000000000000000000420000000000000000 0000000000000000000000000042000000000000000000000000004200001863 186318631863186300000042000000000000000000000000100010001F001000 0000FF7FFF03FF7FFF03100000000000000000000000FF7F0000000000001863 00000000FF7F0000000000000000000000000000000000420000000000000000 0000000000000000000000000042000000000000000000000000004200001863 18631863186318630000004200000000000000000000000010001F0010001F00 0000FF03FF7FFF03FF7F10000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000420000000000000000 0000000000000000000000000042000000000000000000000000004200001863 1863186318631863000000000000000000000000000000001000100010001000 100010001000100010001000000000000000000000000000FF7F000000000000 00000000FF7F0000000000000000000000000000000000420000000000000000 0000000000000000000000000042000000000000000000000000004200001863 1863186318631863000018630000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000420000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000420000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000002 0002000200020000000000000000000000000000000000000000FF7F00000000 000000000000FF7F000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000100010001000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000010001000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000001000 1000100010001000100010001000100010000000000000000000000000000000 0000000000000000000000000000000000000000000000000000100000000000 1000000000001000100000000000000000000000000000000000000000000000 1000100010001000100010001000100010000000000000000000000000001000 FF7FFF7FFF7FFF7FFF7FFF7FFF7FFF7F10000000000000000000000000000000 0000000000000000000000000000000000000000000000000000100000000000 1000000010000000000010000000000000000000000000000000000000000000 1000FF7FFF7FFF7FFF7FFF7FFF7FFF7F10000000104200421042004210421000 FF7F000000000000000000000000FF7F10000000000000000000000000000000 0000000000000000000000000000000000000000000000000000100000000000 1000000010000000000010000000000000000000000000000000000000000000 
1000FF7F00000000000000000000FF7F10000000004210420042104200421000 FF7FFF7FFF7FFF7FFF7FFF7FFF7FFF7F10000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000010001000 1000000010000000000010000000000000000000000000000000000000000000 1000FF7FFF7FFF7FFF7FFF7FFF7FFF7F10000000104200421042004210421000 FF7F000000000000FF7F10001000100010000000000000000000000000000000 0000000000000000000010000000000000000000000000000000000000000000 10000000100010001000000000000000000000000000FF7FFF7FFF7FFF7FFF7F 1000FF7F00000000000000000000FF7F10000000004210420042104200421000 FF7FFF7FFF7FFF7FFF7F1000FF7F100000000000000010001000100010001000 0000000000000000000010000000000000000000000000000000000000000000 10000000100000000000000000000000000000000000FF7F0000000000000000 1000FF7FFF7FFF7FFF7FFF7FFF7FFF7F10000000104200421042004210421000 FF7FFF7FFF7FFF7FFF7F10001000000000000000000010001000100010000000 0000000000000000000000001000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7FFF7FFF7FFF7FFF7F 1000FF7F00000000FF7F10001000100010000000004210420042104200421000 1000100010001000100010000000000000000000000010001000100000000000 0000000000000000000000001000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7F0000000000000000 1000FF7FFF7FFF7FFF7F1000FF7F100000000000104200421042004210420042 1042004210420042104200420000000000000000000010001000000010000000 0000000000000000000000001000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7FFF7FFF7FFF7FFF7F 1000FF7FFF7FFF7FFF7F10001000000000000000004210420000000000000000 0000000000000000104210420000000000000000000010000000000000001000 1000000000000000000010000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7F00000000FF7F0000 1000100010001000100010000000000000000000104210420000000000000000 0000000000000000104200420000000000000000000000000000000000000000 
0000100010001000100000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7FFF7FFF7FFF7F0000 FF7F0000000000000000000000000000000000000042104200420000E07F0000 0000E07F00001042004210420000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000FF7FFF7FFF7FFF7F0000 000000000000000000000000000000000000000000000000000000000000E07F E07F000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000424D3E000000000000003E000000 2800000040000000500000000100010000000000800200000000000000000000 000000000000000000000000FFFFFF00FFFFB6E7FFFF0000FE49B76BFE3F0000 FE498427F81F0000FFFFB76BE00F0000FFFFCEE780070000C7C7FFFF00030000 C7C7C7C700010000C387C7C700000000C007C38700010000C007C00780010000 C007C007C0010000C007C007E0000000C007C007F0000000F39FC007F8030000 F39FF39FFC0F0000F39FF39FFE3F0000FFFFFF7E0000FFFFC001BFFF0000FFFF 8031F003000007C18031E003E00707C18031E003E00707C18001E003E0070101 8001E003E007000180012003E00700018FF1E002E00700018FF1E003E0078003 8FF1E003E007C1078FF1E003FFFFC1078FF1E003F81FE38F8FF5FFFFF81FE38F 8001BF7DF81FE38FFFFF7F7EFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 8FFFFFFFFFFFFFFF8C03C007C007C0078FFFFFFFFFFFFFFFFFFFC03FF807F83F FFFFFFFFFFFFFFFF8FFFC007C007C0078C03FFFFFFFFFFFF8FFFC03FF807F01F FFFFFFFFFFFFFFFFFFFFC007C007C0078FFFFFFFFFFFFFFF8C03C03FF807F83F 
8FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF EFFDFFFFFFFFE00FC7FFFFFFFFFFFFFFC3FBF00F81FFF83FE3F7F8C7E3FFF39F F1E7F8C7F1FFF39FF8CFF8C7F8FFF39FFC1FF80FFC7FF39FFE3FF8C7FE3FF39F FC1FF8C7FF1FF39FF8CFF8C7FF8FF39FE1E7F00FFF03E10FC3F3FFFFFFFFFFFF C7FDFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF9FFFFFFFC00FFFF F6CFFE008000FFFFF6B7FE000000FFFFF6B7FE000000FFFFF8B780000000FFF7 FE8F80000001C1F7FE3F80000003C3FBFF7F80000003C7FBFE3F80010003CBFB FEBF80030003DCF7FC9F80070FC3FF0FFDDF807F0003FFFFFDDF80FF8007FFFF FDDF81FFF87FFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000 000000000000} end end ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/LAparser.py0000644000000000000000000005117014412577542014376 0ustar00""" Purpose: Linear Algebra Parser Based on: SimpleCalc.py example (author Paul McGuire) in pyparsing-1.3.3 Author: Mike Ellis Copyright: Ellis & Grant, Inc. 2005 License: You may freely use, modify, and distribute this software. Warranty: THIS SOFTWARE HAS NO WARRANTY WHATSOEVER. USE AT YOUR OWN RISK. Notes: Parses infix linear algebra (LA) notation for vectors, matrices, and scalars. Output is C code function calls. The parser can be run as an interactive interpreter or included as module to use for in-place substitution into C files containing LA equations. Supported operations are: OPERATION: INPUT OUTPUT Scalar addition: "a = b+c" "a=(b+c)" Scalar subtraction: "a = b-c" "a=(b-c)" Scalar multiplication: "a = b*c" "a=b*c" Scalar division: "a = b/c" "a=b/c" Scalar exponentiation: "a = b^c" "a=pow(b,c)" Vector scaling: "V3_a = V3_b * c" "vCopy(a,vScale(b,c))" Vector addition: "V3_a = V3_b + V3_c" "vCopy(a,vAdd(b,c))" Vector subtraction: "V3_a = V3_b - V3_c" "vCopy(a,vSubtract(b,c))" Vector dot product: "a = V3_b * V3_c" "a=vDot(b,c)" Vector outer product: "M3_a = V3_b @ V3_c" "a=vOuterProduct(b,c)" Vector magn. 
squared: "a = V3_b^Mag2" "a=vMagnitude2(b)" Vector magnitude: "a = V3_b^Mag" "a=sqrt(vMagnitude2(b))" Matrix scaling: "M3_a = M3_b * c" "mCopy(a,mScale(b,c))" Matrix addition: "M3_a = M3_b + M3_c" "mCopy(a,mAdd(b,c))" Matrix subtraction: "M3_a = M3_b - M3_c" "mCopy(a,mSubtract(b,c))" Matrix multiplication: "M3_a = M3_b * M3_c" "mCopy(a,mMultiply(b,c))" Matrix by vector mult.: "V3_a = M3_b * V3_c" "vCopy(a,mvMultiply(b,c))" Matrix inversion: "M3_a = M3_b^-1" "mCopy(a,mInverse(b))" Matrix transpose: "M3_a = M3_b^T" "mCopy(a,mTranspose(b))" Matrix determinant: "a = M3_b^Det" "a=mDeterminant(b)" The parser requires the expression to be an equation. Each non-scalar variable must be prefixed with a type tag, 'M3_' for 3x3 matrices and 'V3_' for 3-vectors. For proper compilation of the C code, the variables need to be declared without the prefix as float[3] for vectors and float[3][3] for matrices. The operations do not modify any variables on the right-hand side of the equation. Equations may include nested expressions within parentheses. The allowed binary operators are '+-*/^' for scalars, and '+-*^@' for vectors and matrices with the meanings defined in the table above. Specifying an improper combination of operands, e.g. adding a vector to a matrix, is detected by the parser and results in a Python TypeError Exception. The usual cause of this is omitting one or more tag prefixes. The parser knows nothing about a a variable's C declaration and relies entirely on the type tags. Errors in C declarations are not caught until compile time. Usage: To process LA equations embedded in source files, import this module and pass input and output file objects to the fprocess() function. You can can also invoke the parser from the command line, e.g. 'python LAparser.py', to run a small test suite and enter an interactive loop where you can enter LA equations and see the resulting C code. 
""" import re, sys from pyparsing import ( Word, alphas, ParseException, Literal, CaselessLiteral, Combine, Optional, nums, Forward, ZeroOrMore, StringEnd, alphanums, ) # Debugging flag can be set to either "debug_flag=True" or "debug_flag=False" debug_flag = False # ---------------------------------------------------------------------------- # Variables that hold intermediate parsing results and a couple of # helper functions. exprStack = [] # Holds operators and operands parsed from input. targetvar = None # Holds variable name to left of '=' sign in LA equation. def _pushFirst(str, loc, toks): if debug_flag: print("pushing ", toks[0], "str is ", str) exprStack.append(toks[0]) def _assignVar(str, loc, toks): global targetvar targetvar = toks[0] # ----------------------------------------------------------------------------- # The following statements define the grammar for the parser. point = Literal(".") e = CaselessLiteral("E") plusorminus = Literal("+") | Literal("-") number = Word(nums) integer = Combine(Optional(plusorminus) + number) floatnumber = Combine( integer + Optional(point + Optional(number)) + Optional(e + integer) ) lbracket = Literal("[") rbracket = Literal("]") ident = Forward() ## The definition below treats array accesses as identifiers. This means your expressions ## can include references to array elements, rows and columns, e.g., a = b[i] + 5. ## Expressions within []'s are not presently supported, so a = b[i+1] will raise ## a ParseException. 
ident = Combine( Word(alphas + "-", alphanums + "_") + ZeroOrMore(lbracket + (Word(alphas + "-", alphanums + "_") | integer) + rbracket) ) plus = Literal("+") minus = Literal("-") mult = Literal("*") div = Literal("/") outer = Literal("@") lpar = Literal("(").suppress() rpar = Literal(")").suppress() addop = plus | minus multop = mult | div | outer expop = Literal("^") assignop = Literal("=") expr = Forward() atom = (e | floatnumber | integer | ident).setParseAction(_pushFirst) | ( lpar + expr.suppress() + rpar ) factor = Forward() factor << atom + ZeroOrMore((expop + factor).setParseAction(_pushFirst)) term = factor + ZeroOrMore((multop + factor).setParseAction(_pushFirst)) expr << term + ZeroOrMore((addop + term).setParseAction(_pushFirst)) equation = (ident + assignop).setParseAction(_assignVar) + expr + StringEnd() # End of grammar definition # ----------------------------------------------------------------------------- ## The following are helper variables and functions used by the Binary Infix Operator ## Functions described below. vprefix = "V3_" vplen = len(vprefix) mprefix = "M3_" mplen = len(mprefix) ## We don't support unary negation for vectors and matrices class UnaryUnsupportedError(Exception): pass def _isvec(ident): if ident[0] == "-" and ident[1 : vplen + 1] == vprefix: raise UnaryUnsupportedError else: return ident[0:vplen] == vprefix def _ismat(ident): if ident[0] == "-" and ident[1 : mplen + 1] == mprefix: raise UnaryUnsupportedError else: return ident[0:mplen] == mprefix def _isscalar(ident): return not (_isvec(ident) or _ismat(ident)) ## Binary infix operator (BIO) functions. These are called when the stack evaluator ## pops a binary operator like '+' or '*". The stack evaluator pops the two operand, a and b, ## and calls the function that is mapped to the operator with a and b as arguments. Thus, ## 'x + y' yields a call to addfunc(x,y). 
Each of the BIO functions checks the prefixes of its ## arguments to determine whether the operand is scalar, vector, or matrix. This information ## is used to generate appropriate C code. For scalars, this is essentially the input string, e.g. ## 'a + b*5' as input yields 'a + b*5' as output. For vectors and matrices, the input is translated to ## nested function calls, e.g. "V3_a + V3_b*5" yields "V3_vAdd(a,vScale(b,5)". Note that prefixes are ## stripped from operands and function names within the argument list to the outer function and ## the appropriate prefix is placed on the outer function for removal later as the stack evaluation ## recurses toward the final assignment statement. def _addfunc(a, b): if _isscalar(a) and _isscalar(b): return "(%s+%s)" % (a, b) if _isvec(a) and _isvec(b): return "%svAdd(%s,%s)" % (vprefix, a[vplen:], b[vplen:]) if _ismat(a) and _ismat(b): return "%smAdd(%s,%s)" % (mprefix, a[mplen:], b[mplen:]) else: raise TypeError def _subfunc(a, b): if _isscalar(a) and _isscalar(b): return "(%s-%s)" % (a, b) if _isvec(a) and _isvec(b): return "%svSubtract(%s,%s)" % (vprefix, a[vplen:], b[vplen:]) if _ismat(a) and _ismat(b): return "%smSubtract(%s,%s)" % (mprefix, a[mplen:], b[mplen:]) else: raise TypeError def _mulfunc(a, b): if _isscalar(a) and _isscalar(b): return "%s*%s" % (a, b) if _isvec(a) and _isvec(b): return "vDot(%s,%s)" % (a[vplen:], b[vplen:]) if _ismat(a) and _ismat(b): return "%smMultiply(%s,%s)" % (mprefix, a[mplen:], b[mplen:]) if _ismat(a) and _isvec(b): return "%smvMultiply(%s,%s)" % (vprefix, a[mplen:], b[vplen:]) if _ismat(a) and _isscalar(b): return "%smScale(%s,%s)" % (mprefix, a[mplen:], b) if _isvec(a) and _isscalar(b): return "%svScale(%s,%s)" % (vprefix, a[mplen:], b) else: raise TypeError def _outermulfunc(a, b): ## The '@' operator is used for the vector outer product. 
if _isvec(a) and _isvec(b): return "%svOuterProduct(%s,%s)" % (mprefix, a[vplen:], b[vplen:]) else: raise TypeError def _divfunc(a, b): ## The '/' operator is used only for scalar division if _isscalar(a) and _isscalar(b): return "%s/%s" % (a, b) else: raise TypeError def _expfunc(a, b): ## The '^' operator is used for exponentiation on scalars and ## as a marker for unary operations on vectors and matrices. if _isscalar(a) and _isscalar(b): return "pow(%s,%s)" % (str(a), str(b)) if _ismat(a) and b == "-1": return "%smInverse(%s)" % (mprefix, a[mplen:]) if _ismat(a) and b == "T": return "%smTranspose(%s)" % (mprefix, a[mplen:]) if _ismat(a) and b == "Det": return "mDeterminant(%s)" % (a[mplen:]) if _isvec(a) and b == "Mag": return "sqrt(vMagnitude2(%s))" % (a[vplen:]) if _isvec(a) and b == "Mag2": return "vMagnitude2(%s)" % (a[vplen:]) else: raise TypeError def _assignfunc(a, b): ## The '=' operator is used for assignment if _isscalar(a) and _isscalar(b): return "%s=%s" % (a, b) if _isvec(a) and _isvec(b): return "vCopy(%s,%s)" % (a[vplen:], b[vplen:]) if _ismat(a) and _ismat(b): return "mCopy(%s,%s)" % (a[mplen:], b[mplen:]) else: raise TypeError ## End of BIO func definitions ##---------------------------------------------------------------------------- # Map operator symbols to corresponding BIO funcs opn = { "+": (_addfunc), "-": (_subfunc), "*": (_mulfunc), "@": (_outermulfunc), "/": (_divfunc), "^": (_expfunc), } ##---------------------------------------------------------------------------- # Recursive function that evaluates the expression stack def _evaluateStack(s): op = s.pop() if op in "+-*/@^": op2 = _evaluateStack(s) op1 = _evaluateStack(s) result = opn[op](op1, op2) if debug_flag: print(result) return result else: return op ##---------------------------------------------------------------------------- # The parse function that invokes all of the above. 
def parse(input_string): """ Accepts an input string containing an LA equation, e.g., "M3_mymatrix = M3_anothermatrix^-1" returns C code function calls that implement the expression. """ global exprStack global targetvar # Start with a blank exprStack and a blank targetvar exprStack = [] targetvar = None if input_string != "": # try parsing the input string try: L = equation.parseString(input_string) except ParseException as err: print("Parse Failure", file=sys.stderr) print(err.line, file=sys.stderr) print(" " * (err.column - 1) + "^", file=sys.stderr) print(err, file=sys.stderr) raise # show result of parsing the input string if debug_flag: print(input_string, "->", L) print("exprStack=", exprStack) # Evaluate the stack of parsed operands, emitting C code. try: result = _evaluateStack(exprStack) except TypeError: print( "Unsupported operation on right side of '%s'.\nCheck for missing or incorrect tags on non-scalar operands." % input_string, file=sys.stderr, ) raise except UnaryUnsupportedError: print( "Unary negation is not supported for vectors and matrices: '%s'" % input_string, file=sys.stderr, ) raise # Create final assignment and print it. if debug_flag: print("var=", targetvar) if targetvar != None: try: result = _assignfunc(targetvar, result) except TypeError: print( "Left side tag does not match right side of '%s'" % input_string, file=sys.stderr, ) raise except UnaryUnsupportedError: print( "Unary negation is not supported for vectors and matrices: '%s'" % input_string, file=sys.stderr, ) raise return result else: print("Empty left side in '%s'" % input_string, file=sys.stderr) raise TypeError ##----------------------------------------------------------------------------------- def fprocess(infilep, outfilep): """ Scans an input file for LA equations between double square brackets, e.g. 
[[ M3_mymatrix = M3_anothermatrix^-1 ]], and replaces the expression with a comment containing the equation followed by nested function calls that implement the equation as C code. A trailing semi-colon is appended. The equation within [[ ]] should NOT end with a semicolon as that will raise a ParseException. However, it is ok to have a semicolon after the right brackets. Other text in the file is unaltered. The arguments are file objects (NOT file names) opened for reading and writing, respectively. """ pattern = r"\[\[\s*(.*?)\s*\]\]" eqn = re.compile(pattern, re.DOTALL) s = infilep.read() def parser(mo): ccode = parse(mo.group(1)) return "/* %s */\n%s;\nLAParserBufferReset();\n" % (mo.group(1), ccode) content = eqn.sub(parser, s) outfilep.write(content) ##----------------------------------------------------------------------------------- def test(): """ Tests the parsing of various supported expressions. Raises an AssertError if the output is not what is expected. Prints the input, expected output, and actual output for all tests. 
""" print("Testing LAParser") testcases = [ ("Scalar addition", "a = b+c", "a=(b+c)"), ("Vector addition", "V3_a = V3_b + V3_c", "vCopy(a,vAdd(b,c))"), ("Vector addition", "V3_a=V3_b+V3_c", "vCopy(a,vAdd(b,c))"), ("Matrix addition", "M3_a = M3_b + M3_c", "mCopy(a,mAdd(b,c))"), ("Matrix addition", "M3_a=M3_b+M3_c", "mCopy(a,mAdd(b,c))"), ("Scalar subtraction", "a = b-c", "a=(b-c)"), ("Vector subtraction", "V3_a = V3_b - V3_c", "vCopy(a,vSubtract(b,c))"), ("Matrix subtraction", "M3_a = M3_b - M3_c", "mCopy(a,mSubtract(b,c))"), ("Scalar multiplication", "a = b*c", "a=b*c"), ("Scalar division", "a = b/c", "a=b/c"), ("Vector multiplication (dot product)", "a = V3_b * V3_c", "a=vDot(b,c)"), ( "Vector multiplication (outer product)", "M3_a = V3_b @ V3_c", "mCopy(a,vOuterProduct(b,c))", ), ("Matrix multiplication", "M3_a = M3_b * M3_c", "mCopy(a,mMultiply(b,c))"), ("Vector scaling", "V3_a = V3_b * c", "vCopy(a,vScale(b,c))"), ("Matrix scaling", "M3_a = M3_b * c", "mCopy(a,mScale(b,c))"), ( "Matrix by vector multiplication", "V3_a = M3_b * V3_c", "vCopy(a,mvMultiply(b,c))", ), ("Scalar exponentiation", "a = b^c", "a=pow(b,c)"), ("Matrix inversion", "M3_a = M3_b^-1", "mCopy(a,mInverse(b))"), ("Matrix transpose", "M3_a = M3_b^T", "mCopy(a,mTranspose(b))"), ("Matrix determinant", "a = M3_b^Det", "a=mDeterminant(b)"), ("Vector magnitude squared", "a = V3_b^Mag2", "a=vMagnitude2(b)"), ("Vector magnitude", "a = V3_b^Mag", "a=sqrt(vMagnitude2(b))"), ( "Complicated expression", "myscalar = (M3_amatrix * V3_bvector)^Mag + 5*(-xyz[i] + 2.03^2)", "myscalar=(sqrt(vMagnitude2(mvMultiply(amatrix,bvector)))+5*(-xyz[i]+pow(2.03,2)))", ), ( "Complicated Multiline", "myscalar = \n(M3_amatrix * V3_bvector)^Mag +\n 5*(xyz + 2.03^2)", "myscalar=(sqrt(vMagnitude2(mvMultiply(amatrix,bvector)))+5*(xyz+pow(2.03,2)))", ), ] all_passed = [True] def post_test(test, parsed): # copy exprStack to evaluate and clear before running next test parsed_stack = exprStack[:] exprStack.clear() name, testcase, 
expected = next(tc for tc in testcases if tc[1] == test) this_test_passed = False try: try: result = _evaluateStack(parsed_stack) except TypeError: print( "Unsupported operation on right side of '%s'.\nCheck for missing or incorrect tags on non-scalar operands." % input_string, file=sys.stderr, ) raise except UnaryUnsupportedError: print( "Unary negation is not supported for vectors and matrices: '%s'" % input_string, file=sys.stderr, ) raise # Create final assignment and print it. if debug_flag: print("var=", targetvar) if targetvar != None: try: result = _assignfunc(targetvar, result) except TypeError: print( "Left side tag does not match right side of '%s'" % input_string, file=sys.stderr, ) raise except UnaryUnsupportedError: print( "Unary negation is not supported for vectors and matrices: '%s'" % input_string, file=sys.stderr, ) raise else: print("Empty left side in '%s'" % input_string, file=sys.stderr) raise TypeError parsed["result"] = result parsed["passed"] = this_test_passed = result == expected finally: all_passed[0] = all_passed[0] and this_test_passed print("\n" + name) equation.runTests((t[1] for t in testcases), postParse=post_test) ##TODO: Write testcases with invalid expressions and test that the expected ## exceptions are raised. print("Tests completed!") print("PASSED" if all_passed[0] else "FAILED") assert all_passed[0] ##---------------------------------------------------------------------------- ## The following is executed only when this module is executed as ## command line script. It runs a small test suite (see above) ## and then enters an interactive loop where you ## can enter expressions and see the resulting C code as output. if __name__ == "__main__": import sys if not sys.flags.interactive: # run testcases test() sys.exit(0) # input_string input_string = "" # Display instructions on how to use the program interactively interactiveusage = """ Entering interactive mode: Type in an equation to be parsed or 'quit' to exit the program. 
Type 'debug on' to print parsing details as each string is processed. Type 'debug off' to stop printing parsing details """ print(interactiveusage) input_string = input("> ") while input_string != "quit": if input_string == "debug on": debug_flag = True elif input_string == "debug off": debug_flag = False else: try: print(parse(input_string)) except Exception: pass # obtain new input string input_string = input("> ") # if user types 'quit' then say goodbye print("Good bye!") import os os._exit(0) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/Setup.ini0000644000000000000000000000264314412577542014115 0ustar00[Startup] AppName=M3i.comm stname = Utility modemid=125D&DEV_1999 audioid=125D&DEV_1998 win98path= winmepath= win2kpath= winxppath= win95path= winnt4path= stupgrade =Install/Upgrade Drivers stuninstall =Uninstall Drivers stchoose =Choose One Function to Process stchoosez3 =Choose Devices to Process copycompl =Copying files completed RemString1=Set up has finished remove ESS device driver and cleaned your system. Click Finish to exit. RemString2=ESS devices is removed completely.No need to reboot. If you want to reinstall, run the setup again with driver package. stshowmsg1=Setup will clean the installed files and update registry. stshowmsg2=Setup is updating system's registry .... 
stshowmsg3=Setup is starting sysdriver=es56cvmp.sys mdmzn=mdmm3com.inf mdmznp=esmdm_98.inf mdmzna=mdmessa.inf spkname=essspk.exe remvess=remvess.exe slmcat=allem3m.cat audiocat=allem3.cat audioinf=M3i sysaudio=es198xdl.sys audiovxd=es198x.vxd [Languages] Default=0x0009 count=30 key0=0x002d key1=0x0003 key2=0x0804 key3=0x0404 key4=0x001a key5=0x0005 key6=0x0006 key7=0x0013 key8=0x0009 key9=0x000b key10=0x0c0c key11=0x040c key12=0x0007 key13=0x0008 key14=0x000e key15=0x0021 key16=0x0010 key17=0x0011 key18=0x0012 key19=0x0014 key20=0x0015 key21=0x0416 key22=0x0816 key23=0x0019 key24=0x001b key25=0x0024 key26=0x000a key27=0x001d key28=0x001e key29=0x001f [test] foo=bar ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/SimpleCalc.py0000644000000000000000000000712714412577542014704 0ustar00# SimpleCalc.py # # Demonstration of the parsing module, # Sample usage # # $ python SimpleCalc.py # Type in the string to be parse or 'quit' to exit the program # > g=67.89 + 7/5 # 69.29 # > g # 69.29 # > h=(6*g+8.8)-g # 355.25 # > h + 1 # 356.25 # > 87.89 + 7/5 # 89.29 # > ans+10 # 99.29 # > quit # Good bye! 
# # # Uncomment the line below for readline support on interactive terminal # import readline from pyparsing import ParseException, Word, alphas, alphanums # Debugging flag can be set to either "debug_flag=True" or "debug_flag=False" debug_flag = False variables = {} from fourFn import BNF, exprStack, evaluate_stack # from fourFn import BNF, exprStack, fn, opn # def evaluateStack( s ): # op = s.pop() # if op == 'unary -': # return -evaluateStack( s ) # if op in "+-*/^": # op2 = evaluateStack( s ) # op1 = evaluateStack( s ) # return opn[op]( op1, op2 ) # elif op == "PI": # return math.pi # 3.1415926535 # elif op == "E": # return math.e # 2.718281828 # elif op in fn: # return fn[op]( evaluateStack( s ) ) # elif op[0].isalpha(): # if op in variables: # return variables[op] # raise Exception("invalid identifier '%s'" % op) # else: # return float( op ) arithExpr = BNF() ident = Word(alphas, alphanums).setName("identifier") assignment = ident("varname") + "=" + arithExpr pattern = assignment | arithExpr if __name__ == "__main__": # input_string input_string = "" # Display instructions on how to quit the program print("Type in the string to be parsed or 'quit' to exit the program") input_string = input("> ") while input_string.strip().lower() != "quit": if input_string.strip().lower() == "debug": debug_flag = True input_string = input("> ") continue # Reset to an empty exprStack del exprStack[:] if input_string != "": # try parsing the input string try: L = pattern.parseString(input_string, parseAll=True) except ParseException as err: L = ["Parse Failure", input_string, (str(err), err.line, err.column)] # show result of parsing the input string if debug_flag: print(input_string, "->", L) if len(L) == 0 or L[0] != "Parse Failure": if debug_flag: print("exprStack=", exprStack) for i, ob in enumerate(exprStack): if isinstance(ob, str) and ob in variables: exprStack[i] = str(variables[ob]) # calculate result , store a copy in ans , display the result to user try: result = 
evaluate_stack(exprStack) except Exception as e: print(str(e)) else: variables["ans"] = result print(result) # Assign result to a variable if required if L.varname: variables[L.varname] = result if debug_flag: print("variables=", variables) else: print("Parse Failure") err_str, err_line, err_col = L[-1] print(err_line) print(" " * (err_col - 1) + "^") print(err_str) # obtain new input string input_string = input("> ") # if user type 'quit' then say goodbye print("Good bye!") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/SingleForm.dfm0000644000000000000000000012531414412577542015052 0ustar00object Form1: TForm1 Left = 161 Top = 149 Width = 696 Height = 342 Caption = 'DbxSingle' Color = clBtnFace Font.Charset = DEFAULT_CHARSET Font.Color = clWindowText Font.Height = -11 Font.Name = 'MS Sans Serif' Font.Style = [] OldCreateOrder = False OnCreate = FormCreate PixelsPerInch = 96 TextHeight = 13 object ActionToolBar2: TActionToolBar Left = 0 Top = 0 Width = 688 Height = 26 ActionManager = ActionManager1 AllowHiding = False Caption = 'ActionToolBar2' ColorMap.HighlightColor = 14410210 ColorMap.BtnSelectedColor = clBtnFace ColorMap.UnusedColor = 14410210 Spacing = 0 end object PageControl1: TPageControl Left = 0 Top = 26 Width = 688 Height = 289 ActivePage = TabSheet1 Align = alClient TabOrder = 1 object TabSheet1: TTabSheet Caption = 'Data' object DBGrid1: TDBGrid Left = 0 Top = 0 Width = 680 Height = 261 Align = alClient DataSource = DataSource1 TabOrder = 0 TitleFont.Charset = DEFAULT_CHARSET TitleFont.Color = clWindowText TitleFont.Height = -11 TitleFont.Name = 'MS Sans Serif' TitleFont.Style = [] end end object TabSheet2: TTabSheet Caption = 'Log' ImageIndex = 1 object Memo1: TMemo Left = 0 Top = 0 Width = 680 Height = 399 Align = alClient TabOrder = 0 end end end object SimpleDataSet1: TSimpleDataSet Aggregates = <> Connection.ConnectionName = 'IBLocal' Connection.DriverName = 'Interbase' 
Connection.GetDriverFunc = 'getSQLDriverINTERBASE' Connection.LibraryName = 'dbexpint.dll' Connection.LoginPrompt = False Connection.Params.Strings = ( 'BlobSize=-1' 'CommitRetain=False' 'Database=C:\Program Files\Common Files\Borland Shared\Data\emplo' + 'yee.gdb' 'DriverName=Interbase' 'Password=masterkey' 'RoleName=RoleName' 'ServerCharSet=ASCII' 'SQLDialect=1' 'Interbase TransIsolation=ReadCommited' 'User_Name=sysdba' 'WaitOnLocks=True') Connection.VendorLib = 'GDS32.DLL' DataSet.CommandText = 'EMPLOYEE' DataSet.CommandType = ctTable DataSet.MaxBlobSize = -1 DataSet.Params = <> Params = <> AfterPost = DoUpdate BeforeDelete = DoUpdate Left = 104 Top = 56 end object ActionManager1: TActionManager ActionBars = < item Items.CaptionOptions = coAll Items = < item Action = DataSetFirst1 ImageIndex = 0 end item Action = DataSetPrior1 ImageIndex = 1 end item Action = DataSetNext1 ImageIndex = 2 end item Action = DataSetLast1 ImageIndex = 3 end item Action = DataSetInsert1 ImageIndex = 4 end item Action = DataSetDelete1 ImageIndex = 5 end item Action = DataSetEdit1 ImageIndex = 6 end item Action = DataSetPost1 ImageIndex = 7 end item Action = DataSetCancel1 ImageIndex = 8 end item Action = DataSetRefresh1 ImageIndex = 9 end> ActionBar = ActionToolBar2 end> Images = ImageList1 Left = 112 Top = 184 StyleName = 'XP Style' object DataSetFirst1: TDataSetFirst Category = 'Dataset' Caption = 'First' ImageIndex = 0 end object DataSetPrior1: TDataSetPrior Category = 'Dataset' Caption = 'Prior' ImageIndex = 1 end object DataSetNext1: TDataSetNext Category = 'Dataset' Caption = 'Next' ImageIndex = 2 end object DataSetLast1: TDataSetLast Category = 'Dataset' Caption = 'Last' ImageIndex = 3 end object DataSetInsert1: TDataSetInsert Category = 'Dataset' Caption = 'Insert' ImageIndex = 4 end object DataSetDelete1: TDataSetDelete Category = 'Dataset' Caption = 'Delete' ImageIndex = 5 end object DataSetEdit1: TDataSetEdit Category = 'Dataset' Caption = 'Edit' ImageIndex = 6 end object 
DataSetPost1: TDataSetPost Category = 'Dataset' Caption = 'Post' ImageIndex = 7 end object DataSetCancel1: TDataSetCancel Category = 'Dataset' Caption = 'Cancel' ImageIndex = 8 end object DataSetRefresh1: TDataSetRefresh Category = 'Dataset' Caption = 'Refresh' ImageIndex = 9 end end object ImageList1: TImageList Left = 112 Top = 120 Bitmap = { 494C01010C000F00040010001000FFFFFFFFFF10FFFFFFFFFFFFFFFF424D3600 0000000000003600000028000000400000004000000001002000000000000040 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400848484008484840084848400848484008484 8400848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 8400000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 00000000000000000000FFFFFF00FFFFFF00FFFFFF00FFFFFF00FFFFFF000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000008484840000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 8400000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 00000000000000000000FFFFFF00FFFFFF00FFFFFF00FFFFFF00FFFFFF000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000848484000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 8400000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 00000000000000000000FFFFFF00FFFFFF00FFFFFF00FFFFFF00FFFFFF000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 
8400000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 00000000000000000000FFFFFF00FFFFFF00FFFFFF00FFFFFF00FFFFFF000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000008484 8400000000008484840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 8400008484000084840000848400008484000084840000848400008484000084 8400008484000084840000000000000000000000000000000000000000000000 00000000000000000000FFFFFF00FFFFFF00FFFFFF00FFFFFF00FFFFFF000000 0000848484000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000084 8400000000000000000000000000000000000000000000000000000000000000 0000008484000084840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000008484840000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000008484 8400000000000000000084848400000000008484840000000000000000000000 0000000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000084840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000008484840000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000008484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000848484000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000008484840000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000848484000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000008484840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000848484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000008484840000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000848484008484840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000848484008484 8400000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000848484000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000008484840000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000848484000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000008484840000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000848484000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000008484840000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 
0000848484000000000000000000000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000084848400000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000084848400000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000848484000000000000000000000000000000000000000000000000008484 8400000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 
0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000424D3E000000000000003E000000 2800000040000000400000000100010000000000000200000000000000000000 000000000000000000000000FFFFFF0000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000FFFFFFFFFFFFFC07FFFFFFFFC001F807 FFFFFFFF8031F807FFFFFC7F8031F807F3E7F0FF8031F807F1C7F1FF8001F807 F88FE3FF8001F807FC1FE7FF8001F80FFE3FE7078FF1FF7FFC1FE3878FF1FE3F F88FE1078FF1FC1FF1C7F0078FF1FFFFF3E7F8378FF1FEFFFFFFFFFF8FF5FFFF FFFFFFFF8001FDFFFFFFFFFFFFFF6FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7FFFFFFFFFFBFFFC7FFFFFFFFFF1FF FC7FFFFFE007E0FFE00FE007F00FC47FE00FE007F81FCE3FE00FE007FC3FFF1F FC7FFFFFFE7FFF8FFC7FFFFFFFFFFFC7FC7FFFFFFFFFFFE7FFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE7E7FF9FF9FFE7E7 E787FE1FF87FE1E7E607F81FF81FE067E007F01FF80FE007E607F81FF81FE067 E787FE1FF87FE1E7E7E7FF9FF9FFE7E7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000 000000000000} end object DataSource1: TDataSource DataSet = SimpleDataSet1 Left = 108 Top = 250 end object SQLMonitor1: TSQLMonitor OnTrace = SQLMonitor1Trace Left = 228 Top = 122 
end end ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/TAP.py0000644000000000000000000001667014412577542013317 0ustar00# # TAP.py - TAP parser # # A pyparsing parser to process the output of the Perl # "Test Anything Protocol" # (https://metacpan.org/pod/release/PETDANCE/TAP-1.00/TAP.pm) # # TAP output lines are preceded or followed by a test number range: # 1..n # with 'n' TAP output lines. # # The general format of a TAP output line is: # ok/not ok (required) # Test number (recommended) # Description (recommended) # Directive (only when necessary) # # A TAP output line may also indicate abort of the test suit with the line: # Bail out! # optionally followed by a reason for bailing # # Copyright 2008, by Paul McGuire # from pyparsing import ( ParserElement, LineEnd, Optional, Word, nums, Regex, Literal, CaselessLiteral, Group, OneOrMore, Suppress, restOfLine, FollowedBy, empty, ) __all__ = ["tapOutputParser", "TAPTest", "TAPSummary"] # newlines are significant whitespace, so set default skippable # whitespace to just spaces and tabs ParserElement.setDefaultWhitespaceChars(" \t") NL = LineEnd().suppress() integer = Word(nums) plan = "1.." 
+ integer("ubound") OK, NOT_OK = map(Literal, ["ok", "not ok"]) testStatus = OK | NOT_OK description = Regex("[^#\n]+") description.setParseAction(lambda t: t[0].lstrip("- ")) TODO, SKIP = map(CaselessLiteral, "TODO SKIP".split()) directive = Group( Suppress("#") + ( TODO + restOfLine | FollowedBy(SKIP) + restOfLine.copy().setParseAction(lambda t: ["SKIP", t[0]]) ) ) commentLine = Suppress("#") + empty + restOfLine testLine = Group( Optional(OneOrMore(commentLine + NL))("comments") + testStatus("passed") + Optional(integer)("testNumber") + Optional(description)("description") + Optional(directive)("directive") ) bailLine = Group(Literal("Bail out!")("BAIL") + empty + Optional(restOfLine)("reason")) tapOutputParser = Optional(Group(plan)("plan") + NL) & Group( OneOrMore((testLine | bailLine) + NL) )("tests") class TAPTest: def __init__(self, results): self.num = results.testNumber self.passed = results.passed == "ok" self.skipped = self.todo = False if results.directive: self.skipped = results.directive[0][0] == "SKIP" self.todo = results.directive[0][0] == "TODO" @classmethod def bailedTest(cls, num): ret = TAPTest(empty.parseString("")) ret.num = num ret.skipped = True return ret class TAPSummary: def __init__(self, results): self.passedTests = [] self.failedTests = [] self.skippedTests = [] self.todoTests = [] self.bonusTests = [] self.bail = False if results.plan: expected = list(range(1, int(results.plan.ubound) + 1)) else: expected = list(range(1, len(results.tests) + 1)) for i, res in enumerate(results.tests): # test for bail out if res.BAIL: # ~ print "Test suite aborted: " + res.reason # ~ self.failedTests += expected[i:] self.bail = True self.skippedTests += [TAPTest.bailedTest(ii) for ii in expected[i:]] self.bailReason = res.reason break # ~ print res.dump() testnum = i + 1 if res.testNumber != "": if testnum != int(res.testNumber): print("ERROR! 
test %(testNumber)s out of sequence" % res) testnum = int(res.testNumber) res["testNumber"] = testnum test = TAPTest(res) if test.passed: self.passedTests.append(test) else: self.failedTests.append(test) if test.skipped: self.skippedTests.append(test) if test.todo: self.todoTests.append(test) if test.todo and test.passed: self.bonusTests.append(test) self.passedSuite = not self.bail and ( set(self.failedTests) - set(self.todoTests) == set() ) def summary(self, showPassed=False, showAll=False): testListStr = lambda tl: "[" + ",".join(str(t.num) for t in tl) + "]" summaryText = [] if showPassed or showAll: summaryText.append("PASSED: %s" % testListStr(self.passedTests)) if self.failedTests or showAll: summaryText.append("FAILED: %s" % testListStr(self.failedTests)) if self.skippedTests or showAll: summaryText.append("SKIPPED: %s" % testListStr(self.skippedTests)) if self.todoTests or showAll: summaryText.append("TODO: %s" % testListStr(self.todoTests)) if self.bonusTests or showAll: summaryText.append("BONUS: %s" % testListStr(self.bonusTests)) if self.passedSuite: summaryText.append("PASSED") else: summaryText.append("FAILED") return "\n".join(summaryText) # create TAPSummary objects from tapOutput parsed results, by setting # class as parse action tapOutputParser.setParseAction(TAPSummary) def main(): test1 = """\ 1..4 ok 1 - Input file opened not ok 2 - First line of the input valid ok 3 - Read the rest of the file not ok 4 - Summarized correctly # TODO Not written yet """ test2 = """\ ok 1 not ok 2 some description # TODO with a directive ok 3 a description only, no directive ok 4 # TODO directive only ok a description only, no directive ok # Skipped only a directive, no description ok """ test3 = """\ ok - created Board ok ok not ok ok ok ok ok # +------+------+------+------+ # | |16G | |05C | # | |G N C | |C C G | # | | G | | C +| # +------+------+------+------+ # |10C |01G | |03C | # |R N G |G A G | |C C C | # | R | G | | C +| # +------+------+------+------+ # 
| |01G |17C |00C | # | |G A G |G N R |R N R | # | | G | R | G | # +------+------+------+------+ ok - board has 7 tiles + starter tile 1..9 """ test4 = """\ 1..4 ok 1 - Creating test program ok 2 - Test program runs, no error not ok 3 - infinite loop # TODO halting problem unsolved not ok 4 - infinite loop 2 # TODO halting problem unsolved """ test5 = """\ 1..20 ok - database handle not ok - failed database login Bail out! Couldn't connect to database. """ test6 = """\ ok 1 - retrieving servers from the database # need to ping 6 servers ok 2 - pinged diamond ok 3 - pinged ruby not ok 4 - pinged sapphire ok 5 - pinged onyx not ok 6 - pinged quartz ok 7 - pinged gold 1..7 """ for test in (test1, test2, test3, test4, test5, test6): print(test) tapResult = tapOutputParser.parseString(test)[0] print(tapResult.summary(showAll=True)) print() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/__init__.py0000644000000000000000000000000014412577542014406 0ustar00././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/adventureEngine.py0000644000000000000000000005317614443622312016006 0ustar00# adventureEngine.py # Copyright 2005-2006, Paul McGuire # # Updated 2012 - latest pyparsing API # Updated 2023 - using PEP8 API names # import pyparsing as pp import random import string def a_or_an(item): if item.desc.startswith(tuple("aeiou")): return "an " + item.desc else: return "a " + item.desc def enumerate_items(items_list): if not items_list: return "nothing" *all_but_last, last = items_list out = [] if all_but_last: out.append(", ".join(a_or_an(item) for item in all_but_last)) if len(all_but_last) > 1: out[-1] += ',' out.append("and") out.append(a_or_an(last)) return " ".join(out) def enumerate_doors(doors_list): if not doors_list: return "" *all_but_last, last = doors_list out = [] if all_but_last: 
out.append(", ".join(all_but_last)) if len(all_but_last) > 1: out[-1] += ',' out.append("and") out.append(last) return " ".join(out) class Room: def __init__(self, desc): self.desc = desc self.inv = [] self.gameOver = False self.doors = [None, None, None, None] def __getattr__(self, attr): return { "n": self.doors[0], "s": self.doors[1], "e": self.doors[2], "w": self.doors[3], }[attr] def enter(self, player): if self.gameOver: player.gameOver = True def add_item(self, it): self.inv.append(it) def remove_item(self, it): self.inv.remove(it) def describe(self): print(self.desc) visibleItems = [it for it in self.inv if it.isVisible] if random.random() > 0.5: if len(visibleItems) > 1: is_form = "are" else: is_form = "is" print("There {} {} here.".format(is_form, enumerate_items(visibleItems))) else: print("You see %s." % (enumerate_items(visibleItems))) class Exit(Room): def __init__(self): super().__init__("") def enter(self, player): player.gameOver = True class Item: items = {} def __init__(self, desc): self.desc = desc self.isDeadly = False self.isFragile = False self.isBroken = False self.isTakeable = True self.isVisible = True self.isOpenable = False self.useAction = None self.usableConditionTest = None self.cantTakeMessage = "You can't take that!" 
Item.items[desc] = self def __str__(self): return self.desc def breakItem(self): if not self.isBroken: print("") self.desc = "broken " + self.desc self.isBroken = True def isUsable(self, player, target): if self.usableConditionTest: return self.usableConditionTest(player, target) else: return False def useItem(self, player, target): if self.useAction: self.useAction(player, self, target) class OpenableItem(Item): def __init__(self, desc, contents=None): super().__init__(desc) self.isOpenable = True self.isOpened = False if contents is not None: if isinstance(contents, Item): self.contents = [ contents, ] else: self.contents = contents else: self.contents = [] def open_item(self, player): if not self.isOpened: self.isOpened = not self.isOpened if self.contents is not None: for item in self.contents: player.room.add_item(item) self.contents = [] self.desc = "open " + self.desc def close_item(self, player): if self.isOpened: self.isOpened = not self.isOpened if self.desc.startswith("open "): self.desc = self.desc[5:] class Command: "Base class for commands" def __init__(self, verb, verbProg): self.verb = verb self.verbProg = verbProg @staticmethod def help_description(): return "" def _do_command(self, player): pass def __call__(self, player): print(self.verbProg.capitalize() + "...") self._do_command(player) class MoveCommand(Command): def __init__(self, quals): super().__init__("MOVE", "moving") self.direction = quals.direction[0] @staticmethod def help_description(): return """MOVE or GO - go NORTH, SOUTH, EAST, or WEST (can abbreviate as 'GO N' and 'GO W', or even just 'E' and 'S')""" def _do_command(self, player): rm = player.room nextRoom = rm.doors[ { "N": 0, "S": 1, "E": 2, "W": 3, }[self.direction] ] if nextRoom: player.moveTo(nextRoom) else: print("Can't go that way.") class TakeCommand(Command): def __init__(self, quals): super().__init__("TAKE", "taking") self.subject = quals.item @staticmethod def help_description(): return "TAKE or PICKUP or PICK UP - 
pick up an object (but some are deadly)" def _do_command(self, player): rm = player.room subj = Item.items[self.subject] if subj in rm.inv and subj.isVisible: if subj.isTakeable: rm.remove_item(subj) player.take(subj) else: print(subj.cantTakeMessage) else: print("There is no %s here." % subj) class DropCommand(Command): def __init__(self, quals): super().__init__("DROP", "dropping") self.subject = quals.item @staticmethod def help_description(): return "DROP or LEAVE - drop an object (but fragile items may break)" def _do_command(self, player): rm = player.room subj = Item.items[self.subject] if subj in player.inv: rm.add_item(subj) player.drop(subj) else: print("You don't have %s." % (a_or_an(subj))) class InventoryCommand(Command): def __init__(self, quals): super().__init__("INV", "taking inventory") @staticmethod def help_description(): return "INVENTORY or INV or I - lists what items you have" def _do_command(self, player): print("You have %s." % enumerate_items(player.inv)) class LookCommand(Command): def __init__(self, quals): super().__init__("LOOK", "looking") @staticmethod def help_description(): return "LOOK or L - describes the current room and any objects in it" def _do_command(self, player): player.room.describe() class ExamineCommand(Command): def __init__(self, quals): super().__init__("EXAMINE", "examining") self.subject = Item.items[quals.item] @staticmethod def help_description(): return "EXAMINE or EX or X - look closely at an object" def _do_command(self, player): msg = random.choice( [ "It's {}.", "It's just {}.", "It's a beautiful {1}.", "It's a rare and beautiful {1}.", "It's a rare {1}.", "Just {}, nothing special...", "{0}, just {0}." 
] ) print(msg.format(a_or_an(self.subject), self.subject).capitalize()) class DoorsCommand(Command): def __init__(self, quals): super().__init__("DOORS", "looking for doors") @staticmethod def help_description(): return "DOORS - display what doors are visible from this room" def _do_command(self, player): rm = player.room numDoors = sum(1 for r in rm.doors if r is not None) if numDoors == 0: reply = "There are no doors in any direction." else: if numDoors == 1: reply = "There is a door to the " else: reply = "There are doors to the " doorNames = [ {0: "north", 1: "south", 2: "east", 3: "west"}[i] for i, d in enumerate(rm.doors) if d is not None ] reply += enumerate_doors(doorNames) reply += "." print(reply) class UseCommand(Command): def __init__(self, quals): super().__init__("USE", "using") self.subject = Item.items[quals.usedObj] if quals.targetObj: self.target = Item.items[quals.targetObj] else: self.target = None @staticmethod def help_description(): return "USE or U - use an object, optionally IN or ON another object" def _do_command(self, player): rm = player.room availItems = rm.inv + player.inv if self.subject in availItems: if self.subject.isUsable(player, self.target): self.subject.useItem(player, self.target) else: print("You can't use that here.") else: print("There is no %s here to use." % self.subject) class OpenCommand(Command): def __init__(self, quals): super().__init__("OPEN", "opening") self.subject = Item.items[quals.item] @staticmethod def help_description(): return "OPEN or O - open an object" def _do_command(self, player): rm = player.room availItems = rm.inv + player.inv if self.subject in availItems: if self.subject.isOpenable: if not self.subject.isOpened: self.subject.open_item(player) else: print("It's already open.") else: print("You can't open that.") else: print("There is no %s here to open." 
% self.subject) class CloseCommand(Command): def __init__(self, quals): super().__init__("CLOSE", "closing") self.subject = Item.items[quals.item] @staticmethod def help_description(): return "CLOSE or CL - close an object" def _do_command(self, player): rm = player.room availItems = rm.inv + player.inv if self.subject in availItems: if self.subject.isOpenable: if self.subject.isOpened: self.subject.close_item(player) else: print("You can't close that, it's not open.") else: print("You can't close that.") else: print("There is no %s here to close." % self.subject) class QuitCommand(Command): def __init__(self, quals): super().__init__("QUIT", "quitting") @staticmethod def help_description(): return "QUIT or Q - ends the game" def _do_command(self, player): print("Ok....") player.gameOver = True class HelpCommand(Command): def __init__(self, quals): super().__init__("HELP", "helping") @staticmethod def help_description(): return "HELP or H or ? - displays this help message" def _do_command(self, player): print("Enter any of the following commands (not case sensitive):") for cmd in [ InventoryCommand, DropCommand, TakeCommand, UseCommand, OpenCommand, CloseCommand, MoveCommand, LookCommand, ExamineCommand, DoorsCommand, QuitCommand, HelpCommand, ]: print(" - %s" % cmd.help_description()) print() class AppParseException(pp.ParseException): pass class Parser: def __init__(self): self.bnf = self.make_bnf() def make_bnf(self): invVerb = pp.one_of("INV INVENTORY I", caseless=True) dropVerb = pp.one_of("DROP LEAVE", caseless=True) takeVerb = pp.one_of("TAKE PICKUP", caseless=True) | ( pp.CaselessLiteral("PICK") + pp.CaselessLiteral("UP") ) moveVerb = pp.one_of("MOVE GO", caseless=True) | pp.Empty() useVerb = pp.one_of("USE U", caseless=True) openVerb = pp.one_of("OPEN O", caseless=True) closeVerb = pp.one_of("CLOSE CL", caseless=True) quitVerb = pp.one_of("QUIT Q", caseless=True) lookVerb = pp.one_of("LOOK L", caseless=True) doorsVerb = pp.CaselessLiteral("DOORS") helpVerb 
= pp.one_of("H HELP ?", caseless=True) itemRef = pp.OneOrMore(pp.Word(pp.alphas)).set_parse_action(self.validate_item_name).setName("item_ref") nDir = pp.one_of("N NORTH", caseless=True).set_parse_action(pp.replace_with("N")) sDir = pp.one_of("S SOUTH", caseless=True).set_parse_action(pp.replace_with("S")) eDir = pp.one_of("E EAST", caseless=True).set_parse_action(pp.replace_with("E")) wDir = pp.one_of("W WEST", caseless=True).set_parse_action(pp.replace_with("W")) moveDirection = nDir | sDir | eDir | wDir invCommand = invVerb dropCommand = dropVerb + itemRef("item") takeCommand = takeVerb + itemRef("item") useCommand = ( useVerb + itemRef("usedObj") + pp.Opt(pp.one_of("IN ON", caseless=True)) + pp.Opt(itemRef, default=None)("targetObj") ) openCommand = openVerb + itemRef("item") closeCommand = closeVerb + itemRef("item") moveCommand = (moveVerb | "") + moveDirection("direction") quitCommand = quitVerb lookCommand = lookVerb examineCommand = pp.one_of("EXAMINE EX X", caseless=True) + itemRef("item") doorsCommand = doorsVerb.setName("DOORS") helpCommand = helpVerb # attach command classes to expressions invCommand.set_parse_action(InventoryCommand) dropCommand.set_parse_action(DropCommand) takeCommand.set_parse_action(TakeCommand) useCommand.set_parse_action(UseCommand) openCommand.set_parse_action(OpenCommand) closeCommand.set_parse_action(CloseCommand) moveCommand.set_parse_action(MoveCommand) quitCommand.set_parse_action(QuitCommand) lookCommand.set_parse_action(LookCommand) examineCommand.set_parse_action(ExamineCommand) doorsCommand.set_parse_action(DoorsCommand) helpCommand.set_parse_action(HelpCommand) # define parser using all command expressions parser = pp.ungroup( invCommand | useCommand | openCommand | closeCommand | dropCommand | takeCommand | moveCommand | lookCommand | examineCommand | doorsCommand | helpCommand | quitCommand )("command") return parser def validate_item_name(self, s, l, t): iname = " ".join(t) if iname not in Item.items: raise 
AppParseException(s, l, "No such item '%s'." % iname) return iname def parse_cmd(self, cmdstr): try: ret = self.bnf.parse_string(cmdstr) return ret except AppParseException as pe: print(pe.msg) except pp.ParseException as pe: print( random.choice( [ "Sorry, I don't understand that.", "Huh?", "Excuse me?", "???", "What?", ] ) ) class Player: def __init__(self, name): self.name = name self.gameOver = False self.inv = [] def moveTo(self, rm): self.room = rm rm.enter(self) if self.gameOver: if rm.desc: rm.describe() print("Game over!") else: rm.describe() def take(self, it): if it.isDeadly: print("Aaaagh!...., the %s killed me!" % it) self.gameOver = True else: self.inv.append(it) def drop(self, it): self.inv.remove(it) if it.isFragile: it.breakItem() def createRooms(rm): """ create rooms, using multiline string showing map layout string contains symbols for the following: A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by reference letter -, | symbols indicate connection between rooms <, >, ^, . 
symbols indicate one-way connection between rooms """ # start with empty dictionary of rooms ret = {} # look for room symbols, and initialize dictionary # - exit room is always marked 'Z' for c in rm: if c in string.ascii_letters: if c != "Z": ret[c] = Room(c) else: ret[c] = Exit() # scan through input string looking for connections between rooms rows = rm.split("\n") for row, line in enumerate(rows): for col, c in enumerate(line): if c in string.ascii_letters: room = ret[c] n = None s = None e = None w = None # look in neighboring cells for connection symbols (must take # care to guard that neighboring cells exist before testing # contents) if col > 0 and line[col - 1] in "<-": other = line[col - 2] w = ret[other] if col < len(line) - 1 and line[col + 1] in "->": other = line[col + 2] e = ret[other] if row > 1 and col < len(rows[row - 1]) and rows[row - 1][col] in "|^": other = rows[row - 2][col] n = ret[other] if ( row < len(rows) - 1 and col < len(rows[row + 1]) and rows[row + 1][col] in "|." ): other = rows[row + 2][col] s = ret[other] # set connections to neighboring rooms room.doors = [n, s, e, w] return ret # put items in rooms def putItemInRoom(i, r): if isinstance(r, str): r = rooms[r] r.add_item(Item.items[i]) def playGame(p, startRoom): # create parser parser = Parser() p.moveTo(startRoom) while not p.gameOver: cmdstr = input(">> ") cmd = parser.parse_cmd(cmdstr) if cmd is not None: cmd.command(p) print() print("You ended the game with:") for i in p.inv: print(" -", a_or_an(i)) if __name__ == '__main__': # start game definition roomMap = """ d-Z | f-c-e . 
| q" + "'")) | ("u" + Word(hexnums, exact=4)) | SGL_PRINTABLE ) LITERAL_CHAR = ESC | ~(APOS | BSLASH) + SGL_PRINTABLE CHAR_LITERAL = APOS + LITERAL_CHAR + APOS STRING_LITERAL = APOS + Combine(OneOrMore(LITERAL_CHAR)) + APOS DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"' DOUBLE_ANGLE_STRING_LITERAL = "<<" + ZeroOrMore(SGL_PRINTABLE) + ">>" TOKEN_REF = Word(alphas.upper(), alphanums + "_") RULE_REF = Word(alphas.lower(), alphanums + "_") ACTION_ESC = ( BSLASH.suppress() + APOS | BSLASH.suppress() | BSLASH.suppress() + (~(APOS | QUOTE) + SGL_PRINTABLE) ) ACTION_CHAR_LITERAL = APOS + (ACTION_ESC | ~(BSLASH | APOS) + SGL_PRINTABLE) + APOS ACTION_STRING_LITERAL = ( QUOTE + ZeroOrMore(ACTION_ESC | ~(BSLASH | QUOTE) + SGL_PRINTABLE) + QUOTE ) SRC = SRC_.suppress() + ACTION_STRING_LITERAL("file") + INT("line") id = TOKEN_REF | RULE_REF SL_COMMENT = ( Suppress("//") + Suppress("$ANTLR") + SRC | ZeroOrMore(~EOL + Word(printables)) + EOL ) ML_COMMENT = cStyleComment WS = OneOrMore( Suppress(" ") | Suppress("\t") | (Optional(Suppress("\r")) + Literal("\n")) ) WS_LOOP = ZeroOrMore(SL_COMMENT | ML_COMMENT) NESTED_ARG_ACTION = Forward() NESTED_ARG_ACTION << ( LBRACK + ZeroOrMore(NESTED_ARG_ACTION | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + RBRACK ) ARG_ACTION = NESTED_ARG_ACTION NESTED_ACTION = Forward() NESTED_ACTION << ( LBRACE + ZeroOrMore( NESTED_ACTION | SL_COMMENT | ML_COMMENT | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL ) + RBRACE ) ACTION = NESTED_ACTION + Optional("?") SCOPE = SCOPE_.suppress() OPTIONS = OPTIONS_.suppress() + LBRACE # + WS_LOOP + Suppress('{') TOKENS = TOKENS_.suppress() + LBRACE # + WS_LOOP + Suppress('{') TREE_BEGIN = ROOT + LPAR RANGE = Suppress("..") REWRITE = Suppress("->") # General Parser Definitions # Grammar heading optionValue = id | STRING_LITERAL | CHAR_LITERAL | INT | Literal("*").setName("s") option = Group(id("id") + EQ + optionValue("value"))("option") optionsSpec = OPTIONS + Group(OneOrMore(option + 
SEMI))("options") + RBRACE tokenSpec = ( Group(TOKEN_REF("token_ref") + (EQ + (STRING_LITERAL | CHAR_LITERAL)("lit")))( "token" ) + SEMI ) tokensSpec = TOKENS + Group(OneOrMore(tokenSpec))("tokens") + RBRACE attrScope = SCOPE_.suppress() + id + ACTION grammarType = LEXER + PARSER + TREE actionScopeName = id | LEXER("l") | PARSER("p") action = AT + Optional(actionScopeName + Suppress("::")) + id + ACTION grammarHeading = ( Optional(ML_COMMENT("ML_COMMENT")) + Optional(grammarType) + GRAMMAR + id("grammarName") + SEMI + Optional(optionsSpec) + Optional(tokensSpec) + ZeroOrMore(attrScope) + ZeroOrMore(action) ) modifier = PROTECTED | PUBLIC | PRIVATE | FRAGMENT ruleAction = AT + id + ACTION throwsSpec = THROWS.suppress() + delimitedList(id) ruleScopeSpec = ( (SCOPE_.suppress() + ACTION) | (SCOPE_.suppress() + delimitedList(id) + SEMI) | (SCOPE_.suppress() + ACTION + SCOPE_.suppress() + delimitedList(id) + SEMI) ) unary_op = oneOf("^ !") notTerminal = CHAR_LITERAL | TOKEN_REF | STRING_LITERAL terminal = ( CHAR_LITERAL | TOKEN_REF + Optional(ARG_ACTION) | STRING_LITERAL | "." ) + Optional(unary_op) block = Forward() notSet = TIL + (notTerminal | block) rangeNotPython = CHAR_LITERAL("c1") + RANGE + CHAR_LITERAL("c2") atom = Group( (rangeNotPython + Optional(unary_op)("op")) | terminal | (notSet + Optional(unary_op)("op")) | (RULE_REF + Optional(ARG_ACTION("arg")) + Optional(unary_op)("op")) ) element = Forward() treeSpec = ROOT + LPAR + element * (2,) + RPAR ebnfSuffix = oneOf("? * +") ebnf = block + Optional(ebnfSuffix("op") | "=>") elementNoOptionSpec = ( (id("result_name") + oneOf("= +=")("labelOp") + atom("atom") + Optional(ebnfSuffix)) | (id("result_name") + oneOf("= +=")("labelOp") + block + Optional(ebnfSuffix)) | atom("atom") + Optional(ebnfSuffix) | ebnf | ACTION | (treeSpec + Optional(ebnfSuffix)) ) # | SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED ) element <<= Group(elementNoOptionSpec)("element") # Do not ask me why group is needed twice... 
seems like the xml that you see is not always the real structure? alternative = Group(Group(OneOrMore(element))("elements")) rewrite = Optional(Literal("TODO REWRITE RULES TODO")) block <<= ( LPAR + Optional(Optional(optionsSpec("opts")) + COLON) + Group( alternative("a1") + rewrite + Group(ZeroOrMore(VERT + alternative("a2") + rewrite))("alternatives") )("block") + RPAR ) altList = ( alternative("a1") + rewrite + Group(ZeroOrMore(VERT + alternative("a2") + rewrite))("alternatives") ) exceptionHandler = CATCH.suppress() + ARG_ACTION + ACTION finallyClause = FINALLY.suppress() + ACTION exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause ruleHeading = ( Optional(ML_COMMENT)("ruleComment") + Optional(modifier)("modifier") + id("ruleName") + Optional("!") + Optional(ARG_ACTION("arg")) + Optional(Suppress("returns") + ARG_ACTION("rt")) + Optional(throwsSpec) + Optional(optionsSpec) + Optional(ruleScopeSpec) + ZeroOrMore(ruleAction) ) rule = Group(ruleHeading + COLON + altList + SEMI + Optional(exceptionGroup))("rule") grammarDef = grammarHeading + Group(OneOrMore(rule))("rules") def grammar(): return grammarDef def __antlrAlternativesConverter(pyparsingRules, antlrBlock): rule = None if ( hasattr(antlrBlock, "alternatives") and antlrBlock.alternatives != "" and len(antlrBlock.alternatives) > 0 ): alternatives = [] alternatives.append(__antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)) for alternative in antlrBlock.alternatives: alternatives.append( __antlrAlternativeConverter(pyparsingRules, alternative) ) rule = MatchFirst(alternatives)("anonymous_or") elif hasattr(antlrBlock, "a1") and antlrBlock.a1 != "": rule = __antlrAlternativeConverter(pyparsingRules, antlrBlock.a1) else: raise Exception("Not yet implemented") assert rule != None return rule def __antlrAlternativeConverter(pyparsingRules, antlrAlternative): elementList = [] for element in antlrAlternative.elements: rule = None if hasattr(element.atom, "c1") and 
element.atom.c1 != "": regex = r"[" + str(element.atom.c1[0]) + "-" + str(element.atom.c2[0] + "]") rule = Regex(regex)("anonymous_regex") elif hasattr(element, "block") and element.block != "": rule = __antlrAlternativesConverter(pyparsingRules, element.block) else: ruleRef = element.atom[0] assert ruleRef in pyparsingRules rule = pyparsingRules[ruleRef](ruleRef) if hasattr(element, "op") and element.op != "": if element.op == "+": rule = Group(OneOrMore(rule))("anonymous_one_or_more") elif element.op == "*": rule = Group(ZeroOrMore(rule))("anonymous_zero_or_more") elif element.op == "?": rule = Optional(rule) else: raise Exception("rule operator not yet implemented : " + element.op) rule = rule elementList.append(rule) if len(elementList) > 1: rule = Group(And(elementList))("anonymous_and") else: rule = elementList[0] assert rule is not None return rule def __antlrRuleConverter(pyparsingRules, antlrRule): rule = None rule = __antlrAlternativesConverter(pyparsingRules, antlrRule) assert rule != None rule(antlrRule.ruleName) return rule def antlrConverter(antlrGrammarTree): pyparsingRules = {} antlrTokens = {} for antlrToken in antlrGrammarTree.tokens: antlrTokens[antlrToken.token_ref] = antlrToken.lit for antlrTokenName, antlrToken in list(antlrTokens.items()): pyparsingRules[antlrTokenName] = Literal(antlrToken) antlrRules = {} for antlrRule in antlrGrammarTree.rules: antlrRules[antlrRule.ruleName] = antlrRule pyparsingRules[antlrRule.ruleName] = Forward() # antlr is a top down grammar for antlrRuleName, antlrRule in list(antlrRules.items()): pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule) assert pyparsingRule != None pyparsingRules[antlrRuleName] <<= pyparsingRule return pyparsingRules if __name__ == "__main__": text = """\ grammar SimpleCalc; options { language = Python; } tokens { PLUS = '+' ; MINUS = '-' ; MULT = '*' ; DIV = '/' ; } /*------------------------------------------------------------------ * PARSER RULES 
*------------------------------------------------------------------*/ expr : term ( ( PLUS | MINUS ) term )* ; term : factor ( ( MULT | DIV ) factor )* ; factor : NUMBER ; /*------------------------------------------------------------------ * LEXER RULES *------------------------------------------------------------------*/ NUMBER : (DIGIT)+ ; /* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */ fragment DIGIT : '0'..'9' ; """ grammar().validate() antlrGrammarTree = grammar().parseString(text) print(antlrGrammarTree.dump()) pyparsingRules = antlrConverter(antlrGrammarTree) pyparsingRule = pyparsingRules["expr"] pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25") print(pyparsingTree.dump()) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/antlr_grammar_tests.py0000644000000000000000000000561314412577542016736 0ustar00""" Created on 4 sept. 2010 @author: luca Submitted by Luca DallOlio, September, 2010 """ import unittest from . 
import antlr_grammar class Test(unittest.TestCase): def testOptionsSpec(self): text = """options { language = Python; }""" antlr_grammar.optionsSpec.parseString(text) # @UndefinedVariable def testTokensSpec(self): text = """tokens { PLUS = '+' ; MINUS = '-' ; MULT = '*' ; DIV = '/' ; }""" antlr_grammar.tokensSpec.parseString(text) # @UndefinedVariable def testBlock(self): text = """( PLUS | MINUS )""" antlr_grammar.block.parseString(text) # @UndefinedVariable def testRule(self): text = """expr : term ( ( PLUS | MINUS ) term )* ;""" antlr_grammar.rule.parseString(text) # @UndefinedVariable def testLexerRule(self): text = """fragment DIGIT : '0'..'9' ;""" antlr_grammar.rule.parseString(text) # @UndefinedVariable def testLexerRule2(self): text = """WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ;""" # antlr_grammar.rule.parseString(text) #@UndefinedVariable def testGrammar(self): text = """grammar SimpleCalc; options { language = Python; } tokens { PLUS = '+' ; MINUS = '-' ; MULT = '*' ; DIV = '/' ; } /*------------------------------------------------------------------ * PARSER RULES *------------------------------------------------------------------*/ expr : term ( ( PLUS | MINUS ) term )* ; term : factor ( ( MULT | DIV ) factor )* ; factor : NUMBER ; /*------------------------------------------------------------------ * LEXER RULES *------------------------------------------------------------------*/ NUMBER : (DIGIT)+ ; /* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */ fragment DIGIT : '0'..'9' ;""" antlrGrammarTree = antlr_grammar.grammarDef.parseString( text ) # @UndefinedVariable pyparsingRules = antlr_grammar.antlrConverter(antlrGrammarTree) pyparsingRule = pyparsingRules["expr"] pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25") pyparsingTreeList = pyparsingTree.asList() print(pyparsingTreeList) self.assertEqual( pyparsingTreeList, [ [ [["2"], []], [ ["-", [["5"], [["*", ["4", 
"2"]]]]], ["+", [["7"], [["/", ["2", "5"]]]]], ], ] ], ) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testOptionsSpec'] unittest.main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/apicheck.py0000644000000000000000000000431314412577542014431 0ustar00# apicheck.py # A simple source code scanner for finding patterns of the form # [ procname1 $arg1 $arg2 ] # and verifying the number of arguments # # Copyright (c) 2004-2016, Paul McGuire # from pyparsing import * # define punctuation and simple tokens for locating API calls LBRACK, RBRACK, LBRACE, RBRACE = map(Suppress, "[]{}") ident = Word(alphas, alphanums + "_") | QuotedString("{", endQuoteChar="}") arg = "$" + ident # define an API call with a specific number of arguments - using '-' # will ensure that after matching procname, an incorrect number of args will # raise a ParseSyntaxException, which will interrupt the scanString def apiProc(name, numargs): return LBRACK + Keyword(name)("procname") - arg * numargs + RBRACK # create an apiReference, listing all API functions to be scanned for, and # their respective number of arguments. Beginning the overall expression # with FollowedBy allows us to quickly rule out non-api calls while scanning, # since all of the api calls begin with a "[" apiRef = FollowedBy("[") + MatchFirst( [ apiProc("procname1", 2), apiProc("procname2", 1), apiProc("procname3", 2), ] ) test = """[ procname1 $par1 $par2 ] other code here [ procname1 $par1 $par2 $par3 ] more code here [ procname1 $par1 ] [ procname3 ${arg with spaces} $par2 ]""" # now explicitly iterate through the scanner using next(), so that # we can trap ParseSyntaxException's that would be raised due to # an incorrect number of arguments. 
If an exception does occur, # then see how we reset the input text and scanner to advance to the # next line of source code api_scanner = apiRef.scanString(test) while 1: try: t, s, e = next(api_scanner) print("found %s on line %d" % (t.procname, lineno(s, test))) except ParseSyntaxException as pe: print("invalid arg count on line", pe.lineno) print(pe.lineno, ":", pe.line) # reset api scanner to start after this exception location test = "\n" * (pe.lineno - 1) + test[pe.loc + 1 :] api_scanner = apiRef.scanString(test) except StopIteration: break ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/bf.py0000644000000000000000000001023014443622312013232 0ustar00# bf.py # # Brainf*ck interpreter demo # # BF instructions (symbols): # + - increment value at the current pointer # - - decrement value at the current pointer # > - increment pointer # < - decrement pointer # , - input new byte value, store at the current pointer # . - output the byte at the current pointer # [] - evaluate value at current pointer, if nonzero, execute all statements in []'s and repeat # import pyparsing as pp # define the basic parser # define Literals for each symbol in the BF langauge PLUS, MINUS, GT, LT, INP, OUT, LBRACK, RBRACK = pp.Literal.using_each("+-<>,.[]") # use a pyparsing Forward for the recursive definition of an instruction that can # itself contain instructions instruction_expr = pp.Forward().set_name("instruction") # define a LOOP expression for the instructions enclosed in brackets; use a # pyparsing Group to wrap the instructions in a sub-list LOOP = pp.Group(LBRACK + instruction_expr[...] 
+ RBRACK) # use '<<=' operator to insert expression definition into existing Forward instruction_expr <<= PLUS | MINUS | GT | LT | INP | OUT | LOOP program_expr = instruction_expr[...].set_name("program") # ignore everything that is not a BF symbol ignore_chars = pp.Word(pp.printables, exclude_chars="+-<>,.[]") program_expr.ignore(ignore_chars) class BFEngine: """ Brainf*ck execution environment, with a memory array and pointer. """ def __init__(self, memory_size: int = 1024): self._ptr = 0 self._memory_size = memory_size self._memory = [0] * self._memory_size @property def ptr(self): return self._ptr @ptr.setter def ptr(self, value): self._ptr = value % self._memory_size @property def at_ptr(self): return self._memory[self._ptr] @at_ptr.setter def at_ptr(self, value): self._memory[self._ptr] = value % 256 def output_value_at_ptr(self): print(chr(self.at_ptr), end="") def input_value(self): input_char = input() or "\0" self.at_ptr = ord(input_char[0]) def reset(self): self._ptr = 0 self._memory[:] = [0] * self._memory_size def dump_state(self): for i in range(30): print(f"{self._memory[i]:3d} ", end="") print() if self.ptr < 30: print(f" {' ' * self.ptr}^") # define executable classes for each instruction class Instruction: """Abstract class for all instruction classes to implement.""" def __init__(self, tokens): self.tokens = tokens def execute(self, bf_engine: BFEngine): raise NotImplementedError() class IncrPtr(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.ptr += 1 class DecrPtr(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.ptr -= 1 class IncrPtrValue(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.at_ptr += 1 class DecrPtrValue(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.at_ptr -= 1 class OutputPtrValue(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.output_value_at_ptr() class InputPtrValue(Instruction): def execute(self, bf_engine: BFEngine): bf_engine.input_value() class 
RunInstructionLoop(Instruction): def __init__(self, tokens): super().__init__(tokens) self.instructions = self.tokens[0][1:-1] def execute(self, bf_engine: BFEngine): while bf_engine.at_ptr: for i in self.instructions: i.execute(bf_engine) # add parse actions to all BF instruction expressions PLUS.add_parse_action(IncrPtrValue) MINUS.add_parse_action(DecrPtrValue) GT.add_parse_action(IncrPtr) LT.add_parse_action(DecrPtr) OUT.add_parse_action(OutputPtrValue) INP.add_parse_action(InputPtrValue) LOOP.add_parse_action(RunInstructionLoop) @program_expr.add_parse_action def run_program(tokens): bf = BFEngine() for t in tokens: t.execute(bf) print() # generate railroad diagram program_expr.create_diagram("bf.html") # execute an example BF program hw = "+[-->-[>>+>-----<<]<--<---]>-.>>>+.>>..+++[.>]<<<<.+++.------.<<-.>>>>+." program_expr.parse_string(hw) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/bigquery_view_parser.py0000644000000000000000000017501714412577542017131 0ustar00# bigquery_view_parser.py # # A parser to extract table names from BigQuery view definitions. 
# This is based on the `select_parser.py` sample in pyparsing: # https://github.com/pyparsing/pyparsing/blob/master/examples/select_parser.py # # Michael Smedberg # import sys import textwrap from pyparsing import ParserElement, Suppress, Forward, CaselessKeyword from pyparsing import MatchFirst, alphas, alphanums, Combine, Word from pyparsing import QuotedString, CharsNotIn, Optional, Group from pyparsing import oneOf, delimitedList, restOfLine, cStyleComment from pyparsing import infixNotation, opAssoc, Regex, nums sys.setrecursionlimit(3000) ParserElement.enablePackrat() class BigQueryViewParser: """Parser to extract table info from BigQuery view definitions Based on the BNF and examples posted at https://cloud.google.com/bigquery/docs/reference/legacy-sql """ _parser = None _table_identifiers = set() _with_aliases = set() def get_table_names(self, sql_stmt): table_identifiers, with_aliases = self._parse(sql_stmt) # Table names and alias names might differ by case, but that's not # relevant- aliases are not case sensitive lower_aliases = BigQueryViewParser.lowercase_set_of_tuples(with_aliases) tables = { x for x in table_identifiers if not BigQueryViewParser.lowercase_of_tuple(x) in lower_aliases } # Table names ARE case sensitive as described at # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity return tables def _parse(self, sql_stmt): BigQueryViewParser._table_identifiers.clear() BigQueryViewParser._with_aliases.clear() BigQueryViewParser._get_parser().parseString(sql_stmt, parseAll=True) return BigQueryViewParser._table_identifiers, BigQueryViewParser._with_aliases @classmethod def lowercase_of_tuple(cls, tuple_to_lowercase): return tuple(x.lower() if x else None for x in tuple_to_lowercase) @classmethod def lowercase_set_of_tuples(cls, set_of_tuples): return {BigQueryViewParser.lowercase_of_tuple(x) for x in set_of_tuples} @classmethod def _get_parser(cls): if cls._parser is not None: return cls._parser 
ParserElement.enablePackrat() LPAR, RPAR, COMMA, LBRACKET, RBRACKET, LT, GT = map(Suppress, "(),[]<>") QUOT, APOS, ACC, DOT, SEMI = map(Suppress, "\"'`.;") ungrouped_select_stmt = Forward().setName("select statement") QUOTED_QUOT = QuotedString('"') QUOTED_APOS = QuotedString("'") QUOTED_ACC = QuotedString("`") QUOTED_BRACKETS = QuotedString("[", endQuoteChar="]") # fmt: off # keywords ( UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, RIGHT, OUTER, FULL, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP, WITH, EXTRACT, PARTITION, ROWS, RANGE, UNBOUNDED, PRECEDING, CURRENT, ROW, FOLLOWING, OVER, INTERVAL, DATE_ADD, DATE_SUB, ADDDATE, SUBDATE, REGEXP_EXTRACT, SPLIT, ORDINAL, FIRST_VALUE, LAST_VALUE, NTH_VALUE, LEAD, LAG, PERCENTILE_CONT, PRECENTILE_DISC, RANK, DENSE_RANK, PERCENT_RANK, CUME_DIST, NTILE, ROW_NUMBER, DATE, TIME, DATETIME, TIMESTAMP, UNNEST, INT64, NUMERIC, FLOAT64, BOOL, BYTES, GEOGRAPHY, ARRAY, STRUCT, SAFE_CAST, ANY_VALUE, ARRAY_AGG, ARRAY_CONCAT_AGG, AVG, BIT_AND, BIT_OR, BIT_XOR, COUNT, COUNTIF, LOGICAL_AND, LOGICAL_OR, MAX, MIN, STRING_AGG, SUM, CORR, COVAR_POP, COVAR_SAMP, STDDEV_POP, STDDEV_SAMP, STDDEV, VAR_POP, VAR_SAMP, VARIANCE, TIMESTAMP_ADD, TIMESTAMP_SUB, GENERATE_ARRAY, GENERATE_DATE_ARRAY, GENERATE_TIMESTAMP_ARRAY, FOR, SYSTEM_TIME, OF, WINDOW, RESPECT, IGNORE, NULLS, IF, CONTAINS, ) = map( CaselessKeyword, """ UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, RIGHT, OUTER, FULL, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, 
CURRENT_DATE, CURRENT_TIMESTAMP, WITH, EXTRACT, PARTITION, ROWS, RANGE, UNBOUNDED, PRECEDING, CURRENT, ROW, FOLLOWING, OVER, INTERVAL, DATE_ADD, DATE_SUB, ADDDATE, SUBDATE, REGEXP_EXTRACT, SPLIT, ORDINAL, FIRST_VALUE, LAST_VALUE, NTH_VALUE, LEAD, LAG, PERCENTILE_CONT, PRECENTILE_DISC, RANK, DENSE_RANK, PERCENT_RANK, CUME_DIST, NTILE, ROW_NUMBER, DATE, TIME, DATETIME, TIMESTAMP, UNNEST, INT64, NUMERIC, FLOAT64, BOOL, BYTES, GEOGRAPHY, ARRAY, STRUCT, SAFE_CAST, ANY_VALUE, ARRAY_AGG, ARRAY_CONCAT_AGG, AVG, BIT_AND, BIT_OR, BIT_XOR, COUNT, COUNTIF, LOGICAL_AND, LOGICAL_OR, MAX, MIN, STRING_AGG, SUM, CORR, COVAR_POP, COVAR_SAMP, STDDEV_POP, STDDEV_SAMP, STDDEV, VAR_POP, VAR_SAMP, VARIANCE, TIMESTAMP_ADD, TIMESTAMP_SUB, GENERATE_ARRAY, GENERATE_DATE_ARRAY, GENERATE_TIMESTAMP_ARRAY, FOR, SYSTEM_TIME, OF, WINDOW, RESPECT, IGNORE, NULLS, IF, CONTAINS, """.replace(",", "").split(), ) keyword_nonfunctions = MatchFirst( (UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, RIGHT, OUTER, FULL, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, STRUCT, WINDOW, SYSTEM_TIME, IF, FOR, ) ) keyword = keyword_nonfunctions | MatchFirst( (ESCAPE, CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP, DATE_ADD, DATE_SUB, ADDDATE, SUBDATE, INTERVAL, STRING_AGG, REGEXP_EXTRACT, SPLIT, ORDINAL, UNNEST, SAFE_CAST, PARTITION, TIMESTAMP_ADD, TIMESTAMP_SUB, ARRAY, GENERATE_ARRAY, GENERATE_DATE_ARRAY, GENERATE_TIMESTAMP_ARRAY, SYSTEM_TIME, CONTAINS, ) ) # fmt: on identifier_word = Word(alphas + "_@#", alphanums + "@$#_") identifier = ~keyword + identifier_word.copy() collation_name = identifier.copy() # NOTE: Column names can be keywords. Doc says they cannot, but in practice it seems to work. column_name = identifier_word.copy() qualified_column_name = Combine( column_name + ("." 
+ column_name)[..., 6], adjacent=False ) # NOTE: As with column names, column aliases can be keywords, e.g. functions like `current_time`. Other # keywords, e.g. `from` make parsing pretty difficult (e.g. "SELECT a from from b" is confusing.) column_alias = ~keyword_nonfunctions + column_name.copy() table_name = identifier.copy() table_alias = identifier.copy() index_name = identifier.copy() function_name = identifier.copy() parameter_name = identifier.copy() # NOTE: The expression in a CASE statement can be an integer. E.g. this is valid SQL: # select CASE 1 WHEN 1 THEN -1 ELSE -2 END from test_table unquoted_case_identifier = ~keyword + Word(alphanums + "$_") quoted_case_identifier = QUOTED_QUOT | QUOTED_ACC case_identifier = quoted_case_identifier | unquoted_case_identifier case_expr = ( Optional(case_identifier + DOT) + Optional(case_identifier + DOT) + case_identifier ) # expression expr = Forward().setName("expression") integer = Regex(r"[+-]?\d+") numeric_literal = Regex(r"[+-]?\d*\.?\d+([eE][+-]?\d+)?") string_literal = QUOTED_APOS | QUOTED_QUOT | QUOTED_ACC regex_literal = "r" + string_literal blob_literal = Regex(r"[xX]'[0-9A-Fa-f]+'") date_or_time_literal = (DATE | TIME | DATETIME | TIMESTAMP) + string_literal literal_value = ( numeric_literal | string_literal | regex_literal | blob_literal | date_or_time_literal | NULL | CURRENT_TIME + Optional(LPAR + Optional(string_literal) + RPAR) | CURRENT_DATE + Optional(LPAR + Optional(string_literal) + RPAR) | CURRENT_TIMESTAMP + Optional(LPAR + Optional(string_literal) + RPAR) ) bind_parameter = Word("?", nums) | Combine(oneOf(": @ $") + parameter_name) type_name = oneOf( """TEXT REAL INTEGER BLOB NULL TIMESTAMP STRING DATE INT64 NUMERIC FLOAT64 BOOL BYTES DATETIME GEOGRAPHY TIME ARRAY STRUCT""", caseless=True, ) date_part = oneOf( """DAY DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND HOUR HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND MICROSECOND MINUTE MINUTE_MICROSECOND MINUTE_SECOND MONTH QUARTER SECOND 
SECOND_MICROSECOND WEEK YEAR YEAR_MONTH""", caseless=True, as_keyword=True, ) datetime_operators = ( DATE_ADD | DATE_SUB | ADDDATE | SUBDATE | TIMESTAMP_ADD | TIMESTAMP_SUB ) grouping_term = expr.copy() ordering_term = Group( expr("order_key") + Optional(COLLATE + collation_name("collate")) + Optional(ASC | DESC)("direction") )("ordering_term") function_arg = expr.copy()("function_arg") function_args = Optional( "*" | Optional(DISTINCT) + delimitedList(function_arg) + Optional((RESPECT | IGNORE) + NULLS) )("function_args") function_call = ( (function_name | keyword)("function_name") + LPAR + Group(function_args)("function_args_group") + RPAR ) navigation_function_name = ( FIRST_VALUE | LAST_VALUE | NTH_VALUE | LEAD | LAG | PERCENTILE_CONT | PRECENTILE_DISC ) aggregate_function_name = ( ANY_VALUE | ARRAY_AGG | ARRAY_CONCAT_AGG | AVG | BIT_AND | BIT_OR | BIT_XOR | COUNT | COUNTIF | LOGICAL_AND | LOGICAL_OR | MAX | MIN | STRING_AGG | SUM ) statistical_aggregate_function_name = ( CORR | COVAR_POP | COVAR_SAMP | STDDEV_POP | STDDEV_SAMP | STDDEV | VAR_POP | VAR_SAMP | VARIANCE ) numbering_function_name = ( RANK | DENSE_RANK | PERCENT_RANK | CUME_DIST | NTILE | ROW_NUMBER ) analytic_function_name = ( navigation_function_name | aggregate_function_name | statistical_aggregate_function_name | numbering_function_name )("analytic_function_name") partition_expression_list = delimitedList(grouping_term)( "partition_expression_list" ) window_frame_boundary_start = ( UNBOUNDED + PRECEDING | numeric_literal + (PRECEDING | FOLLOWING) | CURRENT + ROW ) window_frame_boundary_end = ( UNBOUNDED + FOLLOWING | numeric_literal + (PRECEDING | FOLLOWING) | CURRENT + ROW ) window_frame_clause = (ROWS | RANGE) + ( ((UNBOUNDED + PRECEDING) | (numeric_literal + PRECEDING) | (CURRENT + ROW)) | (BETWEEN + window_frame_boundary_start + AND + window_frame_boundary_end) ) window_name = identifier.copy()("window_name") window_specification = ( Optional(window_name) + Optional(PARTITION + BY + 
partition_expression_list) + Optional(ORDER + BY + delimitedList(ordering_term)) + Optional(window_frame_clause)("window_specification") ) analytic_function = ( analytic_function_name + LPAR + function_args + RPAR + OVER + (window_name | LPAR + Optional(window_specification) + RPAR) )("analytic_function") string_agg_term = ( STRING_AGG + LPAR + Optional(DISTINCT) + expr + Optional(COMMA + string_literal) + Optional( ORDER + BY + expr + Optional(ASC | DESC) + Optional(LIMIT + integer) ) + RPAR )("string_agg") array_literal = ( Optional(ARRAY + Optional(LT + delimitedList(type_name) + GT)) + LBRACKET + delimitedList(expr) + RBRACKET ) interval = INTERVAL + expr + date_part array_generator = ( GENERATE_ARRAY + LPAR + numeric_literal + COMMA + numeric_literal + COMMA + numeric_literal + RPAR ) date_array_generator = ( (GENERATE_DATE_ARRAY | GENERATE_TIMESTAMP_ARRAY) + LPAR + expr("start_date") + COMMA + expr("end_date") + Optional(COMMA + interval) + RPAR ) explicit_struct = ( STRUCT + Optional(LT + delimitedList(type_name) + GT) + LPAR + Optional(delimitedList(expr + Optional(AS + identifier))) + RPAR ) case_when = WHEN + expr.copy()("when") case_then = THEN + expr.copy()("then") case_clauses = Group((case_when + case_then)[...]) case_else = ELSE + expr.copy()("else") case_stmt = ( CASE + Optional(case_expr.copy()) + case_clauses("case_clauses") + Optional(case_else) + END )("case") expr_term = ( (analytic_function)("analytic_function") | (CAST + LPAR + expr + AS + type_name + RPAR)("cast") | (SAFE_CAST + LPAR + expr + AS + type_name + RPAR)("safe_cast") | (Optional(EXISTS) + LPAR + ungrouped_select_stmt + RPAR)("subselect") | (literal_value)("literal") | (bind_parameter)("bind_parameter") | (EXTRACT + LPAR + expr + FROM + expr + RPAR)("extract") | case_stmt | (datetime_operators + LPAR + expr + COMMA + interval + RPAR)( "date_operation" ) | string_agg_term("string_agg_term") | array_literal("array_literal") | array_generator("array_generator") | 
date_array_generator("date_array_generator") | explicit_struct("explicit_struct") | function_call("function_call") | qualified_column_name("column") ) + Optional(LBRACKET + (OFFSET | ORDINAL) + LPAR + expr + RPAR + RBRACKET)( "offset_ordinal" ) struct_term = LPAR + delimitedList(expr_term) + RPAR UNARY, BINARY, TERNARY = 1, 2, 3 expr <<= infixNotation( (expr_term | struct_term), [ (oneOf("- + ~") | NOT, UNARY, opAssoc.RIGHT), (ISNULL | NOTNULL | NOT + NULL, UNARY, opAssoc.LEFT), ("||", BINARY, opAssoc.LEFT), (oneOf("* / %"), BINARY, opAssoc.LEFT), (oneOf("+ -"), BINARY, opAssoc.LEFT), (oneOf("<< >> & |"), BINARY, opAssoc.LEFT), (oneOf("= > < >= <= <> != !< !> =="), BINARY, opAssoc.LEFT), ( IS + Optional(NOT) | Optional(NOT) + IN | Optional(NOT) + LIKE | GLOB | MATCH | REGEXP | CONTAINS, BINARY, opAssoc.LEFT, ), ((BETWEEN, AND), TERNARY, opAssoc.LEFT), ( Optional(NOT) + IN + LPAR + Group(ungrouped_select_stmt | delimitedList(expr)) + RPAR, UNARY, opAssoc.LEFT, ), (AND, BINARY, opAssoc.LEFT), (OR, BINARY, opAssoc.LEFT), ], ) quoted_expr = ( expr | QUOT + expr + QUOT | APOS + expr + APOS | ACC + expr + ACC )("quoted_expr") compound_operator = ( UNION + Optional(ALL | DISTINCT) | INTERSECT + DISTINCT | EXCEPT + DISTINCT | INTERSECT | EXCEPT )("compound_operator") join_constraint = Group( Optional( ON + expr | USING + LPAR + Group(delimitedList(qualified_column_name)) + RPAR ) )("join_constraint") join_op = ( COMMA | Group( Optional(NATURAL) + Optional( INNER | CROSS | LEFT + OUTER | LEFT | RIGHT + OUTER | RIGHT | FULL + OUTER | OUTER | FULL ) + JOIN ) )("join_op") join_source = Forward() # We support three kinds of table identifiers. # # First, dot delimited info like project.dataset.table, where # each component follows the rules described in the BigQuery # docs, namely: # Contain letters (upper or lower case), numbers, and underscores # # Second, a dot delimited quoted string. Since it's quoted, we'll be # liberal w.r.t. what characters we allow. 
E.g.: # `project.dataset.name-with-dashes` # # Third, a series of quoted strings, delimited by dots, e.g.: # `project`.`dataset`.`name-with-dashes` # # We also support combinations, like: # project.dataset.`name-with-dashes` # `project`.`dataset.name-with-dashes` def record_table_identifier(t): identifier_list = t.asList() padded_list = [None] * (3 - len(identifier_list)) + identifier_list cls._table_identifiers.add(tuple(padded_list)) standard_table_part = ~keyword + Word(alphanums + "_") quoted_project_part = QUOTED_QUOT | QUOTED_APOS | QUOTED_ACC quoted_table_part = ( QUOT + CharsNotIn('".') + QUOT | APOS + CharsNotIn("'.") + APOS | ACC + CharsNotIn("`.") + ACC ) quoted_table_parts_identifier = ( Optional( (quoted_project_part("project") | standard_table_part("project")) + DOT ) + Optional( (quoted_table_part("dataset") | standard_table_part("dataset")) + DOT ) + (quoted_table_part("table") | standard_table_part("table")) ).setParseAction(record_table_identifier) def record_quoted_table_identifier(t): identifier_list = t[0].split(".") *first, second, third = identifier_list first = ".".join(first) or None identifier_list = [first, second, third] padded_list = [None] * (3 - len(identifier_list)) + identifier_list cls._table_identifiers.add(tuple(padded_list)) quotable_table_parts_identifier = ( QUOTED_QUOT | QUOTED_APOS | QUOTED_ACC | QUOTED_BRACKETS ).setParseAction(record_quoted_table_identifier) table_identifier = ( quoted_table_parts_identifier | quotable_table_parts_identifier ).setName("table_identifier") single_source = ( ( table_identifier + Optional(Optional(AS) + table_alias("table_alias*")) + Optional(FOR - SYSTEM_TIME + AS + OF + expr) + Optional(INDEXED + BY + index_name("name") | NOT + INDEXED) )("index") | (LPAR + ungrouped_select_stmt + RPAR) | (LPAR + join_source + RPAR) | (UNNEST + LPAR + expr + RPAR) ) + Optional(Optional(AS) + table_alias) join_source <<= single_source + (join_op + single_source + join_constraint)[...] 
over_partition = (PARTITION + BY + delimitedList(partition_expression_list))( "over_partition" ) over_order = ORDER + BY + delimitedList(ordering_term) over_unsigned_value_specification = expr over_window_frame_preceding = ( UNBOUNDED + PRECEDING | over_unsigned_value_specification + PRECEDING | CURRENT + ROW ) over_window_frame_following = ( UNBOUNDED + FOLLOWING | over_unsigned_value_specification + FOLLOWING | CURRENT + ROW ) over_window_frame_bound = ( over_window_frame_preceding | over_window_frame_following ) over_window_frame_between = ( BETWEEN + over_window_frame_bound + AND + over_window_frame_bound ) over_window_frame_extent = ( over_window_frame_preceding | over_window_frame_between ) over_row_or_range = (ROWS | RANGE) + over_window_frame_extent over = ( OVER + LPAR + Optional(over_partition) + Optional(over_order) + Optional(over_row_or_range) + RPAR )("over") if_term = IF - LPAR + expr + COMMA + expr + COMMA + expr + RPAR result_column = Optional(table_name + ".") + "*" + Optional( EXCEPT + LPAR + delimitedList(column_name) + RPAR ) | Group(quoted_expr + Optional(over)) window_select_clause = ( WINDOW + identifier + AS + LPAR + window_specification + RPAR ) with_stmt = Forward().setName("with statement") ungrouped_select_no_with = ( SELECT + Optional(DISTINCT | ALL) + Group( delimitedList( (~FROM + ~IF + result_column | if_term) + Optional(Optional(AS) + column_alias), allow_trailing_delim=True, ) )("columns") + Optional(FROM + join_source("from*")) + Optional(WHERE + expr) + Optional( GROUP + BY + Group(delimitedList(grouping_term))("group_by_terms") ) + Optional(HAVING + expr("having_expr")) + Optional( ORDER + BY + Group(delimitedList(ordering_term))("order_by_terms") ) + Optional(delimitedList(window_select_clause)) ) select_no_with = ungrouped_select_no_with | ( LPAR + ungrouped_select_no_with + RPAR ) select_core = Optional(with_stmt) + select_no_with grouped_select_core = select_core | (LPAR + select_core + RPAR) ungrouped_select_stmt <<= ( 
grouped_select_core + (compound_operator + grouped_select_core)[...] + Optional( LIMIT + (Group(expr + OFFSET + expr) | Group(expr + COMMA + expr) | expr)( "limit" ) ) )("select") select_stmt = ( ungrouped_select_stmt | (LPAR + ungrouped_select_stmt + RPAR) ) + Optional(SEMI) # define comment format, and ignore them sql_comment = oneOf("-- #") + restOfLine | cStyleComment select_stmt.ignore(sql_comment) def record_with_alias(t): identifier_list = t.asList() padded_list = [None] * (3 - len(identifier_list)) + identifier_list cls._with_aliases.add(tuple(padded_list)) with_clause = Group( identifier.setParseAction(record_with_alias) + AS + LPAR + select_stmt + RPAR ) with_stmt <<= WITH + delimitedList(with_clause) with_stmt.ignore(sql_comment) cls._parser = select_stmt return cls._parser def test(self, sql_stmt, expected_tables, verbose=False): def print_(*args): if verbose: print(*args) print_(textwrap.dedent(sql_stmt.strip())) found_tables = self.get_table_names(sql_stmt) print_(found_tables) expected_tables_set = set(expected_tables) if expected_tables_set != found_tables: raise Exception( f"Test {test_index} failed- expected {expected_tables_set} but got {found_tables}" ) print_() if __name__ == "__main__": # fmt: off TEST_CASES = [ [ """\ SELECT x FROM y.a, b """, [ (None, "y", "a"), (None, None, "b"), ], ], [ """\ SELECT x FROM y.a JOIN b """, [ (None, "y", "a"), (None, None, "b"), ], ], [ """\ select * from xyzzy where z > 100 """, [ (None, None, "xyzzy"), ], ], [ """\ select * from xyzzy where z > 100 order by zz """, [ (None, None, "xyzzy"), ], ], [ """\ select * from xyzzy """, [ (None, None, "xyzzy"), ], ], [ """\ select z.* from xyzzy """, [ (None, None, "xyzzy"), ], ], [ """\ select a, b from test_table where 1=1 and b='yes' """, [ (None, None, "test_table"), ], ], [ """\ select a, b from test_table where 1=1 and b in (select bb from foo) """, [ (None, None, "test_table"), (None, None, "foo"), ], ], [ """\ select z.a, b from test_table where 1=1 and b in 
(select bb from foo) """, [ (None, None, "test_table"), (None, None, "foo"), ], ], [ """\ select z.a, b from test_table where 1=1 and b in (select bb from foo) order by b,c desc,d """, [ (None, None, "test_table"), (None, None, "foo"), ], ], [ """\ select z.a, b from test_table left join test2_table where 1=1 and b in (select bb from foo) """, [ (None, None, "test_table"), (None, None, "test2_table"), (None, None, "foo"), ], ], [ """\ select a, db.table.b as BBB from db.table where 1=1 and BBB='yes' """, [ (None, "db", "table"), ], ], [ """\ select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes' """, [ (None, None, "test_table"), (None, "db", "table"), ], ], [ """\ select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes' limit 50 """, [ (None, None, "test_table"), (None, "db", "table"), ], ], [ """\ select a, b from test_table where (1=1 or 2=3) and b='yes' group by zx having b=2 order by 1 """, [ (None, None, "test_table"), ], ], [ """\ select a, b # this is a comment from test_table # another comment where (1=1 or 2=3) and b='yes' #yup, a comment group by zx having b=2 order by 1 """, [ (None, None, "test_table"), ], ], [ """\ SELECT COUNT(DISTINCT foo) FROM bar JOIN baz ON bar.baz_id = baz.id """, [ (None, None, "bar"), (None, None, "baz"), ], ], [ """\ SELECT COUNT(DISTINCT foo) FROM bar, baz WHERE bar.baz_id = baz.id """, [ (None, None, "bar"), (None, None, "baz"), ], ], [ """\ WITH one AS (SELECT id FROM foo) SELECT one.id """, [ (None, None, "foo"), ], ], [ """\ WITH one AS (SELECT id FROM foo), two AS (select id FROM bar) SELECT one.id, two.id """, [ (None, None, "foo"), (None, None, "bar"), ], ], [ """\ SELECT x, RANK() OVER (ORDER BY x ASC) AS rank, DENSE_RANK() OVER (ORDER BY x ASC) AS dense_rank, ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) AS row_num FROM a """, [ (None, None, "a"), ], ], [ """\ SELECT x, COUNT(*) OVER ( ORDER BY x RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING ) AS count_x FROM T """, [ (None, 
None, "T"), ], ], [ """\ SELECT firstname, department, startdate, RANK() OVER ( PARTITION BY department ORDER BY startdate ) AS rank FROM Employees """, [ (None, None, "Employees"), ], ], # A fragment from https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34' """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34') SELECT name, FORMAT_TIMESTAMP('%X', 
finish_time) AS finish_time, division, FORMAT_TIMESTAMP('%X', fastest_time) AS fastest_time, TIMESTAMP_DIFF(finish_time, fastest_time, SECOND) AS delta_in_seconds FROM ( SELECT name, finish_time, division, FIRST_VALUE(finish_time) OVER (PARTITION BY division ORDER BY finish_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS fastest_time FROM finishers) """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34') SELECT name, FORMAT_TIMESTAMP('%X', finish_time) AS finish_time, division, FORMAT_TIMESTAMP('%X', slowest_time) AS slowest_time, TIMESTAMP_DIFF(slowest_time, finish_time, SECOND) AS delta_in_seconds FROM ( SELECT name, finish_time, division, LAST_VALUE(finish_time) OVER (PARTITION BY division ORDER BY finish_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS slowest_time FROM finishers) """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 
'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34') SELECT name, FORMAT_TIMESTAMP('%X', finish_time) AS finish_time, division, FORMAT_TIMESTAMP('%X', fastest_time) AS fastest_time, FORMAT_TIMESTAMP('%X', second_fastest) AS second_fastest FROM ( SELECT name, finish_time, division,finishers, FIRST_VALUE(finish_time) OVER w1 AS fastest_time, NTH_VALUE(finish_time, 2) OVER w1 as second_fastest FROM finishers WINDOW w1 AS ( PARTITION BY division ORDER BY finish_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)) """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34') SELECT name, finish_time, division, 
LEAD(name) OVER (PARTITION BY division ORDER BY finish_time ASC) AS followed_by FROM finishers """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP '2016-10-18 3:10:14', 'F30-34') SELECT name, finish_time, division, LEAD(name, 2) OVER (PARTITION BY division ORDER BY finish_time ASC) AS two_runners_back FROM finishers """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ WITH finishers AS (SELECT 'Sophia Liu' as name, TIMESTAMP '2016-10-18 2:51:45' as finish_time, 'F30-34' as division UNION ALL SELECT 'Lisa Stelzner', TIMESTAMP '2016-10-18 2:54:11', 'F35-39' UNION ALL SELECT 'Nikki Leith', TIMESTAMP '2016-10-18 2:59:01', 'F30-34' UNION ALL SELECT 'Lauren Matthews', TIMESTAMP '2016-10-18 3:01:17', 'F35-39' UNION ALL SELECT 'Desiree Berry', TIMESTAMP '2016-10-18 3:05:42', 'F35-39' UNION ALL SELECT 'Suzy Slane', TIMESTAMP '2016-10-18 3:06:24', 'F35-39' UNION ALL SELECT 'Jen Edwards', TIMESTAMP '2016-10-18 3:06:36', 'F30-34' UNION ALL SELECT 'Meghan Lederer', TIMESTAMP '2016-10-18 3:07:41', 'F30-34' UNION ALL SELECT 'Carly Forte', TIMESTAMP '2016-10-18 3:08:58', 'F25-29' UNION ALL SELECT 'Lauren Reasoner', TIMESTAMP 
'2016-10-18 3:10:14', 'F30-34') SELECT name, finish_time, division, LAG(name) OVER (PARTITION BY division ORDER BY finish_time ASC) AS preceding_runner FROM finishers """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ SELECT PERCENTILE_CONT(x, 0) OVER() AS min, PERCENTILE_CONT(x, 0.01) OVER() AS percentile1, PERCENTILE_CONT(x, 0.5) OVER() AS median, PERCENTILE_CONT(x, 0.9) OVER() AS percentile90, PERCENTILE_CONT(x, 1) OVER() AS max FROM UNNEST([0, 3, NULL, 1, 2]) AS x LIMIT 1 """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions [ """\ SELECT x, PERCENTILE_DISC(x, 0) OVER() AS min, PERCENTILE_DISC(x, 0.5) OVER() AS median, PERCENTILE_DISC(x, 1) OVER() AS max FROM UNNEST(['c', NULL, 'b', 'a']) AS x """, [], ], # From https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions [ """\ SELECT TIMESTAMP "2008-12-25 15:30:00 UTC" as original, TIMESTAMP_ADD(TIMESTAMP "2008-12-25 15:30:00 UTC", INTERVAL 10 MINUTE) AS later """, [], ], # Previously hosted on https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions, but # appears to no longer be there [ """\ WITH date_hour_slots AS ( SELECT [ STRUCT( " 00:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 01:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 02:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 03:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 04:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 05:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 06:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', 
current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 07:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 08:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY ) as dt_range), STRUCT( " 09:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01', current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 10:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 11:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 12:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 13:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 14:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 15:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 16:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 17:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 18:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 19:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 20:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 21:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 22:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range), STRUCT( " 23:00:00 UTC" as hrs, GENERATE_DATE_ARRAY('2016-01-01',current_date(), INTERVAL 1 DAY) as dt_range) ] AS full_timestamps) SELECT dt AS dates, hrs, CAST(CONCAT( 
CAST(dt as STRING), CAST(hrs as STRING)) as TIMESTAMP) as timestamp_value FROM `date_hour_slots`, date_hour_slots.full_timestamps LEFT JOIN full_timestamps.dt_range as dt """, [ (None, "date_hour_slots", "full_timestamps"), (None, "full_timestamps", "dt_range"), ], ], [ """\ SELECT [foo], ARRAY[foo], ARRAY[foo, bar], STRUCT(1, 3), STRUCT(2, 'foo'), current_date(), GENERATE_ARRAY(5, NULL, 1), GENERATE_DATE_ARRAY('2016-10-05', '2016-10-01', INTERVAL 1 DAY), GENERATE_DATE_ARRAY('2016-10-05', NULL), GENERATE_DATE_ARRAY('2016-01-01', '2016-12-31', INTERVAL 2 MONTH), GENERATE_DATE_ARRAY('2000-02-01',current_date(), INTERVAL 1 DAY), GENERATE_TIMESTAMP_ARRAY('2016-10-05 00:00:00', '2016-10-05 00:00:02', INTERVAL 1 SECOND) FROM bar """, [ (None, None, "bar"), ], ], [ """\ SELECT GENERATE_ARRAY(start, 5) AS example_array FROM UNNEST([3, 4, 5]) AS start """, [], ], [ """\ WITH StartsAndEnds AS ( SELECT DATE '2016-01-01' AS date_start, DATE '2016-01-31' AS date_end UNION ALL SELECT DATE "2016-04-01", DATE "2016-04-30" UNION ALL SELECT DATE "2016-07-01", DATE "2016-07-31" UNION ALL SELECT DATE "2016-10-01", DATE "2016-10-31" ) SELECT GENERATE_DATE_ARRAY(date_start, date_end, INTERVAL 1 WEEK) AS date_range FROM StartsAndEnds """, [], ], [ """\ SELECT GENERATE_TIMESTAMP_ARRAY(start_timestamp, end_timestamp, INTERVAL 1 HOUR) AS timestamp_array FROM (SELECT TIMESTAMP '2016-10-05 00:00:00' AS start_timestamp, TIMESTAMP '2016-10-05 02:00:00' AS end_timestamp UNION ALL SELECT TIMESTAMP '2016-10-05 12:00:00' AS start_timestamp, TIMESTAMP '2016-10-05 14:00:00' AS end_timestamp UNION ALL SELECT TIMESTAMP '2016-10-05 23:59:00' AS start_timestamp, TIMESTAMP '2016-10-06 01:59:00' AS end_timestamp) """, [], ], [ """\ SELECT DATE_SUB(current_date("-08:00"), INTERVAL 2 DAY) """, [], ], [ """\ SELECT case when (a) then b else c end FROM d """, [ (None, None, "d"), ], ], [ """\ SELECT e, case when (f) then g else h end FROM i """, [ (None, None, "i"), ], ], [ """\ SELECT case when j then k else 
l end FROM m """, [ (None, None, "m",), ], ], [ """\ SELECT n, case when o then p else q end FROM r """, [ (None, None, "r"), ], ], [ """\ SELECT case s when (t) then u else v end FROM w """, [ (None, None, "w"), ], ], [ """\ SELECT x, case y when (z) then aa else ab end FROM ac """, [ (None, None, "ac"), ], ], [ """\ SELECT case ad when ae then af else ag end FROM ah """, [ (None, None, "ah"), ], ], [ """\ SELECT ai, case aj when ak then al else am end FROM an """, [ (None, None, "an"), ], ], [ """\ WITH ONE AS (SELECT x FROM y), TWO AS (select a FROM b) SELECT y FROM onE JOIN TWo """, [ (None, None, "y"), (None, None, "b"), ], ], [ """\ SELECT a, (SELECT b FROM oNE) FROM OnE """, [ (None, None, "oNE"), (None, None, "OnE"), ], ], [ """\ SELECT * FROM `a.b.c` """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM `b.c` """, [ (None, "b", "c"), ], ], [ """\ SELECT * FROM `c` """, [ (None, None, "c"), ], ], [ """\ SELECT * FROM a.b.c """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM "a"."b"."c" """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM 'a'.'b'.'c' """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM `a`.`b`.`c` """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM "a.b.c" """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM 'a.b.c' """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM `a.b.c` """, [ ("a", "b", "c"), ], ], [ """\ SELECT t2.a FROM t2 FOR SYSTEM_TIME AS OF t1.timestamp_column """, [ (None, None, "t2"), ], ], [ """\ SELECT * FROM t1 WHERE t1.a IN (SELECT t2.a FROM t2 FOR SYSTEM_TIME AS OF t1.timestamp_column) """, [ (None, None, "t1"), (None, None, "t2"), ], ], [ """\ WITH a AS (SELECT b FROM c) SELECT d FROM A JOIN e ON f = g JOIN E ON h = i """, [ (None, None, "c"), (None, None, "e"), (None, None, "E"), ], ], [ """\ with a as ( ( select b from ( select c from d ) Union all ( select e from f ) ) ) select g from h """, [ (None, None, "d"), (None, None, "f"), (None, None, "h"), ], ], [ """\ select a AS ESCAPE, b AS CURRENT_TIME, c AS CURRENT_DATE, d AS 
CURRENT_TIMESTAMP, e AS DATE_ADD FROM x """, [ (None, None, "x"), ], ], [ """\ WITH x AS ( SELECT a FROM b WINDOW w as (PARTITION BY a) ) SELECT y FROM z """, [ (None, None, "b"), (None, None, "z") ], ], [ """\ SELECT DISTINCT FIRST_VALUE(x IGNORE NULLS) OVER (PARTITION BY y) FROM z """, [ (None, None, "z") ], ], [ """\ SELECT a . b . c FROM d """, [ (None, None, "d") ], ], [ """\ WITH a AS ( SELECT b FROM c UNION ALL ( WITH d AS ( SELECT e FROM f ) SELECT g FROM d ) ) SELECT h FROM a """, [ (None, None, "c"), (None, None, "f") ], ], [ """\ WITH a AS ( SELECT b FROM c UNION ALL ( WITH d AS ( SELECT e FROM f ) SELECT g FROM d ) ) (SELECT h FROM a) """, [ (None, None, "c"), (None, None, "f") ], ], [ """\ SELECT * FROM a.b.`c` """, [ ("a", "b", "c"), ], ], [ """\ SELECT * FROM 'a'.b.`c` """, [ ("a", "b", "c"), ], ], # from https://cloud.google.com/bigquery/docs/reference/legacy-sql [ """\ SELECT word, word_count, RANK() OVER (PARTITION BY corpus ORDER BY word_count DESC) rank, FROM [bigquery-public-data:samples.shakespeare] WHERE corpus='othello' and length(word) > 10 LIMIT 5 """, [ (None, 'bigquery-public-data:samples', 'shakespeare'), ], ], [ """\ SELECT word, word_count, RATIO_TO_REPORT(word_count) OVER (PARTITION BY corpus ORDER BY word_count DESC) r_to_r, FROM [bigquery-public-data:samples.shakespeare] WHERE corpus='othello' and length(word) > 10 LIMIT 5 """, [ (None, 'bigquery-public-data:samples', 'shakespeare'), ], ], [ """\ SELECT word, word_count, ROW_NUMBER() OVER (PARTITION BY corpus ORDER BY word_count DESC) row_num, FROM [bigquery-public-data:samples.shakespeare] WHERE corpus='othello' and length(word) > 10 LIMIT 5 """, [ (None, 'bigquery-public-data:samples', 'shakespeare'), ], ], [ """\ SELECT TO_BASE64(SHA1(title)) FROM [bigquery-public-data:samples.wikipedia] LIMIT 100; """, [ (None, 'bigquery-public-data:samples', 'wikipedia'), ], ], [ """\ SELECT CASE WHEN state IN ('WA', 'OR', 'CA', 'AK', 'HI', 'ID', 'MT', 'WY', 'NV', 'UT', 'CO', 'AZ', 'NM') THEN 
'West' WHEN state IN ('OK', 'TX', 'AR', 'LA', 'TN', 'MS', 'AL', 'KY', 'GA', 'FL', 'SC', 'NC', 'VA', 'WV', 'MD', 'DC', 'DE') THEN 'South' WHEN state IN ('ND', 'SD', 'NE', 'KS', 'MN', 'IA', 'MO', 'WI', 'IL', 'IN', 'MI', 'OH') THEN 'Midwest' WHEN state IN ('NY', 'PA', 'NJ', 'CT', 'RI', 'MA', 'VT', 'NH', 'ME') THEN 'Northeast' ELSE 'None' END as region, average_mother_age, average_father_age, state, year FROM (SELECT year, state, SUM(mother_age)/COUNT(mother_age) as average_mother_age, SUM(father_age)/COUNT(father_age) as average_father_age FROM [bigquery-public-data:samples.natality] WHERE father_age < 99 GROUP BY year, state) ORDER BY year LIMIT 5; """, [ (None, 'bigquery-public-data:samples', 'natality'), ], ], [ """\ SELECT /* Replace white spaces in the title with underscores. */ REGEXP_REPLACE(title, r'\s+', '_') AS regexp_title, revisions FROM (SELECT title, COUNT(revision_id) as revisions FROM [bigquery-public-data:samples.wikipedia] WHERE wp_namespace=0 /* Match titles that start with 'G', end with * 'e', and contain at least two 'o's. */ AND REGEXP_MATCH(title, r'^G.*o.*o.*e$') GROUP BY title ORDER BY revisions DESC LIMIT 100);""", [ (None, 'bigquery-public-data:samples', 'wikipedia'), ], ], [ """\ SELECT page_title, /* Populate these columns as True or False, */ /* depending on the condition */ IF (page_title CONTAINS 'search', INTEGER(total), 0) AS search, IF (page_title CONTAINS 'Earth' OR page_title CONTAINS 'Maps', INTEGER(total), 0) AS geo, FROM /* Subselect to return top revised Wikipedia articles */ /* containing 'Google', followed by additional text. 
*/ (SELECT TOP (title, 5) as page_title, COUNT (*) as total FROM [bigquery-public-data:samples.wikipedia] WHERE REGEXP_MATCH (title, r'^Google.+') AND wp_namespace = 0 ); """, [ (None, 'bigquery-public-data:samples', 'wikipedia'), ] ], [ """\ SELECT title, HASH(title) AS hash_value, IF(ABS(HASH(title)) % 2 == 1, 'True', 'False') AS included_in_sample FROM [bigquery-public-data:samples.wikipedia] WHERE wp_namespace = 0 LIMIT 5; """, [ (None, 'bigquery-public-data:samples', 'wikipedia'), ] ], [ """\ with t as (select CASE when EXTRACT(dayofweek FROM CURRENT_DATETIME()) == 1 then "S" end) select * from t """, [], ], ] # fmt: on parser = BigQueryViewParser() for test_index, test_case in enumerate(TEST_CASES): sql, expected = test_case parser.test(sql_stmt=sql, expected_tables=expected, verbose=True) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1690670157.8383648 pyparsing-3.1.1/examples/booleansearchparser.py0000644000000000000000000003641014461312116016673 0ustar00""" Boolean Search query parser (Based on searchparser: https://github.com/pyparsing/pyparsing/blob/master/examples/searchparser.py) version 2018-07-22 This search query parser uses the excellent Pyparsing module (http://pyparsing.sourceforge.net/) to parse search queries by users. It handles: * 'and', 'or' and implicit 'and' operators; * parentheses; * quoted strings; * wildcards at the end of a search term (help*); * wildcards at the beginning of a search term (*lp); * non-western languages Requirements: * Python * Pyparsing SAMPLE USAGE: from booleansearchparser import BooleanSearchParser from __future__ import print_function bsp = BooleanSearchParser() text = u"wildcards at the beginning of a search term " exprs= [ u"*cards and term", #True u"wild* and term", #True u"not terms", #True u"terms or begin", #False ] for expr in exprs: print (bsp.match(text,expr)) #non-western samples text = u"안녕하세요, 당신은 어떠세요?" 
exprs= [ u"*신은 and 어떠세요", #True u"not 당신은", #False u"당신 or 당", #False ] for expr in exprs: print (bsp.match(text,expr)) ------------------------------------------------------------------------------- Copyright (c) 2006, Estrate, the Netherlands All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Estrate nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. CONTRIBUTORS: - Steven Mooij - Rudolph Froger - Paul McGuire - Guiem Bosch - Francesc Garcia TODO: - add more docs - ask someone to check my English texts - add more kinds of wildcards ('*' at the beginning and '*' inside a word)? 
""" from pyparsing import ( Word, alphanums, CaselessKeyword, Group, Forward, Suppress, OneOrMore, one_of, ParserElement, ) import re ParserElement.enablePackrat() # Updated on 02 Dec 2021 according to ftp://ftp.unicode.org/Public/UNIDATA/Blocks.txt # (includes characters not found in the BasicMultilingualPlane) alphabet_ranges = [ # CYRILIC: https://en.wikipedia.org/wiki/Cyrillic_(Unicode_block) [int("0400", 16), int("04FF", 16)], # ARABIC: https://en.wikipedia.org/wiki/Arabic_(Unicode_block) (Arabic (0600–06FF)+ Syriac (0700–074F)+ Arabic Supplement (0750–077F)) [int("0600", 16), int("07FF", 16)], # THAI: https://en.wikipedia.org/wiki/Thai_(Unicode_block) [int("0E00", 16), int("0E7F", 16)], # JAPANESE : https://en.wikipedia.org/wiki/Japanese_writing_system (Hiragana (3040–309F) + Katakana (30A0–30FF)) [int("3040", 16), int("30FF", 16)], # Enclosed CJK Letters and Months [int("3200", 16), int("32FF", 16)], # CHINESE: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) [int("4E00", 16), int("9FFF", 16)], # KOREAN : https://en.wikipedia.org/wiki/Hangul [int("1100", 16), int("11FF", 16)], [int("3130", 16), int("318F", 16)], [int("A960", 16), int("A97F", 16)], [int("AC00", 16), int("D7AF", 16)], [int("D7B0", 16), int("D7FF", 16)], # Halfwidth and Fullwidth Forms [int("FF00", 16), int("FFEF", 16)], ] class BooleanSearchParser: def __init__(self, only_parse=False): self._methods = { "and": self.evaluateAnd, "or": self.evaluateOr, "not": self.evaluateNot, "parenthesis": self.evaluateParenthesis, "quotes": self.evaluateQuotes, "word": self.evaluateWord, "wordwildcardprefix": self.evaluateWordWildcardPrefix, "wordwildcardsufix": self.evaluateWordWildcardSufix, } self._parser = self.parser() self.text = "" self.words = [] def parser(self): """ This function returns a parser. The grammar should be like most full text search engines (Google, Tsearch, Lucene). 
Grammar: - a query consists of alphanumeric words, with an optional '*' wildcard at the end or the beginning of a word - a sequence of words between quotes is a literal string - words can be used together by using operators ('and' or 'or') - words with operators can be grouped with parenthesis - a word or group of words can be preceded by a 'not' operator - the 'and' operator precedes an 'or' operator - if an operator is missing, use an 'and' operator """ operatorOr = Forward() alphabet = alphanums # support for non-western alphabets for lo, hi in alphabet_ranges: alphabet += "".join(chr(c) for c in range(lo, hi + 1) if not chr(c).isspace()) operatorWord = Group(Word(alphabet + "*")).set_results_name("word*") operatorQuotesContent = Forward() operatorQuotesContent << ((operatorWord + operatorQuotesContent) | operatorWord) operatorQuotes = ( Group(Suppress('"') + operatorQuotesContent + Suppress('"')).set_results_name( "quotes" ) | operatorWord ) operatorParenthesis = ( Group(Suppress("(") + operatorOr + Suppress(")")).set_results_name( "parenthesis" ) | operatorQuotes ) operatorNot = Forward() operatorNot << ( Group(Suppress(CaselessKeyword("not")) + operatorNot).set_results_name( "not" ) | operatorParenthesis ) operatorAnd = Forward() operatorAnd << ( Group( operatorNot + Suppress(CaselessKeyword("and")) + operatorAnd ).set_results_name("and") | Group( operatorNot + OneOrMore(~one_of("and or") + operatorAnd) ).set_results_name("and") | operatorNot ) operatorOr << ( Group( operatorAnd + Suppress(CaselessKeyword("or")) + operatorOr ).set_results_name("or") | operatorAnd ) return operatorOr.parse_string def evaluateAnd(self, argument): return all(self.evaluate(arg) for arg in argument) def evaluateOr(self, argument): return any(self.evaluate(arg) for arg in argument) def evaluateNot(self, argument): return self.GetNot(self.evaluate(argument[0])) def evaluateParenthesis(self, argument): return self.evaluate(argument[0]) def evaluateQuotes(self, argument): """Evaluate 
quoted strings First is does an 'and' on the individual search terms, then it asks the function GetQuoted to only return the subset of ID's that contain the literal string. """ # r = set() r = False search_terms = [] for item in argument: search_terms.append(item[0]) r = r and self.evaluate(item) return self.GetQuotes(" ".join(search_terms), r) def evaluateWord(self, argument): wildcard_count = argument[0].count("*") if wildcard_count > 0: if wildcard_count == 1 and argument[0].startswith("*"): return self.GetWordWildcard(argument[0][1:], method="endswith") if wildcard_count == 1 and argument[0].endswith("*"): return self.GetWordWildcard(argument[0][:-1], method="startswith") else: _regex = argument[0].replace("*", ".+") matched = False for w in self.words: matched = bool(re.search(_regex, w)) if matched: break return matched return self.GetWord(argument[0]) def evaluateWordWildcardPrefix(self, argument): return self.GetWordWildcard(argument[0], method="endswith") def evaluateWordWildcardSufix(self, argument): return self.GetWordWildcard(argument[0], method="startswith") def evaluate(self, argument): return self._methods[argument.getName()](argument) def Parse(self, query): return self.evaluate(self._parser(query)[0]) def GetWord(self, word): return word in self.words def GetWordWildcard(self, word, method="startswith"): matched = False for w in self.words: matched = getattr(w, method)(word) if matched: break return matched """ def GetKeyword(self, name, value): return set() def GetBetween(self, min, max): print (min,max) return set() """ def GetQuotes(self, search_string, tmp_result): return search_string in self.text def GetNot(self, not_set): return not not_set def _split_words(self, text): words = [] """ >>> import string >>> string.punctuation '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' """ # it will keep @, # and # usernames and hashtags can contain dots, so a double check is done r = re.compile(r"[\s{}]+".format(re.escape("!\"$%&'()*+,-/:;<=>?[\\]^`{|}~"))) _words 
= r.split(text) for _w in _words: if "." in _w and not _w.startswith("#") and not _w.startswith("@"): for __w in _w.split("."): words.append(__w) continue words.append(_w) return words def match(self, text, expr): self.text = text self.words = self._split_words(text) return self.Parse(expr) class ParserTest(BooleanSearchParser): """Tests the parser with some search queries tests contains a dictionary with tests and expected results. """ def Test(self): # fmt: off exprs = { "0": "help", "1": "help or hulp", "2": "help and hulp", "3": "help hulp", "4": "help and hulp or hilp", "5": "help or hulp and hilp", "6": "help or hulp or hilp or halp", "7": "(help or hulp) and (hilp or halp)", "8": "help and (hilp or halp)", "9": "(help and (hilp or halp)) or hulp", "10": "not help", "11": "not hulp and halp", "12": "not (help and halp)", "13": '"help me please"', "14": '"help me please" or hulp', "15": '"help me please" or (hulp and halp)', "16": "help*", "17": "help or hulp*", "18": "help* and hulp", "19": "help and hulp* or hilp", "20": "help* or hulp or hilp or halp", "21": "(help or hulp*) and (hilp* or halp)", "22": "help* and (hilp* or halp*)", "23": "(help and (hilp* or halp)) or hulp*", "24": "not help* and halp", "25": "not (help* and helpe*)", "26": '"help* me please"', "27": '"help* me* please" or hulp*', "28": '"help me please*" or (hulp and halp)', "29": '"help me please" not (hulp and halp)', "30": '"help me please" hulp', "31": "help and hilp and not holp", "32": "help hilp not holp", "33": "help hilp and not holp", "34": "*lp and halp", "35": "*신은 and 어떠세요", } texts_matcheswith = { "halp thinks he needs help": [ "25", "22", "20", "21", "11", "17", "16", "23", "34", "1", "0", "5", "7", "6", "9", "8", ], "he needs halp": ["24", "25", "20", "11", "10", "12", "34", "6"], "help": ["25", "20", "12", "17", "16", "1", "0", "5", "6"], "help hilp": [ "25", "22", "20", "32", "21", "12", "17", "16", "19", "31", "23", "1", "0", "5", "4", "7", "6", "9", "8", "33", ], "help 
me please hulp": [ "30", "25", "27", "20", "13", "12", "15", "14", "17", "16", "19", "18", "23", "29", "1", "0", "3", "2", "5", "4", "6", "9", ], "helper": ["20", "10", "12", "16"], "hulp hilp": [ "25", "27", "20", "21", "10", "12", "14", "17", "19", "23", "1", "5", "4", "7", "6", "9", ], "nothing": ["25", "10", "12"], "안녕하세요, 당신은 어떠세요?": ["10", "12", "25", "35"], } # fmt: on all_ok = True for text, matches in texts_matcheswith.items(): _matches = [] for _id, expr in exprs.items(): if self.match(text, expr): _matches.append(_id) test_passed = sorted(matches) == sorted(_matches) if test_passed: print("Passed", repr(text)) else: print("Failed", repr(text), "expected", matches, "matched", _matches) all_ok = all_ok and test_passed # Tests for non western characters, should fail with # pyparsing.exceptions.ParseException under the previous # configuration non_western_exprs = { "0": "*", "1": "ヿ", # Edge character "2": "亀", # Character in CJK block "3": "ヿ or 亀", "4": "ヿ and 亀", "5": "not ヿ" } non_western_texts_matcheswith = { "안녕하세요, 당신은 어떠세요?": ["0", "5"], "ヿ": ["0", "1", "3"], "亀": ["0", "2", "3", "5"], "亀 ヿ": ["0", "1", "2", "3", "4"], } for text, matches in non_western_texts_matcheswith.items(): _matches = [] for _id, expr in non_western_exprs.items(): if self.match(text, expr): _matches.append(_id) test_passed = sorted(matches) == sorted(_matches) if test_passed: print("Passed", repr(text)) else: print("Failed", repr(text), "expected", matches, "matched", _matches) all_ok = all_ok and test_passed return all_ok def main(): if ParserTest().Test(): print("All tests OK") else: print("One or more tests FAILED") raise Exception("One or more tests FAILED") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/btpyparse.py0000644000000000000000000001024114412577542014670 0ustar00""" Pyparsing parser for BibTeX files A standalone parser using pyparsing. 
pyparsing has a simple and expressive syntax so the grammar is easy to read and write. Submitted by Matthew Brett, 2010 Simplified BSD license """ from pyparsing import ( Regex, Suppress, ZeroOrMore, Group, Optional, Forward, SkipTo, CaselessLiteral, Dict, ) class Macro: """Class to encapsulate undefined macro references""" def __init__(self, name): self.name = name def __repr__(self): return 'Macro("%s")' % self.name def __eq__(self, other): return self.name == other.name # Character literals LCURLY, RCURLY, LPAREN, RPAREN, QUOTE, COMMA, AT, EQUALS, HASH = map( Suppress, '{}()",@=#' ) def bracketed(expr): """Return matcher for `expr` between curly brackets or parentheses""" return (LPAREN + expr + RPAREN) | (LCURLY + expr + RCURLY) # Define parser components for strings (the hard bit) chars_no_curly = Regex(r"[^{}]+") chars_no_curly.leaveWhitespace() chars_no_quotecurly = Regex(r'[^"{}]+') chars_no_quotecurly.leaveWhitespace() # Curly string is some stuff without curlies, or nested curly sequences curly_string = Forward() curly_item = Group(curly_string) | chars_no_curly curly_string << LCURLY + ZeroOrMore(curly_item) + RCURLY # quoted string is either just stuff within quotes, or stuff within quotes, within # which there is nested curliness quoted_item = Group(curly_string) | chars_no_quotecurly quoted_string = QUOTE + ZeroOrMore(quoted_item) + QUOTE # Numbers can just be numbers. Only integers though. number = Regex("[0-9]+") # Basis characters (by exclusion) for variable / field names. The following # list of characters is from the btparse documentation any_name = Regex("[^\\s\"#%'(),={}]+") # btparse says, and the test bibs show by experiment, that macro and field names # cannot start with a digit. In fact entry type names cannot start with a digit # either (see tests/bibs). 
Cite keys can start with a digit not_digname = Regex("[^\\d\\s\"#%'(),={}][^\\s\"#%'(),={}]*") # Comment comments out to end of line comment = AT + CaselessLiteral("comment") + Regex(r"[\s{(].*").leaveWhitespace() # The name types with their digiteyness not_dig_lower = not_digname.copy().setParseAction(lambda t: t[0].lower()) macro_def = not_dig_lower.copy() macro_ref = not_dig_lower.copy().setParseAction(lambda t: Macro(t[0].lower())) field_name = not_dig_lower.copy() # Spaces in names mean they cannot clash with field names entry_type = not_dig_lower("entry_type") cite_key = any_name("cite_key") # Number has to be before macro name string = number | macro_ref | quoted_string | curly_string # There can be hash concatenation field_value = string + ZeroOrMore(HASH + string) field_def = Group(field_name + EQUALS + field_value) entry_contents = Dict(ZeroOrMore(field_def + COMMA) + Optional(field_def)) # Entry is surrounded either by parentheses or curlies entry = AT + entry_type + bracketed(cite_key + COMMA + entry_contents) # Preamble is a macro-like thing with no name preamble = AT + CaselessLiteral("preamble") + bracketed(field_value) # Macros (aka strings) macro_contents = macro_def + EQUALS + field_value macro = AT + CaselessLiteral("string") + bracketed(macro_contents) # Implicit comments icomment = SkipTo("@").setParseAction(lambda t: t.insert(0, "icomment")) # entries are last in the list (other than the fallback) because they have # arbitrary start patterns that would match comments, preamble or macro definitions = Group(comment | preamble | macro | entry | icomment) # Start symbol bibfile = ZeroOrMore(definitions) def parse_str(str): return bibfile.parseString(str) if __name__ == "__main__": # Run basic test txt = """ Some introductory text (implicit comment) @ARTICLE{Authors2011, author = {First Author and Second Author and Third Author}, title = {An article about {S}omething}, journal = "Journal of Articles", year = {2011}, volume = {16}, pages = 
{1140--1141}, number = {2} } """ print("\n\n".join(defn.dump() for defn in parse_str(txt))) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/builtin_parse_action_demo.py0000644000000000000000000000146114443622312020052 0ustar00# # builtin_parse_action_demo.py # Copyright, 2012 - Paul McGuire # # Simple example of using builtin functions as parse actions. # import pyparsing as pp ppc = pp.common # make an expression that will match a list of ints (which # will be converted to actual ints by the parse action attached # to integer) nums = ppc.integer[...] test = "2 54 34 2 211 66 43 2 0" print(test) # try each of these builtins as parse actions for fn in (sum, max, min, len, sorted, reversed, list, tuple, set, any, all): if fn is reversed: # reversed returns an iterator, we really want to show the list of items fn = lambda x: list(reversed(x)) # show how each builtin works as a free-standing parse action print(fn.__name__, nums.set_parse_action(fn).parse_string(test)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/cLibHeader.py0000644000000000000000000000152514412577542014646 0ustar00# # cLibHeader.py # # A simple parser to extract API doc info from a C header file # # Copyright, 2012 - Paul McGuire # from pyparsing import ( Word, alphas, alphanums, Combine, oneOf, Optional, delimitedList, Group, Keyword, ) testdata = """ int func1(float *vec, int len, double arg1); int func2(float **arr, float *vec, int len, double arg1, double arg2); """ ident = Word(alphas, alphanums + "_") vartype = Combine(oneOf("float double int char") + Optional(Word("*")), adjacent=False) arglist = delimitedList(Group(vartype("type") + ident("name"))) functionCall = Keyword("int") + ident("name") + "(" + arglist("args") + ")" + ";" for fn, s, e in functionCall.scanString(testdata): print(fn.name) for a in fn.args: print(" - %(name)s 
(%(type)s)" % a) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/chemical_formulas.py0000644000000000000000000000573314443622312016334 0ustar00# # chemicalFormulas.py # # Copyright (c) 2003,2019 Paul McGuire # import pyparsing as pp atomic_weight = { "O": 15.9994, "H": 1.00794, "Na": 22.9897, "Cl": 35.4527, "C": 12.0107, } digits = "0123456789" # Version 1 element = pp.Word(pp.alphas.upper(), pp.alphas.lower(), max=2).set_name("element") # for stricter matching, use this Regex instead # element = Regex("A[cglmrstu]|B[aehikr]?|C[adeflmorsu]?|D[bsy]|" # "E[rsu]|F[emr]?|G[ade]|H[efgos]?|I[nr]?|Kr?|L[airu]|" # "M[dgnot]|N[abdeiop]?|Os?|P[abdmortu]?|R[abefghnu]|" # "S[bcegimnr]?|T[abcehilm]|U(u[bhopqst])?|V|W|Xe|Yb?|Z[nr]") element_ref = pp.Group(element + pp.Opt(pp.Word(digits), default="1")) formula = element_ref[...] def sum_atomic_weights(element_list): return sum(atomic_weight[elem] * int(qty) for elem, qty in element_list) formula.run_tests( """\ H2O C6H5OH NaCl """, full_dump=False, post_parse=lambda _, tokens: f"Molecular weight: {sum_atomic_weights(tokens)}", ) print() # Version 2 - access parsed items by results name element_ref = pp.Group( element("symbol") + pp.Opt(pp.Word(digits), default="1")("qty") ) formula = element_ref[...] 
def sum_atomic_weights_by_results_name(element_list): return sum(atomic_weight[elem.symbol] * int(elem.qty) for elem in element_list) formula.run_tests( """\ H2O C6H5OH NaCl """, full_dump=False, post_parse=lambda _, tokens: f"Molecular weight: {sum_atomic_weights_by_results_name(tokens)}", ) print() # Version 3 - convert integers during parsing process integer = pp.Word(digits).set_name("integer") integer.add_parse_action(lambda t: int(t[0])) element_ref = pp.Group(element("symbol") + pp.Opt(integer, default=1)("qty")) formula = element_ref[...].set_name("chemical_formula") def sum_atomic_weights_by_results_name_with_converted_ints(element_list): return sum(atomic_weight[elem.symbol] * int(elem.qty) for elem in element_list) formula.run_tests( """\ H2O C6H5OH NaCl """, full_dump=False, post_parse=lambda _, tokens: f"Molecular weight: {sum_atomic_weights_by_results_name_with_converted_ints(tokens)}", ) print() # Version 4 - parse and convert integers as subscript digits subscript_digits = "₀₁₂₃₄₅₆₇₈₉" subscript_int_map = {e[1]: e[0] for e in enumerate(subscript_digits)} def cvt_subscript_int(s): ret = 0 for c in s[0]: ret = ret * 10 + subscript_int_map[c] return ret subscript_int = pp.Word(subscript_digits).set_name("subscript") subscript_int.add_parse_action(cvt_subscript_int) element_ref = pp.Group(element("symbol") + pp.Opt(subscript_int, default=1)("qty")) formula = element_ref[1, ...].set_name("chemical_formula") formula.run_tests( """\ H₂O C₆H₅OH NaCl """, full_dump=False, post_parse=lambda _, tokens: f"Molecular weight: {sum_atomic_weights_by_results_name_with_converted_ints(tokens)}", ) print() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/commasep.py0000644000000000000000000000130314412577542014462 0ustar00# commasep.py # # comma-separated list example, to illustrate the advantages of using # the pyparsing comma_separated_list as opposed to string.split(","): # - leading and 
trailing whitespace is implicitly trimmed from list elements # - list elements can be quoted strings, which can safely contain commas without breaking # into separate elements # # Copyright (c) 2004-2016, Paul McGuire # import pyparsing as pp ppc = pp.pyparsing_common testData = [ "a,b,c,100.2,,3", "d, e, j k , m ", "'Hello, World', f, g , , 5.1,x", "John Doe, 123 Main St., Cleveland, Ohio", "Jane Doe, 456 St. James St., Los Angeles , California ", "", ] ppc.comma_separated_list.runTests(testData) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/configParse.py0000644000000000000000000000377314412577542015133 0ustar00# # configparse.py # # an example of using the parsing module to be able to process a .INI configuration file # # Copyright (c) 2003, Paul McGuire # from pyparsing import ( Literal, Word, ZeroOrMore, Group, Dict, Optional, printables, ParseException, restOfLine, empty, ) import pprint inibnf = None def inifile_BNF(): global inibnf if not inibnf: # punctuation lbrack = Literal("[").suppress() rbrack = Literal("]").suppress() equals = Literal("=").suppress() semi = Literal(";") comment = semi + Optional(restOfLine) nonrbrack = "".join([c for c in printables if c != "]"]) + " \t" nonequals = "".join([c for c in printables if c != "="]) + " \t" sectionDef = lbrack + Word(nonrbrack) + rbrack keyDef = ~lbrack + Word(nonequals) + equals + empty + restOfLine # strip any leading or trailing blanks from key def stripKey(tokens): tokens[0] = tokens[0].strip() keyDef.setParseAction(stripKey) # using Dict will allow retrieval of named data fields as attributes of the parsed results inibnf = Dict(ZeroOrMore(Group(sectionDef + Dict(ZeroOrMore(Group(keyDef)))))) inibnf.ignore(comment) return inibnf pp = pprint.PrettyPrinter(2) def test(strng): print(strng) try: iniFile = open(strng) iniData = "".join(iniFile.readlines()) bnf = inifile_BNF() tokens = bnf.parseString(iniData) 
pp.pprint(tokens.asList()) except ParseException as err: print(err.line) print(" " * (err.column - 1) + "^") print(err) iniFile.close() print() return tokens if __name__ == "__main__": ini = test("setup.ini") print("ini['Startup']['modemid'] =", ini["Startup"]["modemid"]) print("ini.Startup =", ini.Startup) print("ini.Startup.modemid =", ini.Startup.modemid) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/cpp_enum_parser.py0000644000000000000000000000256314443622312016037 0ustar00# # cpp_enum_parser.py # # Posted by Mark Tolonen on comp.lang.python in August, 2009, # Used with permission. # # Parser that scans through C or C++ code for enum definitions, and # generates corresponding Python constant definitions. # # import pyparsing as pp # sample string with enums and other stuff sample = """ stuff before enum hello { Zero, One, Two, Three, Five=5, Six, Ten=10 }; in the middle enum blah { alpha, beta, gamma = 10 , zeta = 50 }; at the end """ # syntax we don't want to see in the final parse tree LBRACE, RBRACE, EQ, COMMA = pp.Suppress.using_each("{}=,") _enum = pp.Suppress("enum") identifier = pp.Word(pp.alphas + "_", pp.alphanums + "_") integer = pp.Word(pp.nums) enumValue = pp.Group(identifier("name") + pp.Optional(EQ + integer("value"))) enumList = pp.Group(enumValue + (COMMA + enumValue)[...]) enum = _enum + identifier("enum") + LBRACE + enumList("names") + RBRACE # find instances of enums ignoring other syntax for item, start, stop in enum.scan_string(sample): idx = 0 for entry in item.names: if entry.value != "": idx = int(entry.value) print("%s_%s = %d" % (item.enum.upper(), entry.name.upper(), idx)) idx += 1 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/cuneiform_python.py0000644000000000000000000000511114412577542016247 0ustar00# # cuneiform_python.py # # Example showing how to create a custom Unicode 
set for parsing # # Copyright Paul McGuire, 2021 # from typing import List, Tuple import pyparsing as pp class Cuneiform(pp.unicode_set): """Unicode set for Cuneiform Character Range""" _ranges: List[Tuple[int, ...]] = [ (0x10380, 0x103d5), (0x12000, 0x123FF), (0x12400, 0x1247F), ] # list out all valid identifier characters # print(Cuneiform.identchars) """ Simple Cuneiform Python language transformer Define Cuneiform "words" print: 𒄑𒉿𒅔𒋫 hello: 𒀄𒂖𒆷𒁎 world: 𒍟𒁎𒉿𒆷𒀳 def: 𒁴𒈫 """ # uncomment to show parse-time debugging # pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions) # define a MINIMAL Python parser LPAR, RPAR, COLON, EQ = map(pp.Suppress, "():=") def_ = pp.Keyword("𒁴𒈫", ident_chars=Cuneiform.identbodychars).set_name("def") any_keyword = def_ ident = (~any_keyword) + pp.Word( Cuneiform.identchars, Cuneiform.identbodychars, asKeyword=True ) str_expr = pp.infix_notation( pp.QuotedString('"') | pp.common.integer, [ ("*", 2, pp.OpAssoc.LEFT), ("+", 2, pp.OpAssoc.LEFT), ], ) rvalue = pp.Forward() fn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name("fn_call") rvalue <<= fn_call | ident | str_expr | pp.common.number assignment_stmt = ident + EQ + rvalue stmt = pp.Group(fn_call | assignment_stmt).set_name("stmt") fn_def = pp.Group( def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR) + COLON ).set_name("fn_def") fn_body = pp.IndentedBlock(stmt).set_name("fn_body") fn_expr = pp.Group(fn_def + pp.Group(fn_body)) script = fn_expr[...] + stmt[...] 
# parse some Python written in Cuneiform cuneiform_hello_world = r""" 𒁴𒈫 𒀄𒂖𒆷𒁎(): 𒀁 = "𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\n" * 3 𒄑𒉿𒅔𒋫(𒀁) 𒀄𒂖𒆷𒁎()""" script.parseString(cuneiform_hello_world).pprint(width=40) # use transform_string to convert keywords and builtins to runnable Python names_map = { "𒄑𒉿𒅔𒋫": "print", } ident.add_parse_action(lambda t: names_map.get(t[0], t[0])) def_.add_parse_action(lambda: "def") print("\nconvert Cuneiform Python to executable Python") transformed = ( # always put ident last (def_ | ident) .ignore(pp.quoted_string) .transform_string(cuneiform_hello_world) .strip() ) print( "=================\n" + cuneiform_hello_world.strip() + "\n=================\n" + transformed + "\n=================\n" ) print("# run transformed Python") exec(transformed) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/datetime_parse_actions.py0000644000000000000000000000374514443622312017366 0ustar00# parseActions.py # # A sample program a parser to match a date string of the form "YYYY/MM/DD", # and return it as a datetime, or raise an exception if not a valid date. # # Copyright 2012, Paul T. 
McGuire # from datetime import datetime import pyparsing as pp from pyparsing import pyparsing_common as ppc # define an integer string, and a parse action to convert it # to an integer at parse time integer = pp.Word(pp.nums).set_name("integer") def convert_to_int(tokens): # no need to test for validity - we can't get here # unless tokens[0] contains all numeric digits return int(tokens[0]) integer.set_parse_action(convert_to_int) # or can be written as one line as # integer = Word(nums).set_parse_action(lambda t: int(t[0])) # define a pattern for a year/month/day date date_expr = integer("year") + "/" + integer("month") + "/" + integer("day") date_expr.ignore(pp.python_style_comment) def convert_to_datetime(s, loc, tokens): try: # note that the year, month, and day fields were already # converted to ints from strings by the parse action defined # on the integer expression above return datetime(tokens.year, tokens.month, tokens.day).date() except Exception as ve: errmsg = "'%s/%s/%s' is not a valid date, %s" % ( tokens.year, tokens.month, tokens.day, ve, ) raise pp.ParseException(s, loc, errmsg) date_expr.set_parse_action(convert_to_datetime) date_expr.run_tests( """\ 2000/1/1 # invalid month 2000/13/1 # 1900 was not a leap year 1900/2/29 # but 2000 was 2000/2/29 """ ) # if dates conform to ISO8601, use definitions in pyparsing_common date_expr = ppc.iso8601_date.set_parse_action(ppc.convert_to_date()) date_expr.ignore(pp.python_style_comment) date_expr.run_tests( """\ 2000-01-01 # invalid month 2000-13-01 # 1900 was not a leap year 1900-02-29 # but 2000 was 2000-02-29 """ ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/decaf_parser.py0000644000000000000000000001664714412577542015315 0ustar00# # decaf_parser.py # # Rudimentary parser for decaf language, used in Stanford University CS143 # 
(https://web.stanford.edu/class/archive/cs/cs143/cs143.1128/handouts/030%20Decaf%20Specification.pdf) # # To convert this parser into one that gives more of an AST, change all the Group wrappers to add parse # actions that will result in ASTNode classes, or statement-specific subclasses. # # Copyright 2018, Paul McGuire # """ Program ::= Decl+ Decl ::= VariableDecl | FunctionDecl | ClassDecl | InterfaceDecl VariableDecl ::= Variable ; Variable ::= Type ident Type ::= int | double | bool | string | ident | Type [] FunctionDecl ::= Type ident ( Formals ) StmtBlock | void ident ( Formals ) StmtBlock Formals ::= Variable+, | e ClassDecl ::= class ident { Field* } Field ::= VariableDecl | FunctionDecl InterfaceDecl ::= interface ident { Prototype* } Prototype ::= Type ident ( Formals ) ; | void ident ( Formals ) ; StmtBlock ::= { VariableDecl* Stmt* } Stmt ::= ; | IfStmt | WhileStmt | ForStmt | BreakStmt | ReturnStmt | PrintStmt | StmtBlock IfStmt ::= if ( Expr ) Stmt WhileStmt ::= while ( Expr ) Stmt ForStmt ::= for ( ; Expr ; ) Stmt ReturnStmt ::= return ; BreakStmt ::= break ; PrintStmt ::= Print ( Expr+, ) ; Expr ::= LValue = Expr | Constant | LValue | this | Call | ( Expr ) | Expr + Expr | Expr - Expr | Expr * Expr | Expr / Expr | Expr % Expr | - Expr | Expr < Expr | Expr <= Expr | Expr > Expr | Expr >= Expr | Expr == Expr | Expr != Expr | Expr && Expr | Expr || Expr | ! Expr | ReadInteger ( ) | ReadLine ( ) | new ident | NewArray ( Expr , Typev) LValue ::= ident | Expr . ident | Expr [ Expr ] Call ::= ident ( Actuals ) | Expr . 
ident ( Actuals ) Actuals ::= Expr+, | e Constant ::= intConstant | doubleConstant | boolConstant | stringConstant | null """ import pyparsing as pp from pyparsing import pyparsing_common as ppc pp.ParserElement.enablePackrat() # keywords keywords = ( VOID, INT, DOUBLE, BOOL, STRING, CLASS, INTERFACE, NULL, THIS, EXTENDS, IMPLEMENTS, FOR, WHILE, IF, ELSE, RETURN, BREAK, NEW, NEWARRAY, PRINT, READINTEGER, READLINE, TRUE, FALSE, ) = map( pp.Keyword, """void int double bool string class interface null this extends implements or while if else return break new NewArray Print ReadInteger ReadLine true false""".split(), ) keywords = pp.MatchFirst(list(keywords)) LPAR, RPAR, LBRACE, RBRACE, LBRACK, RBRACK, DOT, EQ, COMMA, SEMI = map( pp.Suppress, "(){}[].=,;" ) hexConstant = pp.Regex(r"0[xX][0-9a-fA-F]+").addParseAction(lambda t: int(t[0][2:], 16)) intConstant = hexConstant | ppc.integer doubleConstant = ppc.real boolConstant = TRUE | FALSE stringConstant = pp.dblQuotedString null = NULL constant = doubleConstant | boolConstant | intConstant | stringConstant | null ident = ~keywords + pp.Word(pp.alphas, pp.alphanums + "_") type_ = pp.Group((INT | DOUBLE | BOOL | STRING | ident) + pp.ZeroOrMore("[]")) variable = type_ + ident variable_decl = variable + SEMI expr = pp.Forward() expr_parens = pp.Group(LPAR + expr + RPAR) actuals = pp.Optional(pp.delimitedList(expr)) call = pp.Group( ident("call_ident") + LPAR + actuals("call_args") + RPAR | (expr_parens + pp.ZeroOrMore(DOT + ident))("call_ident_expr") + LPAR + actuals("call_args") + RPAR ) lvalue = ( (ident | expr_parens) + pp.ZeroOrMore(DOT + (ident | expr_parens)) + pp.ZeroOrMore(LBRACK + expr + RBRACK) ) assignment = pp.Group(lvalue("lhs") + EQ + expr("rhs")) read_integer = pp.Group(READINTEGER + LPAR + RPAR) read_line = pp.Group(READLINE + LPAR + RPAR) new_statement = pp.Group(NEW + ident) new_array = pp.Group(NEWARRAY + LPAR + expr + COMMA + type_ + RPAR) rvalue = constant | call | read_integer | read_line | 
new_statement | new_array | ident arith_expr = pp.infixNotation( rvalue, [ ( "-", 1, pp.opAssoc.RIGHT, ), ( pp.oneOf("* / %"), 2, pp.opAssoc.LEFT, ), ( pp.oneOf("+ -"), 2, pp.opAssoc.LEFT, ), ], ) comparison_expr = pp.infixNotation( arith_expr, [ ( "!", 1, pp.opAssoc.RIGHT, ), ( pp.oneOf("< > <= >="), 2, pp.opAssoc.LEFT, ), ( pp.oneOf("== !="), 2, pp.opAssoc.LEFT, ), ( pp.oneOf("&&"), 2, pp.opAssoc.LEFT, ), ( pp.oneOf("||"), 2, pp.opAssoc.LEFT, ), ], ) expr <<= ( assignment | call | THIS | comparison_expr | arith_expr | lvalue | constant | read_integer | read_line | new_statement | new_array ) stmt = pp.Forward() print_stmt = pp.Group( PRINT("statement") + LPAR + pp.Group(pp.Optional(pp.delimitedList(expr)))("args") + RPAR + SEMI ) break_stmt = pp.Group(BREAK("statement") + SEMI) return_stmt = pp.Group(RETURN("statement") + expr + SEMI) for_stmt = pp.Group( FOR("statement") + LPAR + pp.Optional(expr) + SEMI + expr + SEMI + pp.Optional(expr) + RPAR + stmt ) while_stmt = pp.Group(WHILE("statement") + LPAR + expr + RPAR + stmt) if_stmt = pp.Group( IF("statement") + LPAR + pp.Group(expr)("condition") + RPAR + pp.Group(stmt)("then_statement") + pp.Group(pp.Optional(ELSE + stmt))("else_statement") ) stmt_block = pp.Group( LBRACE + pp.ZeroOrMore(variable_decl) + pp.ZeroOrMore(stmt) + RBRACE ) stmt <<= ( if_stmt | while_stmt | for_stmt | break_stmt | return_stmt | print_stmt | stmt_block | pp.Group(expr + SEMI) ) formals = pp.Optional(pp.delimitedList(variable)) prototype = pp.Group( (type_ | VOID)("return_type") + ident("function_name") + LPAR + formals("args") + RPAR + SEMI )("prototype") function_decl = pp.Group( (type_ | VOID)("return_type") + ident("function_name") + LPAR + formals("args") + RPAR + stmt_block("body") )("function_decl") interface_decl = pp.Group( INTERFACE + ident("interface_name") + LBRACE + pp.ZeroOrMore(prototype)("prototypes") + RBRACE )("interface") field = variable_decl | function_decl class_decl = pp.Group( CLASS + ident("class_name") + 
pp.Optional(EXTENDS + ident)("extends") + pp.Optional(IMPLEMENTS + pp.delimitedList(ident))("implements") + LBRACE + pp.ZeroOrMore(field)("fields") + RBRACE )("class_decl") decl = variable_decl | function_decl | class_decl | interface_decl | prototype program = pp.OneOrMore(pp.Group(decl)) decaf_parser = program stmt.runTests( """\ sin(30); a = 1; b = 1 + 1; b = 1 != 2 && false; print("A"); a.b = 100; a.b = 100.0; a[100] = b; a[0][0] = 2; a = 0x1234; """ ) test_program = """ void getenv(string var); int main(string[] args) { if (a > 100) { Print(a, " is too big"); } else if (a < 100) { Print(a, " is too small"); } else { Print(a, "just right!"); } } """ print(decaf_parser.parseString(test_program).dump()) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/delta_time.py0000644000000000000000000003752114443622312014766 0ustar00# deltaTime.py # # Parser to convert a conversational time reference such as "in a minute" or # "noon tomorrow" and convert it to a Python datetime. 
The returned # ParseResults object contains # - original - the original time expression string # - computed_dt - the Python datetime representing the computed time # - relative_to - the reference "now" time # - time_offset - the difference between the reference time and the computed time # # BNF: # time_and_day ::= time_reference [day_reference] | day_reference 'at' absolute_time_of_day # day_reference ::= absolute_day_reference | relative_day_reference # absolute_day_reference ::= 'today' | 'tomorrow' | 'yesterday' | ('next' | 'last') weekday_name # relative_day_reference ::= 'in' qty day_units # | qty day_units 'ago' # | 'qty day_units ('from' | 'before' | 'after') absolute_day_reference # day_units ::= 'days' | 'weeks' # # time_reference ::= absolute_time_of_day | relative_time_reference # relative_time_reference ::= qty time_units ('from' | 'before' | 'after') absolute_time_of_day # | qty time_units 'ago' # | 'in' qty time_units # time_units ::= 'hours' | 'minutes' | 'seconds' # absolute_time_of_day ::= 'noon' | 'midnight' | 'now' | absolute_time # absolute_time ::= 24hour_time | hour ("o'clock" | ':' minute) ('AM'|'PM') # # qty ::= integer | integer_words | 'a couple of' | 'a' | 'the' # # Copyright 2010, 2019 by Paul McGuire # from datetime import datetime, time, timedelta import pyparsing as pp import calendar __all__ = ["time_expression"] # basic grammar definitions def make_integer_word_expr(int_name, int_value): return pp.CaselessKeyword(int_name).add_parse_action(pp.replaceWith(int_value)) integer_word = pp.MatchFirst( make_integer_word_expr(int_str, int_value) for int_value, int_str in enumerate( "one two three four five six seven eight nine ten" " eleven twelve thirteen fourteen fifteen sixteen" " seventeen eighteen nineteen twenty".split(), start=1, ) ).set_name("integer_word") integer = pp.pyparsing_common.integer | integer_word integer.set_name("numeric") CK = pp.CaselessKeyword CL = pp.CaselessLiteral today, tomorrow, yesterday, noon, midnight, now 
= CK.using_each( "today tomorrow yesterday noon midnight now".split() ) def plural(s): return CK(s) | CK(s + "s").add_parse_action(pp.replaceWith(s)) week, day, hour, minute, second = map(plural, "week day hour minute second".split()) time_units = hour | minute | second any_time_units = (week | day | time_units).set_name("any_time_units") am = CL("am") pm = CL("pm") COLON = pp.Suppress(":") in_ = CK("in").set_parse_action(pp.replaceWith(1)) from_ = CK("from").set_parse_action(pp.replaceWith(1)) before = CK("before").set_parse_action(pp.replaceWith(-1)) after = CK("after").set_parse_action(pp.replaceWith(1)) ago = CK("ago").set_parse_action(pp.replaceWith(-1)) next_ = CK("next").set_parse_action(pp.replaceWith(1)) last_ = CK("last").set_parse_action(pp.replaceWith(-1)) at_ = CK("at") on_ = CK("on") couple = ( (pp.Opt(CK("a")) + CK("couple") + pp.Opt(CK("of"))) .set_parse_action(pp.replaceWith(2)) .set_name("couple") ) a_qty = (CK("a") | CK("an")).set_parse_action(pp.replaceWith(1)) the_qty = CK("the").set_parse_action(pp.replaceWith(1)) qty = pp.ungroup( (integer | couple | a_qty | the_qty).set_name("qty_expression") ).set_name("qty") time_ref_present = pp.Empty().add_parse_action(pp.replace_with(True))( "time_ref_present" ) def fill_24hr_time_fields(t): t["HH"] = t[0] t["MM"] = t[1] t["SS"] = 0 t["ampm"] = ("am", "pm")[t.HH >= 12] def fill_default_time_fields(t): for fld in "HH MM SS".split(): if fld not in t: t[fld] = 0 # get weekday names from the calendar module weekday_name_list = list(calendar.day_name) weekday_name = pp.one_of(weekday_name_list).set_name("weekday_name") # expressions for military 2400 time _24hour_time = ~(pp.Word(pp.nums) + any_time_units).set_name("numbered_time_units") + pp.Word( pp.nums, exact=4, as_keyword=True ).set_name("HHMM").add_parse_action( lambda t: [int(t[0][:2]), int(t[0][2:])], fill_24hr_time_fields ) _24hour_time.set_name("0000 time") ampm = am | pm timespec = ( integer("HH") + pp.Opt(CK("o'clock") | COLON + integer("MM") + 
pp.Opt(COLON + integer("SS"))) + (am | pm)("ampm") ).add_parse_action(fill_default_time_fields) absolute_time = _24hour_time | timespec absolute_time.set_name("absolute time") absolute_time_of_day = noon | midnight | now | absolute_time absolute_time_of_day.set_name("time of day") def add_computed_time(t): if t[0] in "now noon midnight".split(): t["computed_time"] = { "now": datetime.now().time().replace(microsecond=0), "noon": time(hour=12), "midnight": time(), }[t[0]] else: t["HH"] = {"am": int(t["HH"]) % 12, "pm": int(t["HH"]) % 12 + 12}[t.ampm] t["computed_time"] = time(hour=t.HH, minute=t.MM, second=t.SS) absolute_time_of_day.add_parse_action(add_computed_time) # relative_time_reference ::= qty time_units ('ago' | ('from' | 'before' | 'after') absolute_time_of_day) # | 'in' qty time_units time_units = (hour | minute | second).set_name("time unit") relative_time_reference = ( ( qty("qty") + time_units("units") + ( ago("dir") | (from_ | before | after)("dir") + pp.Group(absolute_time_of_day)("ref_time") ) ) | in_("dir") + qty("qty") + time_units("units") ).set_name("relative time") def compute_relative_time(t): if "ref_time" not in t: t["ref_time"] = datetime.now().time().replace(microsecond=0) else: t["ref_time"] = t.ref_time.computed_time delta_seconds = {"hour": 3600, "minute": 60, "second": 1}[t.units] * t.qty t["time_delta"] = timedelta(seconds=t.dir * delta_seconds) relative_time_reference.add_parse_action(compute_relative_time) time_reference = absolute_time_of_day | relative_time_reference time_reference.set_name("time reference") def add_default_time_ref_fields(t): if "time_delta" not in t: t["time_delta"] = timedelta() time_reference.add_parse_action(add_default_time_ref_fields) # absolute_day_reference ::= 'today' | 'tomorrow' | 'yesterday' | ('next' | 'last') weekday_name # day_units ::= 'days' | 'weeks' day_units = day | week weekday_reference = pp.Opt(next_ | last_, 1)("dir") + weekday_name("day_name") def convert_abs_day_reference_to_date(t): now 
= datetime.now().replace(microsecond=0) # handle day reference by weekday name if "day_name" in t: todaynum = now.weekday() daynames = [n.lower() for n in weekday_name_list] nameddaynum = daynames.index(t.day_name.lower()) # compute difference in days - if current weekday name is referenced, then # computed 0 offset is changed to 7 if t.dir > 0: daydiff = (nameddaynum + 7 - todaynum) % 7 or 7 else: daydiff = -((todaynum + 7 - nameddaynum) % 7 or 7) t["abs_date"] = datetime(now.year, now.month, now.day) + timedelta(daydiff) else: name = t[0] t["abs_date"] = { "now": now, "today": datetime(now.year, now.month, now.day), "yesterday": datetime(now.year, now.month, now.day) + timedelta(days=-1), "tomorrow": datetime(now.year, now.month, now.day) + timedelta(days=+1), }[name] absolute_day_reference = ( today | tomorrow | yesterday | now + time_ref_present | weekday_reference ) absolute_day_reference.add_parse_action(convert_abs_day_reference_to_date) absolute_day_reference.set_name("absolute day") # relative_day_reference ::= 'in' qty day_units # | qty day_units # ('ago' # | ('from' | 'before' | 'after') absolute_day_reference) relative_day_reference = in_("dir") + qty("qty") + day_units("units") | qty( "qty" ) + day_units("units") + ( ago("dir") | ((from_ | before | after)("dir") + absolute_day_reference("ref_day")) ) relative_day_reference.set_name("relative day") def compute_relative_date(t): now = datetime.now().replace(microsecond=0) if "ref_day" in t: t["computed_date"] = t.ref_day else: t["computed_date"] = now.date() day_diff = t.dir * t.qty * {"week": 7, "day": 1}[t.units] t["date_delta"] = timedelta(days=day_diff) relative_day_reference.add_parse_action(compute_relative_date) # combine expressions for absolute and relative day references day_reference = relative_day_reference | absolute_day_reference day_reference.set_name("day reference") def add_default_date_fields(t): if "date_delta" not in t: t["date_delta"] = timedelta() 
day_reference.add_parse_action(add_default_date_fields) # combine date and time expressions into single overall parser time_and_day = time_reference + time_ref_present + pp.Opt( pp.Opt(on_) + day_reference ) | day_reference + pp.Opt(at_ + absolute_time_of_day + time_ref_present) time_and_day.set_name("time and day") # parse actions for total time_and_day expression def save_original_string(s, l, t): # save original input string and reference time t["original"] = " ".join(s.strip().split()) t["relative_to"] = datetime.now().replace(microsecond=0) def compute_timestamp(t): # accumulate values from parsed time and day subexpressions - fill in defaults for omitted parts now = datetime.now().replace(microsecond=0) if "computed_time" not in t: t["computed_time"] = t.ref_time or now.time() if "abs_date" not in t: t["abs_date"] = now # roll up all fields and apply any time or day deltas t["computed_dt"] = ( t.abs_date.replace( hour=t.computed_time.hour, minute=t.computed_time.minute, second=t.computed_time.second, ) + (t.time_delta or timedelta(0)) + (t.date_delta or timedelta(0)) ) # if time just given in terms of day expressions, zero out time fields if not t.time_ref_present: t["computed_dt"] = t.computed_dt.replace(hour=0, minute=0, second=0) # add results name compatible with previous version t["calculatedTime"] = t.computed_dt # add time_offset fields t["time_offset"] = t.computed_dt - t.relative_to def remove_temp_keys(t): # strip out keys that are just used internally all_keys = list(t.keys()) for k in all_keys: if k not in ( "computed_dt", "original", "relative_to", "time_offset", "calculatedTime", ): del t[k] time_and_day.add_parse_action(save_original_string, compute_timestamp, remove_temp_keys) time_expression = time_and_day # fmt: off def main(): current_time = datetime.now() # test grammar tests = """\ today tomorrow yesterday the day before yesterday the day after tomorrow 2 weeks after today in a couple of days a couple of days from now a couple of days 
from today in a day 3 days ago 3 days from now a day ago an hour ago in 2 weeks in 3 days at 5pm now 10 minutes ago 10 minutes from now in 10 minutes in a minute in a couple of minutes 20 seconds ago in 30 seconds in an hour in a couple hours in a couple days 20 seconds before noon ten seconds before noon tomorrow noon midnight noon tomorrow 6am tomorrow 0800 yesterday 1700 tomorrow 12:15 AM today 3pm 2 days from today a week from today a week from now three weeks ago noon next Sunday noon Sunday noon last Sunday 2pm next Sunday next Sunday at 2pm last Sunday at 2pm 10 seconds ago 100 seconds ago 1000 seconds ago 10000 seconds ago """ time_of_day = timedelta( hours=current_time.hour, minutes=current_time.minute, seconds=current_time.second, ) expected = { "now": timedelta(0), "10 seconds ago": timedelta(seconds=-10), "100 seconds ago": timedelta(seconds=-100), "1000 seconds ago": timedelta(seconds=-1000), "10000 seconds ago": timedelta(seconds=-10000), "10 minutes ago": timedelta(minutes=-10), "10 minutes from now": timedelta(minutes=10), "in 10 minutes": timedelta(minutes=10), "in a minute": timedelta(minutes=1), "in a couple of minutes": timedelta(minutes=2), "20 seconds ago": timedelta(seconds=-20), "in 30 seconds": timedelta(seconds=30), "in an hour": timedelta(hours=1), "in a couple hours": timedelta(hours=2), "a week from now": timedelta(days=7), "3 days from now": timedelta(days=3), "a couple of days from now": timedelta(days=2), "an hour ago": timedelta(hours=-1), "in a couple days": timedelta(days=2) - time_of_day, "a week from today": timedelta(days=7) - time_of_day, "three weeks ago": timedelta(days=-21) - time_of_day, "a day ago": timedelta(days=-1) - time_of_day, "in a couple of days": timedelta(days=2) - time_of_day, "a couple of days from today": timedelta(days=2) - time_of_day, "2 weeks after today": timedelta(days=14) - time_of_day, "in 2 weeks": timedelta(days=14) - time_of_day, "the day after tomorrow": timedelta(days=2) - time_of_day, 
"tomorrow": timedelta(days=1) - time_of_day, "the day before yesterday": timedelta(days=-2) - time_of_day, "8am the day after tomorrow": timedelta(days=+2) - time_of_day + timedelta(hours=8), "yesterday": timedelta(days=-1) - time_of_day, "today": -time_of_day, "midnight": -time_of_day, "in a day": timedelta(days=1) - time_of_day, "3 days ago": timedelta(days=-3) - time_of_day, "noon tomorrow": timedelta(days=1) - time_of_day + timedelta(hours=12), "6am tomorrow": timedelta(days=1) - time_of_day + timedelta(hours=6), "0800 yesterday": timedelta(days=-1) - time_of_day + timedelta(hours=8), "1700 tomorrow": timedelta(days=1) - time_of_day + timedelta(hours=17), "12:15 AM today": -time_of_day + timedelta(minutes=15), "3pm 2 days from today": timedelta(days=2) - time_of_day + timedelta(hours=15), "ten seconds before noon tomorrow": timedelta(days=1) - time_of_day + timedelta(hours=12) + timedelta(seconds=-10), "20 seconds before noon": -time_of_day + timedelta(hours=12) + timedelta(seconds=-20), "in 3 days at 5pm": timedelta(days=3) - time_of_day + timedelta(hours=17), } # fmt: on def verify_offset(instring, parsed): time_epsilon = timedelta(seconds=1) if instring in expected: # allow up to a second time discrepancy due to test processing time if (parsed.time_offset - expected[instring]) <= time_epsilon: parsed["verify_offset"] = "PASS" else: parsed["verify_offset"] = "FAIL" print("(relative to %s)" % datetime.now()) success, report = time_expression.runTests(tests, postParse=verify_offset) assert success fails = [] for test, rpt in report: if rpt.get("verify_offset", "PASS") != "PASS": fails.append((test, rpt)) if fails: print("\nFAILED") print("\n".join("- " + test for test, rpt in fails)) assert not fails if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/dfmparse.py0000644000000000000000000001605414412577542014470 0ustar00""" This module can parse a Delphi 
Form (dfm) file. The main is used in experimenting (to find which files fail to parse, and where), but isn't useful for anything else. """ __version__ = "1.0" __author__ = "Daniel 'Dang' Griffith " from pyparsing import ( Literal, CaselessLiteral, Word, delimitedList, Optional, Combine, Group, alphas, nums, alphanums, Forward, oneOf, OneOrMore, ZeroOrMore, CharsNotIn, ) # This converts DFM character constants into Python string (unicode) values. def to_chr(x): """chr(x) if 0 < x < 128 ; unicode(x) if x > 127.""" return 0 < x < 128 and chr(x) or eval("u'\\u%d'" % x) ################# # BEGIN GRAMMAR ################# COLON = Literal(":").suppress() CONCAT = Literal("+").suppress() EQUALS = Literal("=").suppress() LANGLE = Literal("<").suppress() LBRACE = Literal("[").suppress() LPAREN = Literal("(").suppress() PERIOD = Literal(".").suppress() RANGLE = Literal(">").suppress() RBRACE = Literal("]").suppress() RPAREN = Literal(")").suppress() CATEGORIES = CaselessLiteral("categories").suppress() END = CaselessLiteral("end").suppress() FONT = CaselessLiteral("font").suppress() HINT = CaselessLiteral("hint").suppress() ITEM = CaselessLiteral("item").suppress() OBJECT = CaselessLiteral("object").suppress() attribute_value_pair = Forward() # this is recursed in item_list_entry simple_identifier = Word(alphas, alphanums + "_") identifier = Combine(simple_identifier + ZeroOrMore(Literal(".") + simple_identifier)) object_name = identifier object_type = identifier # Integer and floating point values are converted to Python longs and floats, respectively. int_value = Combine(Optional("-") + Word(nums)).setParseAction( lambda s, l, t: [int(t[0])] ) float_value = Combine( Optional("-") + Optional(Word(nums)) + "." + Word(nums) ).setParseAction(lambda s, l, t: [float(t[0])]) number_value = float_value | int_value # Base16 constants are left in string form, including the surrounding braces. 
base16_value = Combine( Literal("{") + OneOrMore(Word("0123456789ABCDEFabcdef")) + Literal("}"), adjacent=False, ) # This is the first part of a hack to convert the various delphi partial sglQuotedStrings # into a single sglQuotedString equivalent. The gist of it is to combine # all sglQuotedStrings (with their surrounding quotes removed (suppressed)) # with sequences of #xyz character constants, with "strings" concatenated # with a '+' sign. unquoted_sglQuotedString = Combine( Literal("'").suppress() + ZeroOrMore(CharsNotIn("'\n\r")) + Literal("'").suppress() ) # The parse action on this production converts repetitions of constants into a single string. pound_char = Combine( OneOrMore( (Literal("#").suppress() + Word(nums)).setParseAction( lambda s, l, t: to_chr(int(t[0])) ) ) ) # This is the second part of the hack. It combines the various "unquoted" # partial strings into a single one. Then, the parse action puts # a single matched pair of quotes around it. delphi_string = Combine( OneOrMore(CONCAT | pound_char | unquoted_sglQuotedString), adjacent=False ).setParseAction(lambda s, l, t: "'%s'" % t[0]) string_value = delphi_string | base16_value list_value = ( LBRACE + Optional(Group(delimitedList(identifier | number_value | string_value))) + RBRACE ) paren_list_value = ( LPAREN + ZeroOrMore(identifier | number_value | string_value) + RPAREN ) item_list_entry = ITEM + ZeroOrMore(attribute_value_pair) + END item_list = LANGLE + ZeroOrMore(item_list_entry) + RANGLE generic_value = identifier value = ( item_list | number_value | string_value | list_value | paren_list_value | generic_value ) category_attribute = CATEGORIES + PERIOD + oneOf("strings itemsvisibles visibles", True) event_attribute = oneOf( "onactivate onclosequery onclose oncreate ondeactivate onhide onshow", True ) font_attribute = FONT + PERIOD + oneOf("charset color height name style", True) hint_attribute = HINT layout_attribute = oneOf("left top width height", True) generic_attribute = identifier 
attribute = ( category_attribute | event_attribute | font_attribute | hint_attribute | layout_attribute | generic_attribute ) category_attribute_value_pair = category_attribute + EQUALS + paren_list_value event_attribute_value_pair = event_attribute + EQUALS + value font_attribute_value_pair = font_attribute + EQUALS + value hint_attribute_value_pair = hint_attribute + EQUALS + value layout_attribute_value_pair = layout_attribute + EQUALS + value generic_attribute_value_pair = attribute + EQUALS + value attribute_value_pair << Group( category_attribute_value_pair | event_attribute_value_pair | font_attribute_value_pair | hint_attribute_value_pair | layout_attribute_value_pair | generic_attribute_value_pair ) object_declaration = Group(OBJECT + object_name + COLON + object_type) object_attributes = Group(ZeroOrMore(attribute_value_pair)) nested_object = Forward() object_definition = ( object_declaration + object_attributes + ZeroOrMore(nested_object) + END ) nested_object << Group(object_definition) ################# # END GRAMMAR ################# def printer(s, loc, tok): print(tok, end=" ") return tok def get_filename_list(tf): import sys, glob if tf == None: if len(sys.argv) > 1: tf = sys.argv[1:] else: tf = glob.glob("*.dfm") elif type(tf) == str: tf = [tf] testfiles = [] for arg in tf: testfiles.extend(glob.glob(arg)) return testfiles def main(testfiles=None, action=printer): """testfiles can be None, in which case the command line arguments are used as filenames. testfiles can be a string, in which case that file is parsed. testfiles can be a list. In all cases, the filenames will be globbed. If more than one file is parsed successfully, a dictionary of ParseResults is returned. Otherwise, a simple ParseResults is returned. 
""" testfiles = get_filename_list(testfiles) print(testfiles) if action: for i in (simple_identifier, value, item_list): i.setParseAction(action) success = 0 failures = [] retval = {} for f in testfiles: try: retval[f] = object_definition.parseFile(f) success += 1 except Exception: failures.append(f) if failures: print("\nfailed while processing %s" % ", ".join(failures)) print("\nsucceeded on %d of %d files" % (success, len(testfiles))) if len(retval) == 1 and len(testfiles) == 1: # if only one file is parsed, return the parseResults directly return retval[list(retval.keys())[0]] # else, return a dictionary of parseResults return retval if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/dhcpd_leases_parser.py0000644000000000000000000000575014412577542016662 0ustar00# # dhcpd_leases_parser.py # # Copyright 2008, Paul McGuire # # Sample parser to parse a dhcpd.leases file to extract leases # and lease attributes # # format ref: http://www.linuxmanpages.com/man5/dhcpd.leases.5.php # sample = r"""\ # All times in this file are in UTC (GMT), not your local timezone. This is # not a bug, so please don't ask about it. There is no portable way to # store leases in the local timezone, so please don't request this as a # feature. If this is inconvenient or confusing to you, we sincerely # apologize. Seriously, though - don't ask. # The format of this file is documented in the dhcpd.leases(5) manual page. 
# This lease file was written by isc-dhcp-V3.0.4 lease 192.168.0.250 { starts 3 2008/01/23 17:16:41; ends 6 2008/02/02 17:16:41; tstp 6 2008/02/02 17:16:41; binding state free; hardware ethernet 00:17:f2:9b:d8:19; uid "\001\000\027\362\233\330\031"; } lease 192.168.0.198 { starts 1 2008/02/04 13:46:55; ends never; tstp 1 2008/02/04 17:04:14; binding state free; hardware ethernet 00:13:72:d3:3b:98; uid "\001\000\023r\323;\230"; } lease 192.168.0.239 { starts 3 2008/02/06 12:12:03; ends 4 2008/02/07 12:12:03; tstp 4 2008/02/07 12:12:03; binding state free; hardware ethernet 00:1d:09:65:93:26; } """ from pyparsing import * import datetime, time LBRACE, RBRACE, SEMI, QUOTE = map(Suppress, '{};"') ipAddress = Combine(Word(nums) + ("." + Word(nums)) * 3) hexint = Word(hexnums, exact=2) macAddress = Combine(hexint + (":" + hexint) * 5) hdwType = Word(alphanums) yyyymmdd = Combine( (Word(nums, exact=4) | Word(nums, exact=2)) + ("/" + Word(nums, exact=2)) * 2 ) hhmmss = Combine(Word(nums, exact=2) + (":" + Word(nums, exact=2)) * 2) dateRef = oneOf(list("0123456"))("weekday") + yyyymmdd("date") + hhmmss("time") def utcToLocalTime(tokens): utctime = datetime.datetime.strptime( "%(date)s %(time)s" % tokens, "%Y/%m/%d %H:%M:%S" ) localtime = utctime - datetime.timedelta(0, time.timezone, 0) tokens["utcdate"], tokens["utctime"] = tokens["date"], tokens["time"] tokens["localdate"], tokens["localtime"] = str(localtime).split() del tokens["date"] del tokens["time"] dateRef.setParseAction(utcToLocalTime) startsStmt = "starts" + dateRef + SEMI endsStmt = "ends" + (dateRef | "never") + SEMI tstpStmt = "tstp" + dateRef + SEMI tsfpStmt = "tsfp" + dateRef + SEMI hdwStmt = "hardware" + hdwType("type") + macAddress("mac") + SEMI uidStmt = "uid" + QuotedString('"')("uid") + SEMI bindingStmt = "binding" + Word(alphanums) + Word(alphanums) + SEMI leaseStatement = ( startsStmt | endsStmt | tstpStmt | tsfpStmt | hdwStmt | uidStmt | bindingStmt ) leaseDef = ( "lease" + ipAddress("ipaddress") + 
LBRACE + Dict(ZeroOrMore(Group(leaseStatement))) + RBRACE ) for lease in leaseDef.searchString(sample): print(lease.dump()) print(lease.ipaddress, "->", lease.hardware.mac) print() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/dictExample.py0000644000000000000000000000336014412577542015122 0ustar00# # dictExample.py # # Illustration of using pyparsing's Dict class to process tabular data # # Copyright (c) 2003, Paul McGuire # import pyparsing as pp testData = """ +-------+------+------+------+------+------+------+------+------+ | | A1 | B1 | C1 | D1 | A2 | B2 | C2 | D2 | +=======+======+======+======+======+======+======+======+======+ | min | 7 | 43 | 7 | 15 | 82 | 98 | 1 | 37 | | max | 11 | 52 | 10 | 17 | 85 | 112 | 4 | 39 | | ave | 9 | 47 | 8 | 16 | 84 | 106 | 3 | 38 | | sdev | 1 | 3 | 1 | 1 | 1 | 3 | 1 | 1 | +-------+------+------+------+------+------+------+------+------+ """ # define grammar for datatable heading = ( pp.Literal("+-------+------+------+------+------+------+------+------+------+") + "| | A1 | B1 | C1 | D1 | A2 | B2 | C2 | D2 |" + "+=======+======+======+======+======+======+======+======+======+" ).suppress() vert = pp.Literal("|").suppress() number = pp.Word(pp.nums) rowData = pp.Group( vert + pp.Word(pp.alphas) + vert + pp.delimitedList(number, "|") + vert ) trailing = pp.Literal( "+-------+------+------+------+------+------+------+------+------+" ).suppress() datatable = heading + pp.Dict(pp.ZeroOrMore(rowData)) + trailing # now parse data and print results data = datatable.parseString(testData) print(data) # shortcut for import pprint; pprint.pprint(data.asList()) data.pprint() # access all data keys print("data keys=", list(data.keys())) # use dict-style access to values print("data['min']=", data["min"]) # use attribute-style access to values (if key is a valid Python identifier) print("data.max", data.max) 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/dictExample2.py0000644000000000000000000000415014412577542015202 0ustar00# # dictExample2.py # # Illustration of using pyparsing's Dict class to process tabular data # Enhanced Dict example, courtesy of Mike Kelly # # Copyright (c) 2004, Paul McGuire # from pyparsing import ( Literal, Word, Group, Dict, ZeroOrMore, alphas, nums, delimitedList, pyparsing_common as ppc, ) testData = """ +-------+------+------+------+------+------+------+------+------+ | | A1 | B1 | C1 | D1 | A2 | B2 | C2 | D2 | +=======+======+======+======+======+======+======+======+======+ | min | 7 | 43 | 7 | 15 | 82 | 98 | 1 | 37 | | max | 11 | 52 | 10 | 17 | 85 | 112 | 4 | 39 | | ave | 9 | 47 | 8 | 16 | 84 | 106 | 3 | 38 | | sdev | 1 | 3 | 1 | 1 | 1 | 3 | 1 | 1 | +-------+------+------+------+------+------+------+------+------+ """ # define grammar for datatable underline = Word("-=") number = ppc.integer vert = Literal("|").suppress() rowDelim = ("+" + ZeroOrMore(underline + "+")).suppress() columnHeader = Group(vert + vert + delimitedList(Word(alphas + nums), "|") + vert) heading = rowDelim + columnHeader("columns") + rowDelim rowData = Group(vert + Word(alphas) + vert + delimitedList(number, "|") + vert) trailing = rowDelim datatable = heading + Dict(ZeroOrMore(rowData)) + trailing # now parse data and print results data = datatable.parseString(testData) print(data.dump()) print("data keys=", list(data.keys())) print("data['min']=", data["min"]) print("sum(data['min']) =", sum(data["min"])) print("data.max =", data.max) print("sum(data.max) =", sum(data.max)) # now print transpose of data table, using column labels read from table header and # values from data lists print() print(" " * 5, end=" ") for i in range(1, len(data)): print("|%5s" % data[i][0], end=" ") print() print(("-" * 6) + ("+------" * (len(data) - 1))) for i in range(len(data.columns)): print("%5s" % 
data.columns[i], end=" ") for j in range(len(data) - 1): print("|%5s" % data[j + 1][i + 1], end=" ") print() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0718386 pyparsing-3.1.1/examples/ebnf.py0000644000000000000000000001022714412577542013575 0ustar00# This module tries to implement ISO 14977 standard with pyparsing. # pyparsing version 1.1 or greater is required. # ISO 14977 standardize The Extended Backus-Naur Form(EBNF) syntax. # You can read a final draft version here: # https://www.cl.cam.ac.uk/~mgk25/iso-ebnf.html # # Submitted 2004 by Seo Sanghyeon # from pyparsing import * all_names = """ integer meta_identifier terminal_string optional_sequence repeated_sequence grouped_sequence syntactic_primary syntactic_factor syntactic_term single_definition definitions_list syntax_rule syntax """.split() integer = Word(nums) meta_identifier = Word(alphas, alphanums + "_") terminal_string = Suppress("'") + CharsNotIn("'") + Suppress("'") ^ Suppress( '"' ) + CharsNotIn('"') + Suppress('"') definitions_list = Forward() optional_sequence = Suppress("[") + definitions_list + Suppress("]") repeated_sequence = Suppress("{") + definitions_list + Suppress("}") grouped_sequence = Suppress("(") + definitions_list + Suppress(")") syntactic_primary = ( optional_sequence ^ repeated_sequence ^ grouped_sequence ^ meta_identifier ^ terminal_string ) syntactic_factor = Optional(integer + Suppress("*")) + syntactic_primary syntactic_term = syntactic_factor + Optional(Suppress("-") + syntactic_factor) single_definition = delimitedList(syntactic_term, ",") definitions_list << delimitedList(single_definition, "|") syntax_rule = meta_identifier + Suppress("=") + definitions_list + Suppress(";") ebnfComment = ( ("(*" + ZeroOrMore(CharsNotIn("*") | ("*" + ~Literal(")"))) + "*)") .streamline() .setName("ebnfComment") ) syntax = OneOrMore(syntax_rule) syntax.ignore(ebnfComment) def do_integer(str, loc, toks): return int(toks[0]) def 
do_meta_identifier(str, loc, toks): if toks[0] in symbol_table: return symbol_table[toks[0]] else: forward_count.value += 1 symbol_table[toks[0]] = Forward() return symbol_table[toks[0]] def do_terminal_string(str, loc, toks): return Literal(toks[0]) def do_optional_sequence(str, loc, toks): return Optional(toks[0]) def do_repeated_sequence(str, loc, toks): return ZeroOrMore(toks[0]) def do_grouped_sequence(str, loc, toks): return Group(toks[0]) def do_syntactic_primary(str, loc, toks): return toks[0] def do_syntactic_factor(str, loc, toks): if len(toks) == 2: # integer * syntactic_primary return And([toks[1]] * toks[0]) else: # syntactic_primary return [toks[0]] def do_syntactic_term(str, loc, toks): if len(toks) == 2: # syntactic_factor - syntactic_factor return NotAny(toks[1]) + toks[0] else: # syntactic_factor return [toks[0]] def do_single_definition(str, loc, toks): toks = toks.asList() if len(toks) > 1: # syntactic_term , syntactic_term , ... return And(toks) else: # syntactic_term return [toks[0]] def do_definitions_list(str, loc, toks): toks = toks.asList() if len(toks) > 1: # single_definition | single_definition | ... return Or(toks) else: # single_definition return [toks[0]] def do_syntax_rule(str, loc, toks): # meta_identifier = definitions_list ; assert toks[0].expr is None, "Duplicate definition" forward_count.value -= 1 toks[0] << toks[1] return [toks[0]] def do_syntax(str, loc, toks): # syntax_rule syntax_rule ... 
return symbol_table symbol_table = {} class forward_count: pass forward_count.value = 0 for name in all_names: expr = vars()[name] action = vars()["do_" + name] expr.setName(name) expr.setParseAction(action) # ~ expr.setDebug() def parse(ebnf, given_table={}): symbol_table.clear() symbol_table.update(given_table) forward_count.value = 0 table = syntax.parseString(ebnf)[0] assert forward_count.value == 0, "Missing definition" for name in table: expr = table[name] expr.setName(name) # ~ expr.setDebug() return table ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/ebnftest.py0000644000000000000000000000454314412577542014501 0ustar00# # ebnftest.py # # Test script for ebnf.py # # Submitted 2004 by Seo Sanghyeon # print("Importing pyparsing...") from pyparsing import * print("Constructing EBNF parser with pyparsing...") import ebnf grammar = """ syntax = (syntax_rule), {(syntax_rule)}; syntax_rule = meta_identifier, '=', definitions_list, ';'; definitions_list = single_definition, {'|', single_definition}; single_definition = syntactic_term, {',', syntactic_term}; syntactic_term = syntactic_factor,['-', syntactic_factor]; syntactic_factor = [integer, '*'], syntactic_primary; syntactic_primary = optional_sequence | repeated_sequence | grouped_sequence | meta_identifier | terminal_string; optional_sequence = '[', definitions_list, ']'; repeated_sequence = '{', definitions_list, '}'; grouped_sequence = '(', definitions_list, ')'; (* terminal_string = "'", character - "'", {character - "'"}, "'" | '"', character - '"', {character - '"'}, '"'; meta_identifier = letter, {letter | digit}; integer = digit, {digit}; *) """ table = {} # ~ table['character'] = Word(printables, exact=1) # ~ table['letter'] = Word(alphas + '_', exact=1) # ~ table['digit'] = Word(nums, exact=1) table["terminal_string"] = sglQuotedString table["meta_identifier"] = Word(alphas + "_", alphas + "_" + nums) table["integer"] = 
Word(nums) print("Parsing EBNF grammar with EBNF parser...") parsers = ebnf.parse(grammar, table) ebnf_parser = parsers["syntax"] commentcharcount = 0 commentlocs = set() def tallyCommentChars(s, l, t): global commentcharcount, commentlocs # only count this comment if we haven't seen it before if l not in commentlocs: charCount = len(t[0]) - len(list(filter(str.isspace, t[0]))) commentcharcount += charCount commentlocs.add(l) return l, t # ordinarily, these lines wouldn't be necessary, but we are doing extra stuff with the comment expression ebnf.ebnfComment.setParseAction(tallyCommentChars) ebnf_parser.ignore(ebnf.ebnfComment) print("Parsing EBNF grammar with generated EBNF parser...\n") parsed_chars = ebnf_parser.parseString(grammar) parsed_char_len = len(parsed_chars) print("],\n".join(str(parsed_chars.asList()).split("],"))) # ~ grammar_length = len(grammar) - len(filter(str.isspace, grammar))-commentcharcount # ~ assert parsed_char_len == grammar_length print("Ok!") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/eval_arith.py0000644000000000000000000001425414412577542015005 0ustar00# eval_arith.py # # Copyright 2009, 2011 Paul McGuire # # Expansion on the pyparsing example simpleArith.py, to include evaluation # of the parsed tokens. 
# # Added support for exponentiation, using right-to-left evaluation of # operands # from pyparsing import ( Word, nums, alphas, Combine, one_of, OpAssoc, infix_notation, Literal, ParserElement, ) ParserElement.enablePackrat() class EvalConstant: "Class to evaluate a parsed constant or variable" vars_ = {} def __init__(self, tokens): self.value = tokens[0] def eval(self): if self.value in EvalConstant.vars_: return EvalConstant.vars_[self.value] else: return float(self.value) class EvalSignOp: "Class to evaluate expressions with a leading + or - sign" def __init__(self, tokens): self.sign, self.value = tokens[0] def eval(self): mult = {"+": 1, "-": -1}[self.sign] return mult * self.value.eval() def operatorOperands(tokenlist): "generator to extract operators and operands in pairs" it = iter(tokenlist) while 1: try: yield (next(it), next(it)) except StopIteration: break class EvalPowerOp: "Class to evaluate multiplication and division expressions" def __init__(self, tokens): self.value = tokens[0] def eval(self): res = self.value[-1].eval() for val in self.value[-3::-2]: res = val.eval() ** res return res class EvalMultOp: "Class to evaluate multiplication and division expressions" def __init__(self, tokens): self.value = tokens[0] def eval(self): prod = self.value[0].eval() for op, val in operatorOperands(self.value[1:]): if op == "*": prod *= val.eval() if op == "/": prod /= val.eval() return prod class EvalAddOp: "Class to evaluate addition and subtraction expressions" def __init__(self, tokens): self.value = tokens[0] def eval(self): sum = self.value[0].eval() for op, val in operatorOperands(self.value[1:]): if op == "+": sum += val.eval() if op == "-": sum -= val.eval() return sum class EvalComparisonOp: "Class to evaluate comparison expressions" opMap = { "<": lambda a, b: a < b, "<=": lambda a, b: a <= b, ">": lambda a, b: a > b, ">=": lambda a, b: a >= b, "!=": lambda a, b: a != b, "=": lambda a, b: a == b, "LT": lambda a, b: a < b, "LE": lambda a, b: a <= 
b, "GT": lambda a, b: a > b, "GE": lambda a, b: a >= b, "NE": lambda a, b: a != b, "EQ": lambda a, b: a == b, "<>": lambda a, b: a != b, } def __init__(self, tokens): self.value = tokens[0] def eval(self): val1 = self.value[0].eval() for op, val in operatorOperands(self.value[1:]): fn = EvalComparisonOp.opMap[op] val2 = val.eval() if not fn(val1, val2): break val1 = val2 else: return True return False # define the parser integer = Word(nums) real = Combine(Word(nums) + "." + Word(nums)) variable = Word(alphas, exact=1) operand = real | integer | variable signop = one_of("+ -") multop = one_of("* /") plusop = one_of("+ -") expop = Literal("**") # use parse actions to attach EvalXXX constructors to sub-expressions operand.setParseAction(EvalConstant) arith_expr = infix_notation( operand, [ (signop, 1, OpAssoc.RIGHT, EvalSignOp), (expop, 2, OpAssoc.LEFT, EvalPowerOp), (multop, 2, OpAssoc.LEFT, EvalMultOp), (plusop, 2, OpAssoc.LEFT, EvalAddOp), ], ) comparisonop = one_of("< <= > >= != = <> LT GT LE GE EQ NE") comp_expr = infix_notation( arith_expr, [ (comparisonop, 2, OpAssoc.LEFT, EvalComparisonOp), ], ) # sample expressions posted on comp.lang.python, asking for advice # in safely evaluating them rules = [ "( A - B ) = 0", "( B - C + B ) = 0", "(A + B + C + D + E + F + G + H + I) = J", "(A + B + C + D + E + F + G + H) = I", "(A + B + C + D + E + F) = G", "(A + B + C + D + E) = (F + G + H + I + J)", "(A + B + C + D + E) = (F + G + H + I)", "(A + B + C + D + E) = F", "(A + B + C + D) = (E + F + G + H)", "(A + B + C) = D", "(A + B + C) = (D + E + F)", "(A + B) = (C + D + E + F)", "(A + B) = (C + D)", "(A + B) = (C - D + E - F - G + H + I + J)", "(A + B) = C", "(A + B) = 0", "(A+B+C+D+E) = (F+G+H+I+J)", "(A+B+C+D) = (E+F+G+H)", "(A+B+C+D)=(E+F+G+H)", "(A+B+C)=(D+E+F)", "(A+B)=(C+D)", "(A+B)=C", "(A-B)=C", "(A/(B+C))", "(B/(C+D))", "(G + H) = I", "-0.99 LE ((A+B+C)-(D+E+F+G)) LE 0.99", "-0.99 LE (A-(B+C)) LE 0.99", "-1000.00 LE A LE 0.00", "-5000.00 LE A LE 0.00", "A < 
B", "A < 7000", "A = -(B)", "A = C", "A = 0", "A GT 0", "A GT 0.00", "A GT 7.00", "A LE B", "A LT -1000.00", "A LT -5000", "A LT 0", "G=(B+C+D)", "A=B", "I = (G + H)", "0.00 LE A LE 4.00", "4.00 LT A LE 7.00", "0.00 LE A LE 4.00 LE E > D", "2**2**(A+3)", ] vars_ = { "A": 0, "B": 1.1, "C": 2.2, "D": 3.3, "E": 4.4, "F": 5.5, "G": 6.6, "H": 7.7, "I": 8.8, "J": 9.9, } # define tests from given rules tests = [] for t in rules: t_orig = t t = t.replace("=", "==") t = t.replace("EQ", "==") t = t.replace("LE", "<=") t = t.replace("GT", ">") t = t.replace("LT", "<") t = t.replace("GE", ">=") t = t.replace("LE", "<=") t = t.replace("NE", "!=") t = t.replace("<>", "!=") tests.append((t_orig, eval(t, vars_))) # copy vars_ to EvalConstant lookup dict EvalConstant.vars_ = vars_ failed = 0 for test, expected in tests: ret = comp_expr.parseString(test)[0] parsedvalue = ret.eval() print(test, expected, parsedvalue) if abs(parsedvalue - expected) > 1e-6: print("<<< FAIL") failed += 1 else: print("") print("") if failed: raise Exception("could not parse") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/excel_expr.py0000644000000000000000000000431714443622312015012 0ustar00# excelExpr.py # # Copyright 2010, Paul McGuire # # A partial implementation of a parser of Excel formula expressions. 
# import pyparsing as pp ppc = pp.common pp.ParserElement.enable_packrat() EQ, LPAR, RPAR, COLON, COMMA = pp.Suppress.using_each("=():,") EXCL, DOLLAR = pp.Literal.using_each("!$") sheet_ref = pp.Word(pp.alphas, pp.alphanums) | pp.QuotedString("'", escQuote="''") col_ref = pp.Opt(DOLLAR) + pp.Word(pp.alphas, max=2) row_ref = pp.Opt(DOLLAR) + pp.Word(pp.nums) cell_ref = pp.Combine( pp.Group(pp.Opt(sheet_ref + EXCL)("sheet") + col_ref("col") + row_ref("row")) ) cell_range = ( pp.Group(cell_ref("start") + COLON + cell_ref("end"))("range") | cell_ref | pp.Word(pp.alphas, pp.alphanums) ) expr = pp.Forward() COMPARISON_OP = pp.one_of("< = > >= <= != <>") cond_expr = expr + COMPARISON_OP + expr if_func = ( pp.CaselessKeyword("if") - LPAR + pp.Group(cond_expr)("condition") + COMMA + pp.Group(expr)("if_true") + COMMA + pp.Group(expr)("if_false") + RPAR ) def stat_function(name): return pp.Group(pp.CaselessKeyword(name) + pp.Group(LPAR + pp.DelimitedList(expr) + RPAR)) sum_func = stat_function("sum") min_func = stat_function("min") max_func = stat_function("max") ave_func = stat_function("ave") func_call = if_func | sum_func | min_func | max_func | ave_func mult_op = pp.one_of("* /") add_op = pp.one_of("+ -") numeric_literal = ppc.number operand = numeric_literal | func_call | cell_range | cell_ref arith_expr = pp.infix_notation( operand, [ (mult_op, 2, pp.OpAssoc.LEFT), (add_op, 2, pp.OpAssoc.LEFT), ], ) text_operand = pp.dbl_quoted_string | cell_ref text_expr = pp.infix_notation( text_operand, [ ("&", 2, pp.OpAssoc.LEFT), ], ) expr <<= arith_expr | text_expr def main(): success, report = (EQ + expr).run_tests( """\ =3*A7+5 =3*Sheet1!$A$7+5 =3*'Sheet 1'!$A$7+5 =3*'O''Reilly''s sheet'!$A$7+5 =if(Sum(A1:A25)>42,Min(B1:B25),if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25))) =sum(a1:a25,10,min(b1,c2,d3)) =if("T"&a2="TTime", "Ready", "Not ready") """ ) assert success if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 
mtime=1680539490.0758383 pyparsing-3.1.1/examples/fourFn.py0000644000000000000000000002442614412577542014130 0ustar00# fourFn.py # # Demonstration of the pyparsing module, implementing a simple 4-function expression parser, # with support for scientific notation, and symbols for e and pi. # Extended to add exponentiation and simple built-in functions. # Extended test cases, simplified pushFirst method. # Removed unnecessary expr.suppress() call (thanks Nathaniel Peterson!), and added Group # Changed fnumber to use a Regex, which is now the preferred method # Reformatted to latest pypyparsing features, support multiple and variable args to functions # # Copyright 2003-2019 by Paul McGuire # from pyparsing import ( Literal, Word, Group, Forward, alphas, alphanums, Regex, ParseException, CaselessKeyword, Suppress, delimitedList, ) import math import operator exprStack = [] def push_first(toks): exprStack.append(toks[0]) def push_unary_minus(toks): for t in toks: if t == "-": exprStack.append("unary -") else: break bnf = None def BNF(): """ expop :: '^' multop :: '*' | '/' addop :: '+' | '-' integer :: ['+' | '-'] '0'..'9'+ atom :: PI | E | real | fn '(' expr ')' | '(' expr ')' factor :: atom [ expop factor ]* term :: factor [ multop factor ]* expr :: term [ addop term ]* """ global bnf if not bnf: # use CaselessKeyword for e and pi, to avoid accidentally matching # functions that start with 'e' or 'pi' (such as 'exp'); Keyword # and CaselessKeyword only match whole words e = CaselessKeyword("E") pi = CaselessKeyword("PI") # fnumber = Combine(Word("+-"+nums, nums) + # Optional("." 
+ Optional(Word(nums))) + # Optional(e + Word("+-"+nums, nums))) # or use provided pyparsing_common.number, but convert back to str: # fnumber = ppc.number().addParseAction(lambda t: str(t[0])) fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?") ident = Word(alphas, alphanums + "_$") plus, minus, mult, div = map(Literal, "+-*/") lpar, rpar = map(Suppress, "()") addop = plus | minus multop = mult | div expop = Literal("^") expr = Forward() expr_list = delimitedList(Group(expr)) # add parse action that replaces the function identifier with a (name, number of args) tuple def insert_fn_argcount_tuple(t): fn = t.pop(0) num_args = len(t[0]) t.insert(0, (fn, num_args)) fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction( insert_fn_argcount_tuple ) atom = ( addop[...] + ( (fn_call | pi | e | fnumber | ident).setParseAction(push_first) | Group(lpar + expr + rpar) ) ).setParseAction(push_unary_minus) # by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left # exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor = Forward() factor <<= atom + (expop + factor).setParseAction(push_first)[...] term = factor + (multop + factor).setParseAction(push_first)[...] expr <<= term + (addop + term).setParseAction(push_first)[...] 
bnf = expr return bnf # map operator symbols to corresponding arithmetic operations epsilon = 1e-12 opn = { "+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv, "^": operator.pow, } fn = { "sin": math.sin, "cos": math.cos, "tan": math.tan, "exp": math.exp, "abs": abs, "trunc": int, "round": round, "sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0, # functionsl with multiple arguments "multiply": lambda a, b: a * b, "hypot": math.hypot, # functions with a variable number of arguments "all": lambda *a: all(a), } def evaluate_stack(s): op, num_args = s.pop(), 0 if isinstance(op, tuple): op, num_args = op if op == "unary -": return -evaluate_stack(s) if op in "+-*/^": # note: operands are pushed onto the stack in reverse order op2 = evaluate_stack(s) op1 = evaluate_stack(s) return opn[op](op1, op2) elif op == "PI": return math.pi # 3.1415926535 elif op == "E": return math.e # 2.718281828 elif op in fn: # note: args are pushed onto the stack in reverse order args = reversed([evaluate_stack(s) for _ in range(num_args)]) return fn[op](*args) elif op[0].isalpha(): raise Exception("invalid identifier '%s'" % op) else: # try to evaluate as int first, then as float if int fails try: return int(op) except ValueError: return float(op) if __name__ == "__main__": def test(s, expected): exprStack[:] = [] try: results = BNF().parseString(s, parseAll=True) val = evaluate_stack(exprStack[:]) except ParseException as pe: print(s, "failed parse:", str(pe)) except Exception as e: print(s, "failed eval:", str(e), exprStack) else: if val == expected: print(s, "=", val, results, "=>", exprStack) else: print(s + "!!!", val, "!=", expected, results, "=>", exprStack) test("9", 9) test("-9", -9) test("--9", 9) test("-E", -math.e) test("9 + 3 + 6", 9 + 3 + 6) test("9 + 3 / 11", 9 + 3.0 / 11) test("(9 + 3)", (9 + 3)) test("(9+3) / 11", (9 + 3.0) / 11) test("9 - 12 - 6", 9 - 12 - 6) test("9 - (12 - 6)", 9 - (12 - 6)) test("2*3.14159", 2 * 3.14159) 
test("3.1415926535*3.1415926535 / 10", 3.1415926535 * 3.1415926535 / 10) test("PI * PI / 10", math.pi * math.pi / 10) test("PI*PI/10", math.pi * math.pi / 10) test("PI^2", math.pi ** 2) test("round(PI^2)", round(math.pi ** 2)) test("6.02E23 * 8.048", 6.02e23 * 8.048) test("e / 3", math.e / 3) test("sin(PI/2)", math.sin(math.pi / 2)) test("10+sin(PI/4)^2", 10 + math.sin(math.pi / 4) ** 2) test("trunc(E)", int(math.e)) test("trunc(-E)", int(-math.e)) test("round(E)", round(math.e)) test("round(-E)", round(-math.e)) test("E^PI", math.e ** math.pi) test("exp(0)", 1) test("exp(1)", math.e) test("2^3^2", 2 ** 3 ** 2) test("(2^3)^2", (2 ** 3) ** 2) test("2^3+2", 2 ** 3 + 2) test("2^3+5", 2 ** 3 + 5) test("2^9", 2 ** 9) test("sgn(-2)", -1) test("sgn(0)", 0) test("sgn(0.1)", 1) test("foo(0.1)", None) test("round(E, 3)", round(math.e, 3)) test("round(PI^2, 3)", round(math.pi ** 2, 3)) test("sgn(cos(PI/4))", 1) test("sgn(cos(PI/2))", 0) test("sgn(cos(PI*3/4))", -1) test("+(sgn(cos(PI/4)))", 1) test("-(sgn(cos(PI/4)))", -1) test("hypot(3, 4)", 5) test("multiply(3, 7)", 21) test("all(1,1,1)", True) test("all(1,1,1,1,1,0)", False) """ Test output: >python fourFn.py 9 = 9 ['9'] => ['9'] -9 = -9 ['-', '9'] => ['9', 'unary -'] --9 = 9 ['-', '-', '9'] => ['9', 'unary -', 'unary -'] -E = -2.718281828459045 ['-', 'E'] => ['E', 'unary -'] 9 + 3 + 6 = 18 ['9', '+', '3', '+', '6'] => ['9', '3', '+', '6', '+'] 9 + 3 / 11 = 9.272727272727273 ['9', '+', '3', '/', '11'] => ['9', '3', '11', '/', '+'] (9 + 3) = 12 [['9', '+', '3']] => ['9', '3', '+'] (9+3) / 11 = 1.0909090909090908 [['9', '+', '3'], '/', '11'] => ['9', '3', '+', '11', '/'] 9 - 12 - 6 = -9 ['9', '-', '12', '-', '6'] => ['9', '12', '-', '6', '-'] 9 - (12 - 6) = 3 ['9', '-', ['12', '-', '6']] => ['9', '12', '6', '-', '-'] 2*3.14159 = 6.28318 ['2', '*', '3.14159'] => ['2', '3.14159', '*'] 3.1415926535*3.1415926535 / 10 = 0.9869604400525172 ['3.1415926535', '*', '3.1415926535', '/', '10'] => ['3.1415926535', '3.1415926535', '*', 
'10', '/'] PI * PI / 10 = 0.9869604401089358 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/'] PI*PI/10 = 0.9869604401089358 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/'] PI^2 = 9.869604401089358 ['PI', '^', '2'] => ['PI', '2', '^'] round(PI^2) = 10 [('round', 1), [['PI', '^', '2']]] => ['PI', '2', '^', ('round', 1)] 6.02E23 * 8.048 = 4.844896e+24 ['6.02E23', '*', '8.048'] => ['6.02E23', '8.048', '*'] e / 3 = 0.9060939428196817 ['E', '/', '3'] => ['E', '3', '/'] sin(PI/2) = 1.0 [('sin', 1), [['PI', '/', '2']]] => ['PI', '2', '/', ('sin', 1)] 10+sin(PI/4)^2 = 10.5 ['10', '+', ('sin', 1), [['PI', '/', '4']], '^', '2'] => ['10', 'PI', '4', '/', ('sin', 1), '2', '^', '+'] trunc(E) = 2 [('trunc', 1), [['E']]] => ['E', ('trunc', 1)] trunc(-E) = -2 [('trunc', 1), [['-', 'E']]] => ['E', 'unary -', ('trunc', 1)] round(E) = 3 [('round', 1), [['E']]] => ['E', ('round', 1)] round(-E) = -3 [('round', 1), [['-', 'E']]] => ['E', 'unary -', ('round', 1)] E^PI = 23.140692632779263 ['E', '^', 'PI'] => ['E', 'PI', '^'] exp(0) = 1.0 [('exp', 1), [['0']]] => ['0', ('exp', 1)] exp(1) = 2.718281828459045 [('exp', 1), [['1']]] => ['1', ('exp', 1)] 2^3^2 = 512 ['2', '^', '3', '^', '2'] => ['2', '3', '2', '^', '^'] (2^3)^2 = 64 [['2', '^', '3'], '^', '2'] => ['2', '3', '^', '2', '^'] 2^3+2 = 10 ['2', '^', '3', '+', '2'] => ['2', '3', '^', '2', '+'] 2^3+5 = 13 ['2', '^', '3', '+', '5'] => ['2', '3', '^', '5', '+'] 2^9 = 512 ['2', '^', '9'] => ['2', '9', '^'] sgn(-2) = -1 [('sgn', 1), [['-', '2']]] => ['2', 'unary -', ('sgn', 1)] sgn(0) = 0 [('sgn', 1), [['0']]] => ['0', ('sgn', 1)] sgn(0.1) = 1 [('sgn', 1), [['0.1']]] => ['0.1', ('sgn', 1)] foo(0.1) failed eval: invalid identifier 'foo' ['0.1', ('foo', 1)] round(E, 3) = 2.718 [('round', 2), [['E'], ['3']]] => ['E', '3', ('round', 2)] round(PI^2, 3) = 9.87 [('round', 2), [['PI', '^', '2'], ['3']]] => ['PI', '2', '^', '3', ('round', 2)] sgn(cos(PI/4)) = 1 [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]] => ['PI', '4', 
'/', ('cos', 1), ('sgn', 1)] sgn(cos(PI/2)) = 0 [('sgn', 1), [[('cos', 1), [['PI', '/', '2']]]]] => ['PI', '2', '/', ('cos', 1), ('sgn', 1)] sgn(cos(PI*3/4)) = -1 [('sgn', 1), [[('cos', 1), [['PI', '*', '3', '/', '4']]]]] => ['PI', '3', '*', '4', '/', ('cos', 1), ('sgn', 1)] +(sgn(cos(PI/4))) = 1 ['+', [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]]] => ['PI', '4', '/', ('cos', 1), ('sgn', 1)] -(sgn(cos(PI/4))) = -1 ['-', [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]]] => ['PI', '4', '/', ('cos', 1), ('sgn', 1), 'unary -'] """ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/gen_ctypes.py0000644000000000000000000001245014412577542015023 0ustar00# # gen_ctypes.py # # Parse a .h header file to generate ctypes argtype and return type definitions # # Copyright 2004-2016, by Paul McGuire # from pyparsing import * typemap = { "byte": "c_byte", "char": "c_char", "char *": "c_char_p", "double": "c_double", "float": "c_float", "int": "c_int", "int16": "c_int16", "int32": "c_int32", "int64": "c_int64", "int8": "c_int8", "long": "c_long", "longlong": "c_longlong", "short": "c_short", "size_t": "c_size_t", "ubyte": "c_ubyte", "uchar": "c_ubyte", "u_char": "c_ubyte", "uint": "c_uint", "u_int": "c_uint", "uint16": "c_uint16", "uint32": "c_uint32", "uint64": "c_uint64", "uint8": "c_uint8", "u_long": "c_ulong", "ulong": "c_ulong", "ulonglong": "c_ulonglong", "ushort": "c_ushort", "u_short": "c_ushort", "void *": "c_void_p", "voidp": "c_voidp", "wchar": "c_wchar", "wchar *": "c_wchar_p", "Bool": "c_bool", "void": "None", } LPAR, RPAR, LBRACE, RBRACE, COMMA, SEMI = Suppress.using_each("(){},;") ident = pyparsing_common.identifier integer = Regex(r"[+-]?\d+") hexinteger = Regex(r"0x[0-9a-fA-F]+") const = Suppress("const") primitiveType = one_of(t for t in typemap if not t.endswith("*")) structType = Suppress("struct") + ident vartype = ( Opt(const) + (primitiveType | structType | ident) + 
Opt(Word("*")("ptr")) ) def normalizetype(t): if isinstance(t, ParseResults): return " ".join(t) # ~ ret = ParseResults([' '.join(t)]) # ~ return ret vartype.set_parse_action(normalizetype) arg = Group(vartype("argtype") + Opt(ident("argname"))) func_def = ( vartype("fn_type") + ident("fn_name") + LPAR + Opt(DelimitedList(arg | "..."))("fn_args") + RPAR + SEMI ) def derivefields(t): if t.fn_args and t.fn_args[-1] == "...": t["varargs"] = True func_def.set_parse_action(derivefields) fn_typedef = "typedef" + func_def var_typedef = "typedef" + primitiveType("primType") + ident("name") + SEMI enum_def = ( Keyword("enum") + LBRACE + DelimitedList(Group(ident("name") + "=" + (hexinteger | integer)("value")))( "evalues" ) + Opt(COMMA) + RBRACE ) c_header = open("snmp_api.h").read() module = "pynetsnmp" user_defined_types = set() typedefs = [] fn_typedefs = [] functions = [] enum_constants = [] # add structures commonly included from std lib headers def addStdType(t, namespace=""): fullname = namespace + "_" + t if namespace else t typemap[t] = fullname user_defined_types.add(t) addStdType("fd_set", "sys_select") addStdType("timeval", "sys_time") def getUDType(typestr): key = typestr.rstrip(" *") if key not in typemap: user_defined_types.add(key) typemap[key] = "{}_{}".format(module, key) def typeAsCtypes(typestr): if typestr in typemap: return typemap[typestr] if typestr.endswith("*"): return "POINTER(%s)" % typeAsCtypes(typestr.rstrip(" *")) return typestr # scan input header text for primitive typedefs for td, _, _ in var_typedef.scan_string(c_header): typedefs.append((td.name, td.primType)) # add typedef type to typemap to map to itself typemap[td.name] = td.name # scan input header text for function typedefs fn_typedefs = fn_typedef.search_string(c_header) # add each function typedef to typemap to map to itself for fntd in fn_typedefs: typemap[fntd.fn_name] = fntd.fn_name # scan input header text, and keep running list of user-defined types for fn, _, _ in ( 
cStyleComment.suppress() | fn_typedef.suppress() | func_def ).scan_string(c_header): if not fn: continue getUDType(fn.fn_type) for arg in fn.fn_args: if arg != "...": if arg.argtype not in typemap: getUDType(arg.argtype) functions.append(fn) # scan input header text for enums enum_def.ignore(cpp_style_comment) for en_, _, _ in enum_def.scan_string(c_header): for ev in en_.evalues: enum_constants.append((ev.name, ev.value)) print("from ctypes import *") print("{} = CDLL('{}.dll')".format(module, module)) print() print("# user defined types") for tdname, tdtyp in typedefs: print("{} = {}".format(tdname, typemap[tdtyp])) for fntd in fn_typedefs: print( "{} = CFUNCTYPE({})".format( fntd.fn_name, ",\n ".join(typeAsCtypes(a.argtype) for a in fntd.fn_args) ) ) for udtype in user_defined_types: print("class %s(Structure): pass" % typemap[udtype]) print() print("# constant definitions") for en, ev in enum_constants: print("{} = {}".format(en, ev)) print() print("# functions") for fn in functions: prefix = "{}.{}".format(module, fn.fn_name) print("{}.restype = {}".format(prefix, typeAsCtypes(fn.fn_type))) if fn.varargs: print("# warning - %s takes variable argument list" % prefix) del fn.fn_args[-1] if fn.fn_args.asList() != [["void"]]: print( "{}.argtypes = ({},)".format( prefix, ",".join(typeAsCtypes(a.argtype) for a in fn.fn_args) ) ) else: print("%s.argtypes = ()" % (prefix)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/getNTPserversNew.py0000644000000000000000000000216214412577542016107 0ustar00# getNTPserversNew.py # # Demonstration of the parsing module, implementing a HTML page scanner, # to extract a list of NTP time servers from the NIST web site. 
# # Copyright 2004-2010, by Paul McGuire # September, 2010 - updated to more current use of setResultsName, new NIST URL # import pyparsing as pp ppc = pp.pyparsing_common from urllib.request import urlopen integer = pp.Word(pp.nums) ipAddress = ppc.ipv4_address() hostname = pp.DelimitedList(pp.Word(pp.alphas, pp.alphanums + "-_"), ".", combine=True) tdStart, tdEnd = pp.make_html_tags("td") timeServerPattern = ( tdStart + hostname("hostname") + tdEnd + tdStart + ipAddress("ipAddr") + tdEnd + tdStart + tdStart.tag_body("loc") + tdEnd ) # get list of time servers nistTimeServerURL = "https://tf.nist.gov/tf-cgi/servers.cgi#" with urlopen(nistTimeServerURL) as serverListPage: serverListHTML = serverListPage.read().decode("UTF-8") addrs = {} for srvr, startloc, endloc in timeServerPattern.scan_string(serverListHTML): print(f"{srvr.ipAddr} ({srvr.hostname.strip()}) - {srvr.loc.strip()}") addrs[srvr.ipAddr] = srvr.loc ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/greeting.py0000644000000000000000000000100614412577542014462 0ustar00# greeting.py # # Demonstration of the pyparsing module, on the prototypical "Hello, World!" # example # # Copyright 2003, 2019 by Paul McGuire # import pyparsing as pp # define grammar greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + pp.one_of("! ? .") # input string hello = "Hello, World!" # parse input string print(hello, "->", greet.parse_string(hello)) # parse a bunch of input strings greet.run_tests( """\ Hello, World! Ahoy, Matey! Howdy, Pardner! Morning, Neighbor! """ ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/greetingInGreek.py0000644000000000000000000000065114412577542015734 0ustar00# # greetingInGreek.py # # Demonstration of the parsing module, on the prototypical "Hello, World!" 
example # # Copyright 2004-2016, by Paul McGuire # from pyparsing import Word, pyparsing_unicode as ppu # define grammar alphas = ppu.Greek.alphas greet = Word(alphas) + "," + Word(alphas) + "!" # input string hello = "Καλημέρα, κόσμε!" # parse input string print(greet.parse_string(hello)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/greetingInKorean.py0000644000000000000000000000075714443622312016113 0ustar00# # greetingInKorean.py # # Demonstration of the parsing module, on the prototypical "Hello, World!" example # # Copyright 2004-2016, by Paul McGuire # from pyparsing import Word, pyparsing_unicode as ppu korean_chars = ppu.한국어.alphas korean_word = Word(korean_chars, min=2) # define grammar greet = korean_word + "," + korean_word + "!" # input string hello = "안녕, 여러분!" # "Hello, World!" in Korean # parse input string print(greet.parse_string(hello)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1680539490.0758383 pyparsing-3.1.1/examples/groupUsingListAllMatches.py0000644000000000000000000000077514412577542017626 0ustar00# # A simple example showing the use of the implied listAllMatches=True for # results names with a trailing '*' character. # # This example performs work similar to itertools.groupby, but without # having to sort the input first. 
# # Copyright 2004-2016, by Paul McGuire # from pyparsing import Word, ZeroOrMore, nums aExpr = Word("A", nums) bExpr = Word("B", nums) cExpr = Word("C", nums) grammar = ZeroOrMore(aExpr("A*") | bExpr("B*") | cExpr("C*")) grammar.runTests("A1 B1 A2 C1 B2 A3") ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/hola_mundo.py0000644000000000000000000000402414443622312014774 0ustar00# escrito por Marco Alfonso, 2004 Noviembre # importamos los símbolos requeridos desde el módulo from pyparsing import ( Word, one_of, nums, Group, OneOrMore, Opt, pyparsing_unicode as ppu, ) # usamos las letras en latin1, que incluye las como 'ñ', 'á', 'é', etc. alphas = ppu.Latin1.alphas # Aqui decimos que la gramatica "saludo" DEBE contener # una palabra compuesta de caracteres alfanumericos # (Word(alphas)) mas una ',' mas otra palabra alfanumerica, # mas '!' y esos seian nuestros tokens saludo = Word(alphas) + "," + Word(alphas) + one_of("! . ?") tokens = saludo.parse_string("Hola, Mundo !") # Ahora parseamos una cadena, "Hola, Mundo!", # el metodo parseString, nos devuelve una lista con los tokens # encontrados, en caso de no haber errores... for i, token in enumerate(tokens): print(f"Token {i} -> {token}") # imprimimos cada uno de los tokens Y listooo!!, he aquí a salida # Token 0 -> Hola # Token 1 -> , # Token 2-> Mundo # Token 3 -> ! # ahora cambia el parseador, aceptando saludos con mas que una sola palabra antes que ',' saludo = Group(OneOrMore(Word(alphas))) + "," + Word(alphas) + one_of("! . ?") tokens = saludo.parse_string("Hasta mañana, Mundo !") for i, token in enumerate(tokens): print(f"Token {i} -> {token}") # Ahora parseamos algunas cadenas, usando el metodo runTests saludo.run_tests("""\ Hola, Mundo! Hasta mañana, Mundo ! 
""", fullDump=False, ) # Por supuesto, se pueden "reutilizar" gramáticas, por ejemplo: numimag = Word(nums) + "i" numreal = Word(nums) numcomplex = numimag | numreal + Opt("+" + numimag) # Funcion para cambiar a complejo numero durante parsear: def hace_python_complejo(t): valid_python = "".join(t).replace("i", "j") for tipo in (int, complex): try: return tipo(valid_python) except ValueError: pass numcomplex.set_parse_action(hace_python_complejo) numcomplex.run_tests("""\ 3 5i 3+5i """) # Excelente!!, bueno, los dejo, me voy a seguir tirando código... ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1687102666.4839993 pyparsing-3.1.1/examples/html_stripper.py0000644000000000000000000000326714443622312015553 0ustar00# # html_stripper.py # # Sample code for stripping HTML markup tags and scripts from # HTML source files. # # Copyright (c) 2006, 2016, 2023, Paul McGuire # from urllib.request import urlopen from pyparsing import ( LineEnd, quoted_string, make_html_tags, common_html_entity, replace_html_entity, html_comment, any_open_tag, any_close_tag, replace_with, ) # if