pax_global_header00006660000000000000000000000064146324160550014520gustar00rootroot0000000000000052 comment=c8722e6b815bedf4e6aaeea9ccc7d6ff3e9b4f84 dpath-python-2.2.0/000077500000000000000000000000001463241605500141405ustar00rootroot00000000000000dpath-python-2.2.0/.github/000077500000000000000000000000001463241605500155005ustar00rootroot00000000000000dpath-python-2.2.0/.github/tag-changelog-config.js000066400000000000000000000006671463241605500220120ustar00rootroot00000000000000module.exports = { types: [ { types: ["other"], label: "Commits" }, ], renderTypeSection: function (label, commits) { let text = `\n## ${label}\n`; commits.forEach((commit) => { text += `- ${commit.subject}\n`; }); return text; }, renderChangelog: function (release, changes) { const now = new Date(); return `# ${release} - ${now.toISOString().substr(0, 10)}\n` + changes + "\n\n"; }, }; dpath-python-2.2.0/.github/workflows/000077500000000000000000000000001463241605500175355ustar00rootroot00000000000000dpath-python-2.2.0/.github/workflows/deploy.yml000066400000000000000000000042741463241605500215630ustar00rootroot00000000000000name: Deploy and Release # Controls when the workflow will run on: # Triggers the workflow on version change push: branches: - master paths: - dpath/version.py # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: # This workflow contains a single job called "deploy" deploy: # The type of runner that the job will run on runs-on: ubuntu-latest # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 - name: Get Version id: get-version run: | python -c "from dpath.version import VERSION; print(f'::set-output name=version::v{VERSION}');" - name: Check Tag uses: mukunku/tag-exists-action@v1.0.0 id: check-tag with: tag: ${{ steps.get-version.outputs.version }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Tag if: steps.check-tag.outputs.exists == 'false' uses: negz/create-tag@v1 with: version: ${{ steps.get-version.outputs.version }} token: ${{ secrets.GITHUB_TOKEN }} - name: Generate Changelog id: generate-changelog uses: loopwerk/tag-changelog@v1 with: token: ${{ secrets.GITHUB_TOKEN }} config_file: .github/tag-changelog-config.js - name: PyPI Deployment uses: casperdcl/deploy-pypi@v2 with: # PyPI username user: ${{ secrets.PYPI_USER }} # PyPI password or API token password: ${{ secrets.PYPI_PASS }} # `setup.py` command to run ("true" is a shortcut for "clean sdist -d bdist_wheel -d ") build: clean sdist -d dist/ # `pip` command to run ("true" is a shortcut for "wheel -w --no-deps .") pip: true - name: Github Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ steps.get-version.outputs.version }} body: ${{ steps.generate-changelog.outputs.changes }} files: dist/* dpath-python-2.2.0/.github/workflows/tests.yml000066400000000000000000000042741463241605500214310ustar00rootroot00000000000000name: Run tests # Controls when the workflow will run on: # Triggers the workflow on push or pull request events but only for important files push: branches: - master paths: - "dpath/" - "**.py" - "tox.ini" pull_request: paths: - "dpath/" - "**.py" - "tox.ini" # Allows you to run this workflow manually from the Actions tab workflow_dispatch: # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: # Run flake8 linter flake8: runs-on: ubuntu-latest steps: - name: Check out 
code uses: actions/checkout@main - name: Set up Python 3.12 uses: actions/setup-python@main with: python-version: "3.12" - name: Setup flake8 annotations uses: TrueBrain/actions-flake8@v2.3 with: path: setup.py dpath/ tests/ # Generate a common hashseed for all tests generate-hashseed: runs-on: ubuntu-latest outputs: hashseed: ${{ steps.generate.outputs.hashseed }} steps: - name: Generate Hashseed id: generate run: | python -c "import os from random import randint hashseed = randint(0, 4294967295) print(f'{hashseed=}') open(os.environ['GITHUB_OUTPUT'], 'a').write(f'hashseed={hashseed}')" # Tests job tests: # The type of runner that the job will run on runs-on: ubuntu-latest needs: [generate-hashseed, flake8] strategy: matrix: # Match versions specified in tox.ini python-version: ['3.8', '3.9', '3.10', '3.11', 'pypy-3.7', '3.12'] # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - name: Check out code uses: actions/checkout@main - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@main with: python-version: ${{ matrix.python-version }} - name: Run tox with tox-gh-actions uses: ymyzk/run-tox-gh-actions@main with: tox-args: -vv --hashseed=${{ needs.generate-hashseed.outputs.hashseed }} dpath-python-2.2.0/.gitignore000066400000000000000000000001541463241605500161300ustar00rootroot00000000000000/MANIFEST /.tox /build /env .hypothesis *.pyc .vscode venv_39 .idea/ dpath.egg-info/ dist/ tests/.hypothesisdpath-python-2.2.0/LICENSE.txt000066400000000000000000000021621463241605500157640ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2013 Andrew Kesterson , Caleb Case Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dpath-python-2.2.0/MAINTAINERS.md000066400000000000000000000147011463241605500162370ustar00rootroot00000000000000Who Maintains DPATH =================== dpath was created by and originally maintained by Andrew Kesterson and Caleb Case . In July of 2020 they put out a call for new maintainers. [@bigsablept](https://github.com/bigsablept) and [@moomoohk](https://github.com/moomoohk) stepped up to become the new maintainers. There are several individuals in the community who have taken an active role in helping to maintain the project and submit fixes. Those individuals are shown in the git changelog. Where and How do we communicate =============================== The dpath maintainers communicate in 3 primary ways: 1. Email, directly to each other. 2. 
Github via issue and pull request comments 3. A monthly maintainers meeting via Zoom The remainder of this document is subject to change after further discussion among the new maintainers. What is the roadmap =================== dpath has 3 major series: 1.x, 2.x, and 3.x. 1.x is the original dpath release from way way back. It has a util library with a C-like calling convention, lots of assumptions about how it would be used (it was built originally to solve a somewhat narrow use case), and very bad unicode support. 2.x is a transitional branch that intends to fix the unicode support and to introduce some newer concepts (such as the segments library) while still being backwards compatible with 1.x. 3.x is a total reconstruction of the library that does not guarantee backwards compatibility with 1.x. Finding and Prioritizing Work ============================= There are GitHub project boards which show the work to be done for a given series: https://github.com/akesterson/dpath-python/projects/ Each series has a board with 4 columns: * Backlog. New work for this series appears here. * To Do. This column represents work that has been prioritized and someone has agreed to do the work when they have an available time slot. Each maintainer should never have more than 1 or 2 things in To Do. * In Progress. Maintainers are actively working on these issues. * Done. These issues have been recently completed. Work is prioritized depending on: 1. The type of work. Bugs almost always get worked before features. 2. The versions impacted by the work. Versions which are already in use get worked first (so 1.x before 2.x before 3.x etc) 3. The relative importance/usefulness of the work. "Really useful" tends to get worked before "nice to have". 4. The amount of time to complete the work. Quick issues tend to get worked sooner than issues that will take a long time to resolve. There is no specific SLA around dpath, for features or bugs. However, generally speaking: * All issues get triaged within 1 calendar month * High priority bugs get addressed on the monthly maintainers call * Very severe bugs are often fixed out of cycle in less than 30 days Note that we have not always had anything remotely resembling a rigorous process around this, so there are some bugs that have lingered for several years. This is not something we intend to repeat. Taking and Completing Work ========================== Anyone who wants to is welcome to submit a pull request against a given issue. You do not need any special maintainer permissions to say "hey, I know how to solve that, let me send up a PR". The more complete process goes: 1. Decide what issue(s) you will be working on 2. On the Projects tab on Github, move those items to the To Do column on the appropriate board 3. For the item you are ACTIVELY WORKING, move that item to "In Progress" 4. Create a fork of dpath-python, and name your branch for the work. We name bugfixes as "bugfix/ISSUENUMBER_shortname"; features are named "feature/ISSUENUMBER_shortname". 5. Complete and push your work on your fork. Use tox to test your work against the test suites. Features MUST ship with at least one new unit test that covers the new functionality. Bugfixes MUST ship with one new test (or an updated old test) that guards against regression. 6. Send your pull request 7. If accepted, the maintainers will merge your pull request and close the issue. Branching Strategy ================== We run a clean bleeding edge master. 
Long term support for major version numbers are broken out into version branches. * master : Current 3.x (bleeding edge) development * version/1.x : 1.x series bugfixes * version/2.x : 2.x series features and bugfixes We name bugfixes as "bugfix/ISSUENUMBER_shortname"; features are named "feature/ISSUENUMBER_shortname". All branches representing work against an issue must have the issue number in the branch name. Cutting a New Release ===================== Releases for dpath occur automatically from Github Actions based on version changes on the master branch. Due to legacy reasons older tag names do not follow a uniform format: akesterson@akesterson:~/dpath-python$ git tag 1.0-0 1.1 1.2-66 1.2-68 1.2-70 build,1.2,70 build,1.2,71 build,1.2,72 build,1.3,0 build,1.3,1 build,1.3,2 build,1.3,3 build,1.4,0 build,1.4,1 build,1.4,3 build,1.5,0 build,2.0,0 Moving forward version numbers and tag names will be identical and follow the standard semver format. The version string is stored in `dpath/version.py` and tag names/release versions are generated using this string. akesterson@akesterson:~/dpath-python$ cat dpath/version.py VERSION = "2.0.0" To cut a new release, follow this procedure: 1. Commit a new `dpath/version.py` on the appropriate branch with the format "MAJOR.MINOR.RELEASE". 2. Github Actions SHOULD push the new release to PyPI on merge to `master`. See `.github/workflows/deploy.yml` for more information. If the Github workflow fails to update pypi, follow the instructions on manually creating a release, here: https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives Deployment CI was previously implemented using [Travis CI](https://travis-ci.org/github/akesterson/dpath-python). Running Tests ============= Tests are managed using [tox](https://tox.readthedocs.io/en/latest/). Environment creation and dependency installation is managed by this tool, all one has to do is install it with `pip` and run `tox` in this repo's root directory. Tests can also be run with Github Actions via the [tests.yml](https://github.com/dpath-maintainers/dpath-python/actions/workflows/tests.yml) workflow. This workflow will run automatically on pretty much any commit to any branch of this repo but manual runs are also available. dpath-python-2.2.0/MANIFEST.in000066400000000000000000000001231463241605500156720ustar00rootroot00000000000000include LICENSE.txt include README.md include README.rst recursive-include tests * dpath-python-2.2.0/README.rst000066400000000000000000000345301463241605500156340ustar00rootroot00000000000000dpath-python ============ |PyPI| |Python Version| |Build Status| |Gitter| A python library for accessing and searching dictionaries via /slashed/paths ala xpath Basically it lets you glob over a dictionary as if it were a filesystem. It allows you to specify globs (ala the bash eglob syntax, through some advanced fnmatch.fnmatch magic) to access dictionary elements, and provides some facility for filtering those results. sdists are available on pypi: http://pypi.python.org/pypi/dpath Installing ========== The best way to install dpath is via easy\_install or pip. :: easy_install dpath pip install dpath Using Dpath =========== .. code-block:: python import dpath Separators ========== All of the functions in this library (except 'merge') accept a 'separator' argument, which is the character that should separate path components. The default is '/', but you can set it to whatever you want. Searching ========= Suppose we have a dictionary like this: .. 
code-block:: python x = { "a": { "b": { "3": 2, "43": 30, "c": [], "d": ['red', 'buggy', 'bumpers'], } } } ... And we want to ask a simple question, like "Get me the value of the key '43' in the 'b' hash which is in the 'a' hash". That's easy. .. code-block:: pycon >>> help(dpath.get) Help on function get in module dpath: get(obj, glob, separator='/') Given an object which contains only one possible match for the given glob, return the value for the leaf matching the given glob. If more than one leaf matches the glob, ValueError is raised. If the glob is not found, KeyError is raised. >>> dpath.get(x, '/a/b/43') 30 Or you could say "Give me a new dictionary with the values of all elements in ``x['a']['b']`` where the key is equal to the glob ``'[cd]'``. Okay. .. code-block:: pycon >>> help(dpath.search) Help on function search in module dpath: search(obj, glob, yielded=False) Given a path glob, return a dictionary containing all keys that matched the given glob. If 'yielded' is true, then a dictionary will not be returned. Instead tuples will be yielded in the form of (path, value) for every element in the document that matched the glob. ... Sounds easy! .. code-block:: pycon >>> result = dpath.search(x, "a/b/[cd]") >>> print(json.dumps(result, indent=4, sort_keys=True)) { "a": { "b": { "c": [], "d": [ "red", "buggy", "bumpers" ] } } } ... Wow that was easy. What if I want to iterate over the results, and not get a merged view? .. code-block:: pycon >>> for x in dpath.search(x, "a/b/[cd]", yielded=True): print(x) ... ('a/b/c', []) ('a/b/d', ['red', 'buggy', 'bumpers']) ... Or what if I want to just get all the values back for the glob? I don't care about the paths they were found at: .. code-block:: pycon >>> help(dpath.values) Help on function values in module dpath: values(obj, glob, separator='/', afilter=None, dirs=True) Given an object and a path glob, return an array of all values which match the glob. The arguments to this function are identical to those of search(), and it is primarily a shorthand for a list comprehension over a yielded search call. >>> dpath.values(x, '/a/b/d/*') ['red', 'buggy', 'bumpers'] Example: Setting existing keys ============================== Let's use that same dictionary, and set keys like 'a/b/[cd]' to the value 'Waffles'. .. code-block:: pycon >>> help(dpath.set) Help on function set in module dpath: set(obj, glob, value) Given a path glob, set all existing elements in the document to the given value. Returns the number of elements changed. >>> dpath.set(x, 'a/b/[cd]', 'Waffles') 2 >>> print(json.dumps(x, indent=4, sort_keys=True)) { "a": { "b": { "3": 2, "43": 30, "c": "Waffles", "d": "Waffles" } } } Example: Adding new keys ======================== Let's make a new key with the path 'a/b/e/f/g', set it to "Roffle". This behaves like 'mkdir -p' in that it makes all the intermediate paths necessary to get to the terminus. .. code-block:: pycon >>> help(dpath.new) Help on function new in module dpath: new(obj, path, value) Set the element at the terminus of path to value, and create it if it does not exist (as opposed to 'set' that can only change existing keys). path will NOT be treated like a glob. If it has globbing characters in it, they will become part of the resulting keys >>> dpath.new(x, 'a/b/e/f/g', "Roffle") >>> print(json.dumps(x, indent=4, sort_keys=True)) { "a": { "b": { "3": 2, "43": 30, "c": "Waffles", "d": "Waffles", "e": { "f": { "g": "Roffle" } } } } } This works the way we expect with lists, as well. 
If you have a list object and set index 10 of that list object, it will grow the list object with None entries in order to make it big enough: .. code-block:: pycon >>> dpath.new(x, 'a/b/e/f/h', []) >>> dpath.new(x, 'a/b/e/f/h/13', 'Wow this is a big array, it sure is lonely in here by myself') >>> print(json.dumps(x, indent=4, sort_keys=True)) { "a": { "b": { "3": 2, "43": 30, "c": "Waffles", "d": "Waffles", "e": { "f": { "g": "Roffle", "h": [ null, null, null, null, null, null, null, null, null, null, null, null, null, "Wow this is a big array, it sure is lonely in here by myself" ] } } } } } Handy! Example: Deleting Existing Keys =============================== To delete keys in an object, use dpath.delete, which accepts the same globbing syntax as the other methods. .. code-block:: pycon >>> help(dpath.delete) delete(obj, glob, separator='/', afilter=None): Given a path glob, delete all elements that match the glob. Returns the number of deleted objects. Raises PathNotFound if no paths are found to delete. Example: Merging ================ Also, check out dpath.merge. The python dict update() method is great and all but doesn't handle merging dictionaries deeply. This one does. .. code-block:: pycon >>> help(dpath.merge) Help on function merge in module dpath: merge(dst, src, afilter=None, flags=4, _path='') Merge source into destination. Like dict.update() but performs deep merging. flags is an OR'ed combination of MergeType enum members. * ADDITIVE : List objects are combined onto one long list (NOT a set). This is the default flag. * REPLACE : Instead of combining list objects, when 2 list objects are at an equal depth of merge, replace the destination with the source. * TYPESAFE : When 2 keys at equal levels are of different types, raise a TypeError exception. By default, the source replaces the destination in this situation. >>> y = {'a': {'b': { 'e': {'f': {'h': [None, 0, 1, None, 13, 14]}}}, 'c': 'RoffleWaffles'}} >>> print(json.dumps(y, indent=4, sort_keys=True)) { "a": { "b": { "e": { "f": { "h": [ null, 0, 1, null, 13, 14 ] } } }, "c": "RoffleWaffles" } } >>> dpath.merge(x, y) >>> print(json.dumps(x, indent=4, sort_keys=True)) { "a": { "b": { "3": 2, "43": 30, "c": "Waffles", "d": "Waffles", "e": { "f": { "g": "Roffle", "h": [ null, 0, 1, null, 13, 14, null, null, null, null, null, null, null, "Wow this is a big array, it sure is lonely in here by myself" ] } } }, "c": "RoffleWaffles" } } Now that's handy. You shouldn't try to use this as a replacement for the deepcopy method, however - while merge does create new dict and list objects inside the target, the terminus objects (strings and ints) are not copied, they are just re-referenced in the merged object. Filtering ========= All of the methods in this library (except new()) support a 'afilter' argument. This can be set to a function that will return True or False to say 'yes include that value in my result set' or 'no don't include it'. Filtering functions receive every terminus node in a search - e.g., anything that is not a dict or a list, at the very end of the path. For each value, they return True to include that value in the result set, or False to exclude it. Consider this example. Given the source dictionary, we want to find ALL keys inside it, but we only really want the ones that contain "ffle" in them: .. code-block:: pycon >>> print(json.dumps(x, indent=4, sort_keys=True)) { "a": { "b": { "3": 2, "43": 30, "c": "Waffles", "d": "Waffles", "e": { "f": { "g": "Roffle" } } } } } >>> def afilter(x): ... 
if "ffle" in str(x): ... return True ... return False ... >>> result = dpath.search(x, '**', afilter=afilter) >>> print(json.dumps(result, indent=4, sort_keys=True)) { "a": { "b": { "c": "Waffles", "d": "Waffles", "e": { "f": { "g": "Roffle" } } } } } Obviously filtering functions can perform more advanced tests (regular expressions, etc etc). Key Names ========= By default, dpath only understands dictionary keys that are integers or strings. String keys must be non-empty. You can change this behavior by setting a library-wide dpath option: .. code-block:: python import dpath.options dpath.options.ALLOW_EMPTY_STRING_KEYS = True Again, by default, this behavior is OFF, and empty string keys will result in ``dpath.exceptions.InvalidKeyName`` being thrown. Separator got you down? Use lists as paths ========================================== The default behavior in dpath is to assume that the path given is a string, which must be tokenized by splitting at the separator to yield a distinct set of path components against which dictionary keys can be individually glob tested. However, this presents a problem when you want to use paths that have a separator in their name; the tokenizer cannot properly understand what you mean by '/a/b/c' if it is possible for '/' to exist as a valid character in a key name. To get around this, you can sidestep the whole "filesystem path" style, and abandon the separator entirely, by using lists as paths. All of the methods in dpath.* support the use of a list instead of a string as a path. So for example: .. code-block:: python >>> x = { 'a': {'b/c': 0}} >>> dpath.get(['a', 'b/c']) 0 dpath.segments : The Low-Level Backend ====================================== dpath is where you want to spend your time: this library has the friendly functions that will understand simple string globs, afilter functions, etc. dpath.segments is the backend pathing library. It passes around tuples of path components instead of string globs. .. |PyPI| image:: https://img.shields.io/pypi/v/dpath.svg?style=flat :target: https://pypi.python.org/pypi/dpath/ :alt: PyPI: Latest Version .. |Python Version| image:: https://img.shields.io/pypi/pyversions/dpath?style=flat :target: https://pypi.python.org/pypi/dpath/ :alt: Supported Python Version .. |Build Status| image:: https://github.com/dpath-maintainers/dpath-python/actions/workflows/tests.yml/badge.svg :target: https://github.com/dpath-maintainers/dpath-python/actions/workflows/tests.yml .. |Gitter| image:: https://badges.gitter.im/dpath-python/chat.svg :target: https://gitter.im/dpath-python/chat?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge :alt: Gitter Contributors ============ We would like to thank the community for their interest and involvement. You have all made this project significantly better than the sum of its parts, and your continued feedback makes it better every day. Thank you so much! The following authors have contributed to this project, in varying capacities: + Caleb Case + Andrew Kesterson + Marc Abramowitz + Richard Han + Stanislav Ochotnicky + Misja Hoebe + Gagandeep Singh + Alan Gibson And many others! If we've missed you please open an PR and add your name here. 
dpath-python-2.2.0/dpath/000077500000000000000000000000001463241605500152405ustar00rootroot00000000000000dpath-python-2.2.0/dpath/__init__.py000066400000000000000000000277061463241605500173650ustar00rootroot00000000000000# Needed for pre-3.10 versions from __future__ import annotations __all__ = [ "new", "delete", "set", "get", "values", "search", "merge", "exceptions", "options", "segments", "types", "version", "MergeType", "PathSegment", "Filter", "Glob", "Path", "Hints", "Creator", ] from collections.abc import MutableMapping, MutableSequence from typing import Union, List, Any, Callable, Optional from dpath import segments, options from dpath.exceptions import InvalidKeyName, PathNotFound from dpath.types import MergeType, PathSegment, Creator, Filter, Glob, Path, Hints _DEFAULT_SENTINEL = object() def _split_path(path: Path, separator: Optional[str] = "/") -> Union[List[PathSegment], PathSegment]: """ Given a path and separator, return a tuple of segments. If path is already a non-leaf thing, return it. Note that a string path with the separator at index[0] will have the separator stripped off. If you pass a list path, the separator is ignored, and is assumed to be part of each key glob. It will not be stripped. """ if not segments.leaf(path): split_segments = path else: split_segments = path.lstrip(separator).split(separator) return split_segments def new(obj: MutableMapping, path: Path, value, separator="/", creator: Creator | None = None) -> MutableMapping: """ Set the element at the terminus of path to value, and create it if it does not exist (as opposed to 'set' that can only change existing keys). path will NOT be treated like a glob. If it has globbing characters in it, they will become part of the resulting keys creator allows you to pass in a creator method that is responsible for creating missing keys at arbitrary levels of the path (see the help for dpath.path.set) """ split_segments = _split_path(path, separator) if creator: return segments.set(obj, split_segments, value, creator=creator) return segments.set(obj, split_segments, value) def delete(obj: MutableMapping, glob: Glob, separator="/", afilter: Filter | None = None) -> int: """ Given a obj, delete all elements that match the glob. Returns the number of deleted objects. Raises PathNotFound if no paths are found to delete. """ globlist = _split_path(glob, separator) def f(obj, pair, counter): (path_segments, value) = pair # Skip segments if they no longer exist in obj. if not segments.has(obj, path_segments): return matched = segments.match(path_segments, globlist) selected = afilter and segments.leaf(value) and afilter(value) if (matched and not afilter) or selected: key = path_segments[-1] parent = segments.get(obj, path_segments[:-1]) # Deletion behavior depends on parent type if isinstance(parent, MutableMapping): del parent[key] else: # Handle sequence types # TODO: Consider cases where type isn't a simple list (e.g. set) if len(parent) - 1 == key: # Removing the last element of a sequence. It can be # truly removed without affecting the ordering of # remaining items. # # Note: In order to achieve proper behavior we are # relying on the reverse iteration of # non-dictionaries from segments.kvs(). # Otherwise we'd be unable to delete all the tails # of a list and end up with None values when we # don't need them. del parent[key] else: # This key can't be removed completely because it # would affect the order of items that remain in our # result. 
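# Instead, stub the value out with None and still count it as deleted.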
parent[key] = None counter[0] += 1 [deleted] = segments.foldm(obj, f, [0]) if not deleted: raise PathNotFound(f"Could not find {glob} to delete it") return deleted def set(obj: MutableMapping, glob: Glob, value, separator="/", afilter: Filter | None = None) -> int: """ Given a path glob, set all existing elements in the document to the given value. Returns the number of elements changed. """ globlist = _split_path(glob, separator) def f(obj, pair, counter): (path_segments, found) = pair # Skip segments if they no longer exist in obj. if not segments.has(obj, path_segments): return matched = segments.match(path_segments, globlist) selected = afilter and segments.leaf(found) and afilter(found) if (matched and not afilter) or (matched and selected): segments.set(obj, path_segments, value, creator=None) counter[0] += 1 [changed] = segments.foldm(obj, f, [0]) return changed def get( obj: MutableMapping, glob: Glob, separator="/", default: Any = _DEFAULT_SENTINEL ) -> Union[MutableMapping, object, Callable]: """ Given an object which contains only one possible match for the given glob, return the value for the leaf matching the given glob. If the glob is not found and a default is provided, the default is returned. If more than one leaf matches the glob, ValueError is raised. If the glob is not found and a default is not provided, KeyError is raised. """ if isinstance(glob, str) and glob == "/" or len(glob) == 0: return obj globlist = _split_path(glob, separator) def f(_, pair, results): (path_segments, found) = pair if segments.match(path_segments, globlist): results.append(found) if len(results) > 1: return False results = segments.fold(obj, f, []) if len(results) == 0: if default is not _DEFAULT_SENTINEL: return default raise KeyError(glob) elif len(results) > 1: raise ValueError(f"dpath.get() globs must match only one leaf: {glob}") return results[0] def values(obj: MutableMapping, glob: Glob, separator="/", afilter: Filter | None = None, dirs=True): """ Given an object and a path glob, return an array of all values which match the glob. The arguments to this function are identical to those of search(). """ yielded = True return [v for p, v in search(obj, glob, yielded, separator, afilter, dirs)] def search(obj: MutableMapping, glob: Glob, yielded=False, separator="/", afilter: Filter | None = None, dirs=True): """ Given a path glob, return a dictionary containing all keys that matched the given glob. If 'yielded' is true, then a dictionary will not be returned. Instead, tuples will be yielded in the form of (path, value) for every element in the document that matched the glob. """ split_glob = _split_path(glob, separator) def keeper(path, found): """ Generalized test for use in both yielded and folded cases. Returns True if we want this result. Otherwise, returns False. """ if not dirs and not segments.leaf(found): return False matched = segments.match(path, split_glob) selected = afilter and afilter(found) return (matched and not afilter) or (matched and selected) if yielded: def yielder(): for path, found in segments.walk(obj): if keeper(path, found): yield separator.join(map(segments.int_str, path)), found return yielder() else: def f(obj, pair, result): (path, found) = pair if keeper(path, found): segments.set(result, path, found, hints=segments.types(obj, path)) return segments.fold(obj, f, {}) def merge( dst: MutableMapping, src: MutableMapping, separator="/", afilter: Filter | None = None, flags=MergeType.ADDITIVE ): """ Merge source into destination. 
Like dict.update() but performs deep merging. NOTE: This does not do a deep copy of the source object. Applying merge will result in references to src being present in the dst tree. If you do not want src to potentially be modified by other changes in dst (e.g. more merge calls), then use a deep copy of src. NOTE that merge() does NOT copy objects - it REFERENCES. If you merge take these two dictionaries: >>> a = {'a': [0] } >>> b = {'a': [1] } ... and you merge them into an empty dictionary, like so: >>> d = {} >>> dpath.merge(d, a) >>> dpath.merge(d, b) ... you might be surprised to find that a['a'] now contains [0, 1]. This is because merge() says (d['a'] = a['a']), and thus creates a reference. This reference is then modified when b is merged, causing both d and a to have ['a'][0, 1]. To avoid this, make your own deep copies of source objects that you intend to merge. For further notes see https://github.com/akesterson/dpath-python/issues/58 flags is an OR'ed combination of MergeType enum members. """ filtered_src = search(src, '**', afilter=afilter, separator='/') def are_both_mutable(o1, o2): mapP = isinstance(o1, MutableMapping) and isinstance(o2, MutableMapping) seqP = isinstance(o1, MutableSequence) and isinstance(o2, MutableSequence) if mapP or seqP: return True return False def merger(dst, src, _segments=()): for key, found in segments.make_walkable(src): # Our current path in the source. current_path = _segments + (key,) if len(key) == 0 and not options.ALLOW_EMPTY_STRING_KEYS: raise InvalidKeyName("Empty string keys not allowed without " "dpath.options.ALLOW_EMPTY_STRING_KEYS=True: " f"{current_path}") # Validate src and dst types match. if flags & MergeType.TYPESAFE: if segments.has(dst, current_path): target = segments.get(dst, current_path) tt = type(target) ft = type(found) if tt != ft: path = separator.join(current_path) raise TypeError(f"Cannot merge objects of type {tt} and {ft} at {path}") # Path not present in destination, create it. if not segments.has(dst, current_path): segments.set(dst, current_path, found) continue # Retrieve the value in the destination. target = segments.get(dst, current_path) # If the types don't match, replace it. if type(found) is not type(target) and not are_both_mutable(found, target): segments.set(dst, current_path, found) continue # If target is a leaf, the replace it. if segments.leaf(target): segments.set(dst, current_path, found) continue # At this point we know: # # * The target exists. # * The types match. # * The target isn't a leaf. # # Pretend we have a sequence and account for the flags. try: if flags & MergeType.ADDITIVE: target += found continue if flags & MergeType.REPLACE: try: target[""] except TypeError: segments.set(dst, current_path, found) continue except Exception: raise except Exception: # We have a dictionary like thing and we need to attempt to # recursively merge it. 
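# (e.g. `target += found` raised a TypeError because the target is a mapping rather than a sequence)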
merger(dst, found, current_path) merger(dst, filtered_src) return dst dpath-python-2.2.0/dpath/exceptions.py000066400000000000000000000006611463241605500177760ustar00rootroot00000000000000class InvalidGlob(Exception): """The glob passed is invalid.""" pass class PathNotFound(Exception): """One or more elements of the requested path did not exist in the object""" pass class InvalidKeyName(Exception): """This key contains the separator character or another invalid character""" pass class FilteredValue(Exception): """Unable to return a value, since the filter rejected it""" pass dpath-python-2.2.0/dpath/options.py000066400000000000000000000000401463241605500172770ustar00rootroot00000000000000ALLOW_EMPTY_STRING_KEYS = False dpath-python-2.2.0/dpath/py.typed000066400000000000000000000000001463241605500167250ustar00rootroot00000000000000dpath-python-2.2.0/dpath/segments.py000066400000000000000000000316641463241605500174510ustar00rootroot00000000000000from copy import deepcopy from fnmatch import fnmatchcase from typing import Sequence, Tuple, Iterator, Any, Union, Optional, MutableMapping, MutableSequence from dpath import options from dpath.exceptions import InvalidGlob, InvalidKeyName, PathNotFound from dpath.types import PathSegment, Creator, Hints, Glob, Path, ListIndex def make_walkable(node) -> Iterator[Tuple[PathSegment, Any]]: """ Returns an iterator which yields tuple pairs of (node index, node value), regardless of node type. * For dict nodes `node.items()` will be returned. * For sequence nodes (lists/tuples/etc.) a zip between index number and index value will be returned. * Edge cases will result in an empty iterator being returned. make_walkable(node) -> (generator -> (key, value)) """ try: return iter(node.items()) except AttributeError: try: indices = range(len(node)) # Convert all list indices to objects so negative indices are supported. indices = map(lambda i: ListIndex(i, len(node)), indices) return zip(indices, node) except TypeError: # This can happen in cases where the node isn't leaf(node) == True, # but also isn't actually iterable. Instead of this being an error # we will treat this node as if it has no children. return enumerate([]) def leaf(thing): """ Return True if thing is a leaf, otherwise False. """ leaves = (bytes, str, int, float, bool, type(None)) return isinstance(thing, leaves) def leafy(thing): """ Same as leaf(thing), but also treats empty sequences and dictionaries as True. """ try: return leaf(thing) or len(thing) == 0 except TypeError: # In case thing has no len() return False def walk(obj, location=()): """ Yield all valid (segments, value) pairs (from a breadth-first search, right-to-left on sequences). walk(obj) -> (generator -> (segments, value)) """ if not leaf(obj): for k, v in make_walkable(obj): length = None try: length = len(k) except TypeError: pass if length is not None and length == 0 and not options.ALLOW_EMPTY_STRING_KEYS: raise InvalidKeyName("Empty string keys not allowed without " "dpath.options.ALLOW_EMPTY_STRING_KEYS=True: " f"{location + (k,)}") yield (location + (k,)), v for k, v in make_walkable(obj): for found in walk(v, location + (k,)): yield found def get(obj, segments: Path): """ Return the value at the path indicated by segments. 
get(obj, segments) -> value """ current = obj for i, segment in enumerate(segments): if leaf(current): raise PathNotFound(f"Path: {segments}[{i}]") if isinstance(current, Sequence) and isinstance(segment, str) and segment.isdecimal(): segment = int(segment) current = current[segment] return current def has(obj, segments): """ Return True if the path exists in the obj. Otherwise return False. has(obj, segments) -> bool """ try: get(obj, segments) return True except: return False def expand(segments): """ Yield a tuple of segments for each possible length of segments. Starting from the shortest length of segments and increasing by 1. expand(keys) -> (..., keys[:-2], keys[:-1]) """ index = 0 for _ in segments: index += 1 yield segments[:index] def types(obj, segments): """ For each segment produce a tuple of (segment, type(value)). types(obj, segments) -> ((segment[0], type0), (segment[1], type1), ...) """ result = [] for depth in expand(segments): result.append((depth[-1], type(get(obj, depth)))) return tuple(result) def leaves(obj): """ Yield all leaves as (segment, value) pairs. leaves(obj) -> (generator -> (segment, value)) """ return filter(lambda p: leafy(p[1]), walk(obj)) def int_str(segment: PathSegment) -> PathSegment: """ If the segment is an integer, return the string conversion. Otherwise return the segment unchanged. The conversion uses 'str'. int_str(segment) -> str """ if isinstance(segment, int): return str(segment) return segment class Star(object): """ Used to create a global STAR symbol for tracking stars added when expanding star-star globs. """ pass STAR = Star() def match(segments: Path, glob: Glob): """ Return True if the segments match the given glob, otherwise False. For the purposes of matching, integers are converted to their string equivalent (via str(segment)). This conversion happens on both the segments and the glob. This implies you cannot (with this function) differentiate a list index 0 from a dictionary key '0'. Star-star segments are a special case in that they will expand to 0 or more star segments and the type will be coerced to match that of the segment. A segment is considered to match a glob if the function fnmatch.fnmatchcase returns True. If fnmatchcase returns False or throws an exception the result will be False. match(segments, glob) -> bool """ segments = tuple(segments) glob = tuple(glob) path_len = len(segments) glob_len = len(glob) # The star-star normalized glob ('**' has been removed). ss_glob = glob if '**' in glob: # Index of the star-star in the glob. ss = glob.index('**') if '**' in glob[ss + 1:]: raise InvalidGlob(f"Invalid glob. Only one '**' is permitted per glob: {glob}") # Convert '**' segment into multiple '*' segments such that the # lengths of the path and glob match. '**' also can collapse and # result in the removal of 1 segment. if path_len >= glob_len: # Path and glob have the same number of stars or the glob # needs more stars (which we add). more_stars = (STAR,) * (path_len - glob_len + 1) ss_glob = glob[:ss] + more_stars + glob[ss + 1:] elif path_len == glob_len - 1: # Glob has one more segment than the path. Here we remove # the '**' segment altogether to match the lengths up. ss_glob = glob[:ss] + glob[ss + 1:] # If we were successful in matching up the lengths, then we can # compare them using fnmatch. if path_len == len(ss_glob): i = zip(segments, ss_glob) for s, g in i: # Match the stars we added to the glob to the type of the # segment itself. 
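# A bytes segment gets a bytes wildcard so fnmatchcase compares like types; everything else uses the str wildcard.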
if g is STAR: if isinstance(s, bytes): g = b'*' else: g = '*' try: # If search path segment (s) is an int then assume currently evaluated index (g) might be a sequence # index as well. Try converting it to an int. if isinstance(s, int) and s == int(g): continue except: # Will reach this point if g can't be converted to an int (e.g. when g is a RegEx pattern). # In this case convert s to a str so fnmatch can work on it. s = str(s) try: # Let's see if the glob matches. We will turn any kind of # exception while attempting to match into a False for the # match. if not fnmatchcase(s, g): return False except: return False # All of the segments matched so we have a complete match. return True # Otherwise the lengths aren't the same and we couldn't have a # match. return False def extend(thing: MutableSequence, index: int, value=None): """ Extend a sequence like thing such that it contains at least index + 1 many elements. The extension values will be None (default). extend(thing, int) -> [thing..., None, ...] """ try: expansion = type(thing)() # Using this rather than the multiply notation in order to support a # wider variety of sequence like things. extra = (index + 1) - len(thing) for i in range(extra): expansion += [value] thing.extend(expansion) except TypeError: # We attempted to extend something that doesn't support it. In # this case we assume thing is actually more like a dictionary # and doesn't need to be extended. pass return thing def _default_creator( current: Union[MutableMapping, Sequence], segments: Sequence[PathSegment], i: int, hints: Sequence[Tuple[PathSegment, type]] = () ): """ Create missing path components. If the segment is an int, then it will create a list. Otherwise a dictionary is created. set(obj, segments, value) -> obj """ segment = segments[i] length = len(segments) if isinstance(current, Sequence): segment = int(segment) if isinstance(current, MutableSequence): extend(current, segment) # Infer the type from the hints provided. if i < len(hints): current[segment] = hints[i][1]() else: # Peek at the next segment to determine if we should be # creating an array for it to access or dictionary. if i + 1 < length: segment_next = segments[i + 1] else: segment_next = None if isinstance(segment_next, int) or (isinstance(segment_next, str) and segment_next.isdecimal()): current[segment] = [] else: current[segment] = {} def set( obj: MutableMapping, segments: Sequence[PathSegment], value, creator: Optional[Creator] = _default_creator, hints: Hints = () ) -> MutableMapping: """ Set the value in obj at the place indicated by segments. If creator is not None (default _default_creator), then call the creator function to create any missing path components. set(obj, segments, value) -> obj """ current = obj length = len(segments) # For everything except the last value, walk down the path and # create if creator is set. for (i, segment) in enumerate(segments[:-1]): # If segment is non-int but supposed to be a sequence index if isinstance(segment, str) and isinstance(current, Sequence) and segment.isdecimal(): segment = int(segment) try: # Optimistically try to get the next value. This makes the # code agnostic to whether current is a list or a dict. # Unfortunately, for our use, 'x in thing' for lists checks # values, not keys whereas dicts check keys. 
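# If the lookup fails, the except clause below hands off to the creator (when one is provided) to build the missing component.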
current[segment] except: if creator is not None: creator(current, segments, i, hints) else: raise current = current[segment] if i != length - 1 and leaf(current): raise PathNotFound(f"Path: {segments}[{i}]") last_segment = segments[-1] # Resolve ambiguity of last segment if isinstance(last_segment, str) and isinstance(current, Sequence) and last_segment.isdecimal(): last_segment = int(last_segment) if isinstance(last_segment, int): extend(current, last_segment) current[last_segment] = value return obj def fold(obj, f, acc): """ Walk obj applying f to each path and returning accumulator acc. The function f will be called, for each result in walk(obj): f(obj, (segments, value), acc) If the function f returns False (exactly False), then processing will stop. Otherwise processing will continue with the next value retrieved from the walk. fold(obj, f(obj, (segments, value), acc) -> bool, acc) -> acc """ for pair in walk(obj): if f(obj, pair, acc) is False: break return acc def foldm(obj, f, acc): """ Same as fold(), but permits mutating obj. This requires all paths in walk(obj) to be loaded into memory (whereas fold does not). foldm(obj, f(obj, (segments, value), acc) -> bool, acc) -> acc """ pairs = tuple(walk(obj)) for pair in pairs: if f(obj, pair, acc) is False: break return acc def view(obj: MutableMapping, glob: Glob): """ Return a view of the object where the glob matches. A view retains the same form as the obj, but is limited to only the paths that matched. Views are new objects (a deepcopy of the matching values). view(obj, glob) -> obj' """ def f(obj, pair, result): (segments, value) = pair if match(segments, glob): if not has(result, segments): set(result, segments, deepcopy(value), hints=types(obj, segments)) return fold(obj, f, type(obj)()) dpath-python-2.2.0/dpath/types.py000066400000000000000000000046121463241605500167610ustar00rootroot00000000000000from enum import IntFlag, auto from typing import Union, Any, Callable, Sequence, Tuple, List, Optional, MutableMapping class ListIndex(int): """Same as a normal int but mimics the behavior of list indices (can be compared to a negative number).""" def __new__(cls, value: int, list_length: int, *args, **kwargs): if value >= list_length: raise TypeError( f"Tried to initiate a {cls.__name__} with a value ({value}) " f"greater than the provided max value ({list_length})" ) obj = super().__new__(cls, value) obj.list_length = list_length return obj def __eq__(self, other): if not isinstance(other, int): return False # Based on how Python sequences handle negative indices as described in footnote (3) of https://docs.python.org/3/library/stdtypes.html#common-sequence-operations return other == int(self) or self.list_length + other == int(self) def __repr__(self): return f"<{self.__class__.__name__} {int(self)}/{self.list_length}>" def __str__(self): return str(int(self)) class MergeType(IntFlag): ADDITIVE = auto() """List objects are combined onto one long list (NOT a set). This is the default flag.""" REPLACE = auto() """Instead of combining list objects, when 2 list objects are at an equal depth of merge, replace the destination \ with the source.""" TYPESAFE = auto() """When 2 keys at equal levels are of different types, raise a TypeError exception. By default, the source \ replaces the destination in this situation.""" PathSegment = Union[int, str, bytes] """Type alias for dict path segments where integers are explicitly casted.""" Filter = Callable[[Any], bool] """Type alias for filter functions. 
(Any) -> bool""" Glob = Union[str, Sequence[str]] """Type alias for glob parameters.""" Path = Union[str, Sequence[PathSegment]] """Type alias for path parameters.""" Hints = Sequence[Tuple[PathSegment, type]] """Type alias for creator function hint sequences.""" Creator = Callable[[Union[MutableMapping, List], Path, int, Optional[Hints]], None] """Type alias for creator functions. Example creator function signature: def creator( current: Union[MutableMapping, List], segments: Sequence[PathSegment], i: int, hints: Sequence[Tuple[PathSegment, type]] = () )""" dpath-python-2.2.0/dpath/util.py000066400000000000000000000025331463241605500165720ustar00rootroot00000000000000import warnings import dpath from dpath import _DEFAULT_SENTINEL from dpath.types import MergeType def deprecated(func): message = \ "The dpath.util package is being deprecated. All util functions have been moved to dpath package top level." def wrapper(*args, **kwargs): warnings.warn(message, DeprecationWarning, stacklevel=2) return func(*args, **kwargs) return wrapper @deprecated def new(obj, path, value, separator="/", creator=None): return dpath.new(obj, path, value, separator, creator) @deprecated def delete(obj, glob, separator="/", afilter=None): return dpath.delete(obj, glob, separator, afilter) @deprecated def set(obj, glob, value, separator="/", afilter=None): return dpath.set(obj, glob, value, separator, afilter) @deprecated def get(obj, glob, separator="/", default=_DEFAULT_SENTINEL): return dpath.get(obj, glob, separator, default) @deprecated def values(obj, glob, separator="/", afilter=None, dirs=True): return dpath.values(obj, glob, separator, afilter, dirs) @deprecated def search(obj, glob, yielded=False, separator="/", afilter=None, dirs=True): return dpath.search(obj, glob, yielded, separator, afilter, dirs) @deprecated def merge(dst, src, separator="/", afilter=None, flags=MergeType.ADDITIVE): return dpath.merge(dst, src, separator, afilter, flags) dpath-python-2.2.0/dpath/version.py000066400000000000000000000000221463241605500172710ustar00rootroot00000000000000VERSION = "2.2.0" dpath-python-2.2.0/flake8.ini000066400000000000000000000000701463241605500160100ustar00rootroot00000000000000[flake8] filename= setup.py, dpath/, tests/ dpath-python-2.2.0/maintainers_log.md000066400000000000000000000011761463241605500176420ustar00rootroot00000000000000# 03/29/2020 Attendees : Caleb, Andrew ## Old business : * Need to onboard new member Vladimir Ulogov * No movement * Need to make project board for 1.5 open bugs * Done ## New business : * Andrew to define maintainers meeting process and establish log of decisions, process for filing open action items * Andrew to forward maintainers invite to Vladimir and include in next monthly maintainers meeting * Andrew to set followup for 1wk from now to check for comments on PRs and cut release version for 1.x / 2.x * Andrew to rename LTS branches from version/1.0 version/2.0 to version/1.x and version/2.x dpath-python-2.2.0/setup.py000066400000000000000000000041051463241605500156520ustar00rootroot00000000000000import os from setuptools import setup import dpath.version long_description = open( os.path.join( os.path.dirname(__file__), 'README.rst' ) ).read() if __name__ == "__main__": setup( name="dpath", url="https://github.com/dpath-maintainers/dpath-python", version=dpath.version.VERSION, description="Filesystem-like pathing and searching for dictionaries", long_description=long_description, author=("Caleb Case, " "Andrew Kesterson"), author_email="calebcase@gmail.com, 
andrew@aklabs.net", license="MIT", install_requires=[], scripts=[], packages=["dpath"], data_files=[], package_data={"dpath": ["py.typed"]}, # Type hints are great. # Function annotations were added in Python 3.0. # Typing module was added in Python 3.5. # Variable annotations were added in Python 3.6. # Python versions that are >=3.6 are more popular. # (Source: https://github.com/hugovk/pypi-tools/blob/master/README.md) # # Conclusion: In order to accommodate type hinting support must be limited to Python versions >=3.6. # 3.6 was dropped because of EOL and this issue: https://github.com/actions/setup-python/issues/544 python_requires=">=3.7", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Topic :: Software Development :: Libraries :: Python Modules', 'Typing :: Typed', ], ) dpath-python-2.2.0/tests/000077500000000000000000000000001463241605500153025ustar00rootroot00000000000000dpath-python-2.2.0/tests/__init__.py000066400000000000000000000001051463241605500174070ustar00rootroot00000000000000import warnings warnings.simplefilter("always", DeprecationWarning) dpath-python-2.2.0/tests/test_broken_afilter.py000066400000000000000000000030511463241605500217000ustar00rootroot00000000000000import dpath import sys def test_broken_afilter(): def afilter(x): if x in [1, 2]: return True return False dict = { "a": { "view_failure": "a", "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } paths = [ 'a/b/c/e', 'a/b/c/f', ] for (path, value) in dpath.search(dict, '/**', yielded=True, afilter=afilter): assert path in paths assert "view_failure" not in dpath.search(dict, '/**', afilter=afilter)['a'] assert "d" not in dpath.search(dict, '/**', afilter=afilter)['a']['b']['c'] for (path, value) in dpath.search(dict, ['**'], yielded=True, afilter=afilter): assert path in paths assert "view_failure" not in dpath.search(dict, ['**'], afilter=afilter)['a'] assert "d" not in dpath.search(dict, ['**'], afilter=afilter)['a']['b']['c'] def filter(x): sys.stderr.write(str(x)) if hasattr(x, 'get'): return x.get('type', None) == 'correct' return False a = { 'actions': [ { 'type': 'correct' }, { 'type': 'incorrect' }, ], } results = [[x[0], x[1]] for x in dpath.search(a, 'actions/*', yielded=True)] results = [[x[0], x[1]] for x in dpath.search(a, 'actions/*', afilter=filter, yielded=True)] assert len(results) == 1 assert results[0][1]['type'] == 'correct' dpath-python-2.2.0/tests/test_delete.py000066400000000000000000000016631463241605500201630ustar00rootroot00000000000000from nose2.tools.such import helper import dpath import dpath.exceptions def test_delete_separator(): dict = { "a": { "b": 0, }, } dpath.delete(dict, ';a;b', separator=";") assert 'b' not in dict['a'] def test_delete_existing(): dict = { "a": { "b": 0, }, } dpath.delete(dict, '/a/b') assert 'b' not in dict['a'] def test_delete_missing(): dict = { "a": { }, } with helper.assertRaises(dpath.exceptions.PathNotFound): dpath.delete(dict, '/a/b') def test_delete_filter(): def afilter(x): if int(x) == 31: return True return False dict = { "a": { "b": 0, "c": 1, "d": 31, }, } dpath.delete(dict, '/a/*', afilter=afilter) assert 
dict['a']['b'] == 0 assert dict['a']['c'] == 1 assert 'd' not in dict['a'] dpath-python-2.2.0/tests/test_get_values.py000066400000000000000000000116101463241605500210500ustar00rootroot00000000000000import datetime import decimal import time from unittest import mock from nose2.tools.such import helper import dpath def test_util_get_root(): x = {'p': {'a': {'t': {'h': 'value'}}}} ret = dpath.get(x, '/p/a/t/h') assert ret == 'value' ret = dpath.get(x, '/') assert ret == x ret = dpath.get(x, []) assert ret == x def test_get_explicit_single(): ehash = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } assert dpath.get(ehash, '/a/b/c/f') == 2 assert dpath.get(ehash, ['a', 'b', 'c', 'f']) == 2 assert dpath.get(ehash, ['a', 'b', 'c', 'f'], default=5) == 2 assert dpath.get(ehash, ['does', 'not', 'exist'], default=None) is None assert dpath.get(ehash, ['doesnt', 'exist'], default=5) == 5 def test_get_glob_single(): ehash = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } assert dpath.get(ehash, '/a/b/*/f') == 2 assert dpath.get(ehash, ['a', 'b', '*', 'f']) == 2 assert dpath.get(ehash, ['a', 'b', '*', 'f'], default=5) == 2 assert dpath.get(ehash, ['doesnt', '*', 'exist'], default=6) == 6 def test_get_glob_multiple(): ehash = { "a": { "b": { "c": { "d": 0, }, "e": { "d": 0, }, }, }, } helper.assertRaises(ValueError, dpath.get, ehash, '/a/b/*/d') helper.assertRaises(ValueError, dpath.get, ehash, ['a', 'b', '*', 'd']) helper.assertRaises(ValueError, dpath.get, ehash, ['a', 'b', '*', 'd'], default=3) def test_get_absent(): ehash = {} helper.assertRaises(KeyError, dpath.get, ehash, '/a/b/c/d/f') helper.assertRaises(KeyError, dpath.get, ehash, ['a', 'b', 'c', 'd', 'f']) def test_values(): ehash = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } ret = dpath.values(ehash, '/a/b/c/*') assert isinstance(ret, list) assert 0 in ret assert 1 in ret assert 2 in ret ret = dpath.values(ehash, ['a', 'b', 'c', '*']) assert isinstance(ret, list) assert 0 in ret assert 1 in ret assert 2 in ret @mock.patch('dpath.search') def test_values_passes_through(searchfunc): searchfunc.return_value = [] def y(): return False dpath.values({}, '/a/b', ':', y, False) searchfunc.assert_called_with({}, '/a/b', True, ':', y, False) dpath.values({}, ['a', 'b'], ':', y, False) searchfunc.assert_called_with({}, ['a', 'b'], True, ':', y, False) def test_none_values(): d = {'p': {'a': {'t': {'h': None}}}} v = dpath.get(d, 'p/a/t/h') assert v is None def test_values_list(): a = { 'actions': [ { 'type': 'correct', }, { 'type': 'incorrect', }, ], } ret = dpath.values(a, 'actions/*') assert isinstance(ret, list) assert len(ret) == 2 def test_non_leaf_leaf(): # The leaves in this test aren't leaf(thing) == True, but we should still # be able to get them. They should also not prevent fetching other values. 
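# (callables, Decimals, datetimes and dataclass instances are neither leaf types nor walkable containers, so dpath treats them as terminal values)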
def func(x): return x testdict = { 'a': func, 'b': lambda x: x, 'c': [ { 'a', 'b', }, ], 'd': [ decimal.Decimal(1.5), decimal.Decimal(2.25), ], 'e': datetime.datetime(2020, 1, 1), 'f': { 'config': 'something', }, } # It should be possible to get the callables: assert dpath.get(testdict, 'a') == func assert dpath.get(testdict, 'b')(42) == 42 # It should be possible to get other values: assert dpath.get(testdict, 'c/0') == testdict['c'][0] assert dpath.get(testdict, 'd')[0] == testdict['d'][0] assert dpath.get(testdict, 'd/0') == testdict['d'][0] assert dpath.get(testdict, 'd/1') == testdict['d'][1] assert dpath.get(testdict, 'e') == testdict['e'] # Values should also still work: assert dpath.values(testdict, 'f/config') == ['something'] # Data classes should also be retrievable: try: import dataclasses except: return @dataclasses.dataclass class Connection: group_name: str channel_name: str last_seen: float testdict['g'] = { 'my-key': Connection( group_name='foo', channel_name='bar', last_seen=time.time(), ), } assert dpath.search(testdict, 'g/my*')['g']['my-key'] == testdict['g']['my-key'] dpath-python-2.2.0/tests/test_merge.py000066400000000000000000000100561463241605500200140ustar00rootroot00000000000000import copy from nose2.tools.such import helper import dpath from dpath import MergeType def test_merge_typesafe_and_separator(): src = { "dict": { "integer": 0, }, } dst = { "dict": { "integer": "3", }, } try: dpath.merge(dst, src, flags=(dpath.MergeType.ADDITIVE | dpath.MergeType.TYPESAFE), separator=";") except TypeError as e: assert str(e).endswith("dict;integer") return raise Exception("MERGE_TYPESAFE failed to raise an exception when merging between str and int!") def test_merge_simple_int(): src = { "integer": 0, } dst = { "integer": 3, } dpath.merge(dst, src) assert dst["integer"] == src["integer"], "%r != %r" % (dst["integer"], src["integer"]) def test_merge_simple_string(): src = { "string": "lol I am a string", } dst = { "string": "lol I am a string", } dpath.merge(dst, src) assert dst["string"] == src["string"], "%r != %r" % (dst["string"], src["string"]) def test_merge_simple_list_additive(): src = { "list": [7, 8, 9, 10], } dst = { "list": [0, 1, 2, 3], } dpath.merge(dst, src, flags=MergeType.ADDITIVE) assert dst["list"] == [0, 1, 2, 3, 7, 8, 9, 10], "%r != %r" % (dst["list"], [0, 1, 2, 3, 7, 8, 9, 10]) def test_merge_simple_list_replace(): src = { "list": [7, 8, 9, 10], } dst = { "list": [0, 1, 2, 3], } dpath.merge(dst, src, flags=dpath.MergeType.REPLACE) assert dst["list"] == [7, 8, 9, 10], "%r != %r" % (dst["list"], [7, 8, 9, 10]) def test_merge_simple_dict(): src = { "dict": { "key": "WEHAW", }, } dst = { "dict": { "key": "", }, } dpath.merge(dst, src) assert dst["dict"]["key"] == src["dict"]["key"], "%r != %r" % (dst["dict"]["key"], src["dict"]["key"]) def test_merge_filter(): def afilter(x): if "rubber" not in str(x): return False return True src = { "key": "metal", "key2": "rubber", "otherdict": { "key3": "I shouldn't be here", }, } dst = {} dpath.merge(dst, src, afilter=afilter) assert "key2" in dst assert "key" not in dst assert "otherdict" not in dst def test_merge_typesafe(): src = { "dict": { }, } dst = { "dict": [ ], } helper.assertRaises(TypeError, dpath.merge, dst, src, flags=dpath.MergeType.TYPESAFE) def test_merge_mutables(): class tcid(dict): pass class tcis(list): pass src = { "mm": { "a": "v1", }, "ms": [ 0, ], } dst = { "mm": tcid([ ("a", "v2"), ("casserole", "this should keep"), ]), "ms": tcis(['a', 'b', 'c']), } dpath.merge(dst, src) print(dst) assert 
dst["mm"]["a"] == src["mm"]["a"] assert dst['ms'][2] == 'c' assert "casserole" in dst["mm"] helper.assertRaises(TypeError, dpath.merge, dst, src, flags=dpath.MergeType.TYPESAFE) def test_merge_replace_1(): dct_a = {"a": {"b": [1, 2, 3]}} dct_b = {"a": {"b": [1]}} dpath.merge(dct_a, dct_b, flags=dpath.MergeType.REPLACE) assert len(dct_a['a']['b']) == 1 def test_merge_replace_2(): d1 = {'a': [0, 1, 2]} d2 = {'a': ['a']} dpath.merge(d1, d2, flags=dpath.MergeType.REPLACE) assert len(d1['a']) == 1 assert d1['a'][0] == 'a' def test_merge_list(): src = {"l": [1]} p1 = {"l": [2], "v": 1} p2 = {"v": 2} dst1 = {} for d in [copy.deepcopy(src), copy.deepcopy(p1)]: dpath.merge(dst1, d) dst2 = {} for d in [copy.deepcopy(src), copy.deepcopy(p2)]: dpath.merge(dst2, d) assert dst1["l"] == [1, 2] assert dst2["l"] == [1] dst1 = {} for d in [src, p1]: dpath.merge(dst1, d) dst2 = {} for d in [src, p2]: dpath.merge(dst2, d) assert dst1["l"] == [1, 2] assert dst2["l"] == [1, 2] dpath-python-2.2.0/tests/test_new.py000066400000000000000000000051641463241605500175120ustar00rootroot00000000000000import dpath def test_set_new_separator(): dict = { "a": { }, } dpath.new(dict, ';a;b', 1, separator=";") assert dict['a']['b'] == 1 dpath.new(dict, ['a', 'b'], 1, separator=";") assert dict['a']['b'] == 1 def test_set_new_dict(): dict = { "a": { }, } dpath.new(dict, '/a/b', 1) assert dict['a']['b'] == 1 dpath.new(dict, ['a', 'b'], 1) assert dict['a']['b'] == 1 def test_set_new_list(): dict = { "a": [ ], } dpath.new(dict, '/a/1', 1) assert dict['a'][1] == 1 assert dict['a'][0] is None dpath.new(dict, ['a', 1], 1) assert dict['a'][1] == 1 assert dict['a'][0] is None def test_set_list_with_dict_int_ambiguity(): d = {"list": [{"root": {"1": {"k": None}}}]} dpath.new(d, "list/0/root/1/k", "new") expected = {"list": [{"root": {"1": {"k": "new"}}}]} assert d == expected def test_int_segment_list_type_check(): d = {} dpath.new(d, "a/b/0/c/0", "hello") assert 'b' in d.get("a", {}) assert isinstance(d["a"]["b"], list) assert len(d["a"]["b"]) == 1 assert 'c' in d["a"]["b"][0] assert isinstance(d["a"]["b"][0]["c"], list) assert len(d["a"]["b"][0]["c"]) == 1 def test_int_segment_dict_type_check(): d = {"a": {"b": {"0": {}}}} dpath.new(d, "a/b/0/c/0", "hello") assert "b" in d.get("a", {}) assert isinstance(d["a"]["b"], dict) assert '0' in d["a"]["b"] assert 'c' in d["a"]["b"]["0"] assert isinstance(d["a"]["b"]["0"]["c"], list) def test_set_new_list_path_with_separator(): # This test kills many birds with one stone, forgive me dict = { "a": { }, } dpath.new(dict, ['a', 'b/c/d', 0], 1) assert len(dict['a']) == 1 assert len(dict['a']['b/c/d']) == 1 assert dict['a']['b/c/d'][0] == 1 def test_set_new_list_integer_path_with_creator(): d = {} def mycreator(obj, pathcomp, nextpathcomp, hints): print(hints) print(pathcomp) print(nextpathcomp) print("...") target = pathcomp[0] if isinstance(obj, list) and (target.isdigit()): target = int(target) if ((nextpathcomp is not None) and (isinstance(nextpathcomp, int) or str(nextpathcomp).isdigit())): obj[target] = [None] * (int(nextpathcomp) + 1) print("Created new list in target") else: print("Created new dict in target") obj[target] = {} print(obj) dpath.new(d, '/a/2', 3, creator=mycreator) print(d) assert isinstance(d['a'], list) assert len(d['a']) == 3 assert d['a'][2] == 3 dpath-python-2.2.0/tests/test_path_get.py000066400000000000000000000006551463241605500205140ustar00rootroot00000000000000import dpath.segments import dpath.exceptions def test_path_get_list_of_dicts(): tdict = { "a": { "b": [ {0: 
0}, {0: 1}, {0: 2}, ], }, } segments = ['a', 'b', 0, 0] res = dpath.segments.view(tdict, segments) assert isinstance(res['a']['b'], list) assert len(res['a']['b']) == 1 assert res['a']['b'][0][0] == 0 dpath-python-2.2.0/tests/test_path_paths.py000066400000000000000000000014011463241605500210420ustar00rootroot00000000000000from nose2.tools.such import helper import dpath.segments import dpath.exceptions import dpath.options def test_path_paths_empty_key_disallowed(): tdict = { "Empty": { "": { "Key": "" } } } with helper.assertRaises(dpath.exceptions.InvalidKeyName): for x in dpath.segments.walk(tdict): pass def test_path_paths_empty_key_allowed(): tdict = { "Empty": { "": { "Key": "" } } } segments = [] dpath.options.ALLOW_EMPTY_STRING_KEYS = True for segments, value in dpath.segments.leaves(tdict): pass dpath.options.ALLOW_EMPTY_STRING_KEYS = False assert "/".join(segments) == "Empty//Key" dpath-python-2.2.0/tests/test_paths.py000066400000000000000000000003131463241605500200270ustar00rootroot00000000000000import dpath def test_util_safe_path_list(): res = dpath._split_path(["Ignore", "the/separator"], None) assert len(res) == 2 assert res[0] == "Ignore" assert res[1] == "the/separator" dpath-python-2.2.0/tests/test_search.py000066400000000000000000000127251463241605500201670ustar00rootroot00000000000000import dpath def test_search_paths_with_separator(): dict = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } paths = [ 'a', 'a;b', 'a;b;c', 'a;b;c;d', 'a;b;c;e', 'a;b;c;f', ] for (path, value) in dpath.search(dict, '/**', yielded=True, separator=";"): assert path in paths for (path, value) in dpath.search(dict, ['**'], yielded=True, separator=";"): assert path in paths def test_search_paths(): dict = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } paths = [ 'a', 'a/b', 'a/b/c', 'a/b/c/d', 'a/b/c/e', 'a/b/c/f', ] for (path, value) in dpath.search(dict, '/**', yielded=True): assert path in paths for (path, value) in dpath.search(dict, ['**'], yielded=True): assert path in paths def test_search_afilter(): def afilter(x): if x in [1, 2]: return True return False dict = { "a": { "view_failure": "a", "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } paths = [ 'a/b/c/e', 'a/b/c/f', ] for (path, value) in dpath.search(dict, '/**', yielded=True, afilter=afilter): assert path in paths assert "view_failure" not in dpath.search(dict, '/**', afilter=afilter)['a'] assert "d" not in dpath.search(dict, '/**', afilter=afilter)['a']['b']['c'] for (path, value) in dpath.search(dict, ['**'], yielded=True, afilter=afilter): assert path in paths assert "view_failure" not in dpath.search(dict, ['**'], afilter=afilter)['a'] assert "d" not in dpath.search(dict, ['**'], afilter=afilter)['a']['b']['c'] def test_search_globbing(): dict = { "a": { "b": { "c": { "d": 0, "e": 1, "f": 2, }, }, }, } paths = [ 'a/b/c/d', 'a/b/c/f', ] for (path, value) in dpath.search(dict, '/a/**/[df]', yielded=True): assert path in paths for (path, value) in dpath.search(dict, ['a', '**', '[df]'], yielded=True): assert path in paths def test_search_return_dict_head(): tdict = { "a": { "b": { 0: 0, 1: 1, 2: 2, }, }, } res = dpath.search(tdict, '/a/b') assert isinstance(res['a']['b'], dict) assert len(res['a']['b']) == 3 assert res['a']['b'] == {0: 0, 1: 1, 2: 2} res = dpath.search(tdict, ['a', 'b']) assert isinstance(res['a']['b'], dict) assert len(res['a']['b']) == 3 assert res['a']['b'] == {0: 0, 1: 1, 2: 2} def test_search_return_dict_globbed(): tdict = { "a": { "b": { 0: 0, 1: 1, 2: 2, }, }, } res = dpath.search(tdict, 
'/a/b/[02]') assert isinstance(res['a']['b'], dict) assert len(res['a']['b']) == 2 assert res['a']['b'] == {0: 0, 2: 2} res = dpath.search(tdict, ['a', 'b', '[02]']) assert isinstance(res['a']['b'], dict) assert len(res['a']['b']) == 2 assert res['a']['b'] == {0: 0, 2: 2} def test_search_return_list_head(): tdict = { "a": { "b": [ 0, 1, 2, ], }, } res = dpath.search(tdict, '/a/b') assert isinstance(res['a']['b'], list) assert len(res['a']['b']) == 3 assert res['a']['b'] == [0, 1, 2] res = dpath.search(tdict, ['a', 'b']) assert isinstance(res['a']['b'], list) assert len(res['a']['b']) == 3 assert res['a']['b'] == [0, 1, 2] def test_search_return_list_globbed(): tdict = { "a": { "b": [ 0, 1, 2, ] } } res = dpath.search(tdict, '/a/b/[02]') assert isinstance(res['a']['b'], list) assert len(res['a']['b']) == 3 assert res['a']['b'] == [0, None, 2] res = dpath.search(tdict, ['a', 'b', '[02]']) assert isinstance(res['a']['b'], list) assert len(res['a']['b']) == 3 assert res['a']['b'] == [0, None, 2] def test_search_list_key_with_separator(): tdict = { "a": { "b": { "d": 'failure', }, "/b/d": 'success', }, } res = dpath.search(tdict, ['a', '/b/d']) assert 'b' not in res['a'] assert res['a']['/b/d'] == 'success' def test_search_multiple_stars(): testdata = { 'a': [ { 'b': [ {'c': 1}, {'c': 2}, {'c': 3}, ], }, ], } testpath = 'a/*/b/*/c' res = dpath.search(testdata, testpath) assert len(res['a'][0]['b']) == 3 assert res['a'][0]['b'][0]['c'] == 1 assert res['a'][0]['b'][1]['c'] == 2 assert res['a'][0]['b'][2]['c'] == 3 def test_search_negative_index(): d = {'a': {'b': [1, 2, 3]}} res = dpath.search(d, 'a/b/-1') assert res == dpath.search(d, "a/b/2") dpath-python-2.2.0/tests/test_segments.py000066400000000000000000000243321463241605500205440ustar00rootroot00000000000000import os from unittest import TestCase import hypothesis.strategies as st from hypothesis import given, assume, settings, HealthCheck import dpath.segments as api from dpath import options settings.register_profile("default", suppress_health_check=(HealthCheck.too_slow,)) settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default')) random_key_int = st.integers(0, 1000) random_key_str = st.binary() | st.text() random_key = random_key_str | random_key_int random_segments = st.lists(random_key) random_leaf = st.integers() | st.floats() | st.booleans() | st.binary() | st.text() | st.none() random_thing = st.recursive( random_leaf, lambda children: st.lists(children) | st.tuples(children) | st.dictionaries(st.binary() | st.text(), children), max_leaves=100 ) random_node = random_thing.filter(lambda thing: isinstance(thing, (list, tuple, dict))) random_mutable_thing = st.recursive( random_leaf, lambda children: st.lists(children) | st.dictionaries(st.binary() | st.text(), children) ) random_mutable_node = random_mutable_thing.filter(lambda thing: isinstance(thing, (list, dict))) @st.composite def mutate(draw, segment): # Convert number segments. segment = api.int_str(segment) # Infer the type constructor for the result. kind = type(segment) # Produce a valid kind conversion for our wildcards. if isinstance(segment, bytes): def to_kind(v): try: return bytes(v, 'utf-8') except: return kind(v) else: def to_kind(v): return kind(v) # Convert to an list of single values. converted = [] for i in range(len(segment)): # This carefully constructed nonsense to get a single value # is necessary to work around limitations in the bytes type # iteration returning integers instead of byte strings of # length 1. 
c = segment[i:i + 1] # Check for values that need to be escaped. if c in tuple(map(to_kind, ('*', '?', '[', ']'))): c = to_kind('[') + c + to_kind(']') converted.append(c) # Start with a non-mutated result. result = converted # 50/50 chance we will attempt any mutation. change = draw(st.sampled_from((True, False))) if change: result = [] # For every value in segment maybe mutate, maybe not. for c in converted: # If the length isn't 1 then, we know this value is already # an escaped special character. We will not mutate these. if len(c) != 1: result.append(c) else: result.append(draw(st.sampled_from((c, to_kind('?'), to_kind('*'))))) combined = kind().join(result) # If we by chance produce the star-star result, then just revert # back to the original converted segment. This is not the mutation # you are looking for. if combined == to_kind('**'): combined = kind().join(converted) return combined @st.composite def random_segments_with_glob(draw): segments = draw(random_segments) glob = list(map(lambda x: draw(mutate(x)), segments)) # 50/50 chance we will attempt to add a star-star to the glob. use_ss = draw(st.sampled_from((True, False))) if use_ss: # Decide if we are inserting a new segment or replacing a range. insert_ss = draw(st.sampled_from((True, False))) if insert_ss: index = draw(st.integers(0, len(glob))) glob.insert(index, '**') else: start = draw(st.integers(0, len(glob))) stop = draw(st.integers(start, len(glob))) glob[start:stop] = ['**'] return segments, glob @st.composite def random_segments_with_nonmatching_glob(draw): (segments, glob) = draw(random_segments_with_glob()) # Generate a segment that is not in segments. invalid = draw(random_key.filter(lambda x: x not in segments and x not in ('*', '**'))) # Do we just have a star-star glob? It matches everything, so we # need to replace it entirely. if len(glob) == 1 and glob[0] == '**': glob = [invalid] # Do we have a star glob and only one segment? It matches anything # in the segment, so we need to replace it entirely. elif len(glob) == 1 and glob[0] == '*' and len(segments) == 1: glob = [invalid] # Otherwise we can add something we know isn't in the segments to # the glob. else: index = draw(st.integers(0, len(glob))) glob.insert(index, invalid) return (segments, glob) @st.composite def random_walk(draw): node = draw(random_mutable_node) found = tuple(api.walk(node)) assume(len(found) > 0) return (node, draw(st.sampled_from(found))) @st.composite def random_leaves(draw): node = draw(random_mutable_node) found = tuple(api.leaves(node)) assume(len(found) > 0) return (node, draw(st.sampled_from(found))) class TestSegments(TestCase): @classmethod def setUpClass(cls): # Allow empty strings in segments. options.ALLOW_EMPTY_STRING_KEYS = True @classmethod def tearDownClass(cls): # Revert back to default. options.ALLOW_EMPTY_STRING_KEYS = False @given(random_node) def test_kvs(self, node): ''' Given a node, kvs should produce a key that when used to extract from the node renders the exact same value given. ''' for k, v in api.make_walkable(node): assert node[k] is v @given(random_leaf) def test_leaf_with_leaf(self, leaf): ''' Given a leaf, leaf should return True. ''' assert api.leaf(leaf) is True @given(random_node) def test_leaf_with_node(self, node): ''' Given a node, leaf should return False. ''' assert api.leaf(node) is False @given(random_thing) def test_walk(self, thing): ''' Given a thing to walk, walk should yield key, value pairs where key is a tuple of non-zero length. 
''' for k, v in api.walk(thing): assert isinstance(k, tuple) assert len(k) > 0 @given(random_node) def test_get(self, node): ''' Given a node, get should return the exact value given a key for all key, value pairs in the node. ''' for k, v in api.walk(node): assert api.get(node, k) is v @given(random_node) def test_has(self, node): ''' Given a node, has should return True for all paths, False otherwise. ''' for k, v in api.walk(node): assert api.has(node, k) is True # If we are at a leaf, then we can create a value that isn't # present easily. if api.leaf(v): assert api.has(node, k + (0,)) is False @given(random_segments) def test_expand(self, segments): ''' Given segments expand should produce as many results are there were segments and the last result should equal the given segments. ''' count = len(segments) result = list(api.expand(segments)) assert count == len(result) if count > 0: assert segments == result[-1] @given(random_node) def test_types(self, node): ''' Given a node, types should yield a tuple of key, type pairs and the type indicated should equal the type of the value. ''' for k, v in api.walk(node): ts = api.types(node, k) ta = () for tk, tt in ts: ta += (tk,) assert type(api.get(node, ta)) is tt @given(random_node) def test_leaves(self, node): ''' Given a node, leaves should yield only leaf key, value pairs. ''' for k, v in api.leaves(node): assert api.leafy(v) @given(random_segments_with_glob()) def test_match(self, pair): ''' Given segments and a known good glob, match should be True. ''' (segments, glob) = pair assert api.match(segments, glob) is True @given(random_segments_with_nonmatching_glob()) def test_match_nonmatching(self, pair): ''' Given segments and a known bad glob, match should be False. ''' (segments, glob) = pair assert api.match(segments, glob) is False @given(walkable=random_walk(), value=random_thing) def test_set_walkable(self, walkable, value): ''' Given a walkable location, set should be able to update any value. ''' (node, (segments, found)) = walkable api.set(node, segments, value) assert api.get(node, segments) is value @given(walkable=random_leaves(), kstr=random_key_str, kint=random_key_int, value=random_thing, extension=random_segments) def test_set_create_missing(self, walkable, kstr, kint, value, extension): ''' Given a walkable non-leaf, set should be able to create missing nodes and set a new value. ''' (node, (segments, found)) = walkable assume(api.leaf(found)) parent_segments = segments[:-1] parent = api.get(node, parent_segments) if isinstance(parent, list): assume(len(parent) < kint) destination = parent_segments + (kint,) + tuple(extension) elif isinstance(parent, dict): assume(kstr not in parent) destination = parent_segments + (kstr,) + tuple(extension) else: raise Exception('mad mad world') api.set(node, destination, value) assert api.get(node, destination) is value @given(thing=random_thing) def test_fold(self, thing): ''' Given a thing, count paths with fold. ''' def f(o, p, a): a[0] += 1 [count] = api.fold(thing, f, [0]) assert count == len(tuple(api.walk(thing))) @given(walkable=random_walk()) def test_view(self, walkable): ''' Given a walkable location, view that location. ''' (node, (segments, found)) = walkable assume(found == found) # Hello, nan! We don't want you here. 
view = api.view(node, segments) assert api.get(view, segments) == api.get(node, segments) dpath-python-2.2.0/tests/test_set.py000066400000000000000000000031501463241605500175050ustar00rootroot00000000000000import dpath def test_set_existing_separator(): dict = { "a": { "b": 0, }, } dpath.set(dict, ';a;b', 1, separator=";") assert dict['a']['b'] == 1 dict['a']['b'] = 0 dpath.set(dict, ['a', 'b'], 1, separator=";") assert dict['a']['b'] == 1 def test_set_existing_dict(): dict = { "a": { "b": 0, }, } dpath.set(dict, '/a/b', 1) assert dict['a']['b'] == 1 dict['a']['b'] = 0 dpath.set(dict, ['a', 'b'], 1) assert dict['a']['b'] == 1 def test_set_existing_list(): dict = { "a": [ 0, ], } dpath.set(dict, '/a/0', 1) assert dict['a'][0] == 1 dict['a'][0] = 0 dpath.set(dict, ['a', '0'], 1) assert dict['a'][0] == 1 def test_set_filter(): def afilter(x): if int(x) == 31: return True return False dict = { "a": { "b": 0, "c": 1, "d": 31, } } dpath.set(dict, '/a/*', 31337, afilter=afilter) assert dict['a']['b'] == 0 assert dict['a']['c'] == 1 assert dict['a']['d'] == 31337 dict = { "a": { "b": 0, "c": 1, "d": 31, } } dpath.set(dict, ['a', '*'], 31337, afilter=afilter) assert dict['a']['b'] == 0 assert dict['a']['c'] == 1 assert dict['a']['d'] == 31337 def test_set_existing_path_with_separator(): dict = { "a": { 'b/c/d': 0, }, } dpath.set(dict, ['a', 'b/c/d'], 1) assert len(dict['a']) == 1 assert dict['a']['b/c/d'] == 1 dpath-python-2.2.0/tests/test_types.py000066400000000000000000000066731463241605500200730ustar00rootroot00000000000000from collections.abc import MutableSequence, MutableMapping from nose2.tools.such import helper import dpath from dpath import MergeType class TestMapping(MutableMapping): def __init__(self, data=None): if data is None: data = {} self._mapping = {} self._mapping.update(data) def __len__(self): return len(self._mapping) def __iter__(self): return iter(self._mapping) def __contains__(self, key): return key in self._mapping def __getitem__(self, key): return self._mapping[key] def __setitem__(self, key, value): self._mapping[key] = value def __delitem__(self, key): del self._mapping[key] class TestSequence(MutableSequence): def __init__(self, data=None): if data is None: data = list() self._list = [] + data def __len__(self): return len(self._list) def __getitem__(self, idx): return self._list[idx] def __delitem__(self, idx): del self._list[idx] def __setitem__(self, idx, value): self._list[idx] = value def __str__(self): return str(self._list) def __eq__(self, other): return self._list == other._list def __ne__(self, other): return not self.__eq__(other) def insert(self, idx, value): self._list.insert(idx, value) def append(self, value): self.insert(len(self._list), value) def test_types_set(): data = TestMapping({"a": TestSequence([0])}) dpath.set(data, '/a/0', 1) assert data['a'][0] == 1 data['a'][0] = 0 dpath.set(data, ['a', '0'], 1) assert data['a'][0] == 1 def test_types_get_list_of_dicts(): tdict = TestMapping({ "a": TestMapping({ "b": TestSequence([ {0: 0}, {0: 1}, {0: 2}, ]), }), }) res = dpath.segments.view(tdict, ['a', 'b', 0, 0]) assert isinstance(res['a']['b'], TestSequence) assert len(res['a']['b']) == 1 assert res['a']['b'][0][0] == 0 def test_types_merge_simple_list_replace(): src = TestMapping({ "list": TestSequence([7, 8, 9, 10]) }) dst = TestMapping({ "list": TestSequence([0, 1, 2, 3]) }) dpath.merge(dst, src, flags=MergeType.REPLACE) assert dst["list"] == TestSequence([7, 8, 9, 10]), "%r != %r" % (dst["list"], TestSequence([7, 8, 9, 10])) def 
test_types_get_absent(): ehash = TestMapping() helper.assertRaises(KeyError, dpath.get, ehash, '/a/b/c/d/f') helper.assertRaises(KeyError, dpath.get, ehash, ['a', 'b', 'c', 'd', 'f']) def test_types_get_glob_multiple(): ehash = TestMapping({ "a": TestMapping({ "b": TestMapping({ "c": TestMapping({ "d": 0, }), "e": TestMapping({ "d": 0, }), }), }), }) helper.assertRaises(ValueError, dpath.get, ehash, '/a/b/*/d') helper.assertRaises(ValueError, dpath.get, ehash, ['a', 'b', '*', 'd']) def test_delete_filter(): def afilter(x): if int(x) == 31: return True return False data = TestMapping({ "a": TestMapping({ "b": 0, "c": 1, "d": 31, }), }) dpath.delete(data, '/a/*', afilter=afilter) assert data['a']['b'] == 0 assert data['a']['c'] == 1 assert 'd' not in data['a'] dpath-python-2.2.0/tests/test_unicode.py000066400000000000000000000012561463241605500203450ustar00rootroot00000000000000import dpath def test_unicode_merge(): a = {'中': 'zhong'} b = {'文': 'wen'} dpath.merge(a, b) assert len(a.keys()) == 2 assert a['中'] == 'zhong' assert a['文'] == 'wen' def test_unicode_search(): a = {'中': 'zhong'} results = [[x[0], x[1]] for x in dpath.search(a, '*', yielded=True)] assert len(results) == 1 assert results[0][0] == '中' assert results[0][1] == 'zhong' def test_unicode_str_hybrid(): a = {'first': u'1'} b = {u'second': '2'} dpath.merge(a, b) assert len(a.keys()) == 2 assert a[u'second'] == '2' assert a['second'] == u'2' assert a[u'first'] == '1' assert a['first'] == u'1' dpath-python-2.2.0/tox.ini000066400000000000000000000010061463241605500154500ustar00rootroot00000000000000# Tox (http://tox.testrun.org/) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. [flake8] ignore = E501,E722 [tox] envlist = pypy37, py38, py39, py310, py311, py312 [gh-actions] python = pypy-3.7: pypy37 3.8: py38 3.9: py39 3.10: py310 3.11: py311 3.12: py312 [testenv] deps = hypothesis nose2 commands = nose2 {posargs}
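The test modules above exercise dpath's public helpers (get, search, new, set, merge) against plain dicts, lists, and custom mapping/sequence types. As a quick orientation for readers skimming the suite, the sketch below strings those same calls together on a small throwaway document. It is illustrative only, not part of the packaged test suite: it assumes dpath is installed (pip install dpath), and the variable names (doc, dst, found) are invented for the example.

# Minimal orientation sketch (illustrative; assumes an installed dpath).
# Each call mirrors an assertion pattern already present in the tests above.
import dpath

doc = {"a": {"b": {"c": 1}}}

# get: address a nested value with a slash-separated path or a list of segments.
assert dpath.get(doc, "/a/b/c") == 1
assert dpath.get(doc, ["a", "b", "c"]) == 1

# new: create intermediate nodes as needed.
dpath.new(doc, "a/b/d", 2)
assert doc["a"]["b"]["d"] == 2

# set: update existing values, optionally through a glob.
dpath.set(doc, "a/b/*", 0)
assert doc["a"]["b"] == {"c": 0, "d": 0}

# search: collect every path/value pair matching a glob.
found = dict(dpath.search(doc, "a/**", yielded=True))
assert found["a/b/c"] == 0

# merge: fold one document into another, recursing into nested dicts.
dst = {"a": {"x": 1}}
dpath.merge(dst, {"a": {"y": 2}})
assert dst == {"a": {"x": 1, "y": 2}}

print("dpath orientation sketch passed")

Because every call and assertion pattern here already appears in the suite, the sketch doubles as a quick smoke test when run against a local checkout (for example inside the tox environments defined above).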