BTrees-6.0/.coveragerc

# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/c-code
[run]
source = BTrees
# New in 5.0; required for the GHA coveralls submission.
relative_files = True
branch = true

[paths]
source =
    src/
    .tox/*/lib/python*/site-packages/
    .tox/pypy*/site-packages/

[report]
show_missing = true
precision = 2
ignore_errors = True
exclude_lines =
    except ImportError:
    if __name__ == '__main__':
    pragma: no cover
    pragma: nocover
    raise AssertionError
    raise NotImplementedError
    raise unittest.Skip
    self.fail\(

[html]
directory = parts/htmlcov

BTrees-6.0/.manylinux-install.sh

#!/usr/bin/env bash
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/c-code

set -e -x

# Running inside docker
# Set a cache directory for pip. This was
# mounted to be the same as it is outside docker so it
# can be persisted.
export XDG_CACHE_HOME="/cache"

# XXX: This works for macOS, where everything bind-mounted
# is seen as owned by root in the container. But when the host is Linux
# the actual UIDs come through to the container, triggering
# pip to disable the cache when it detects that the owner doesn't match.
# The below is an attempt to fix that, taken from bcrypt. It seems to work on
# Github Actions.
if [ -n "$GITHUB_ACTIONS" ]; then
    echo Adjusting pip cache permissions
    mkdir -p $XDG_CACHE_HOME/pip
    chown -R $(whoami) $XDG_CACHE_HOME
fi
ls -ld /cache
ls -ld /cache/pip

export CFLAGS="-pipe"
if [ `uname -m` == 'aarch64' ]; then
    # Compiling with -O3 on the arm emulator takes hours. The default settings
    # have -O3, and adding -Os doesn't help much; -O1 seems to be enough.
    echo "Compiling with -O1"
    export CFLAGS="$CFLAGS -O1"
else
    echo "Compiling with -O3"
    export CFLAGS="-O3 $CFLAGS"
fi

export PURE_PYTHON=0
# We need some libraries because we build wheels from scratch:
yum -y install libffi-devel

tox_env_map() {
    case $1 in
        *"cp313"*) echo 'py313';;
        *"cp38"*) echo 'py38';;
        *"cp39"*) echo 'py39';;
        *"cp310"*) echo 'py310';;
        *"cp311"*) echo 'py311';;
        *"cp312"*) echo 'py312';;
        *) echo 'py';;
    esac
}

# Compile wheels
for PYBIN in /opt/python/*/bin; do
    if \
       [[ "${PYBIN}" == *"cp313/"* ]] || \
       [[ "${PYBIN}" == *"cp311/"* ]] || \
       [[ "${PYBIN}" == *"cp312/"* ]] || \
       [[ "${PYBIN}" == *"cp38/"* ]] || \
       [[ "${PYBIN}" == *"cp39/"* ]] || \
       [[ "${PYBIN}" == *"cp310/"* ]] ; then
        if [[ "${PYBIN}" == *"cp313/"* ]] ; then
            "${PYBIN}/pip" install --pre -e /io/
            "${PYBIN}/pip" wheel /io/ --pre -w wheelhouse/
        else
            "${PYBIN}/pip" install -e /io/
            "${PYBIN}/pip" wheel /io/ -w wheelhouse/
        fi
        if [ `uname -m` == 'aarch64' ]; then
            # Running the test suite takes forever in
            # emulation; an early run (using tox, which is also slow)
            # took over an hour to build and then run the tests sequentially
            # for the Python versions. We still want to run tests, though!
            # We don't want to distribute wheels for a platform that's
            # completely untested. Consequently, we limit it to running
            # in just one interpreter, the newest one on the list (which in
            # principle should be the fastest), and we don't install the
            # ZODB extra.
if [[ "${PYBIN}" == *"cp311"* ]]; then cd /io/ "${PYBIN}/pip" install -e .[test] "${PYBIN}/python" -c 'import BTrees.OOBTree; print(BTrees.OOBTree.BTree, BTrees.OOBTree.BTreePy)' "${PYBIN}/python" -m unittest discover -s src cd .. fi fi rm -rf /io/build /io/*.egg-info fi done # Bundle external shared libraries into the wheels for whl in wheelhouse/BTrees*.whl; do auditwheel repair "$whl" -w /io/wheelhouse/ done ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1716554220.0 BTrees-6.0/.manylinux.sh0000755000076500000240000000077514624104754014106 0ustar00jensstaff#!/usr/bin/env bash # Generated from: # https://github.com/zopefoundation/meta/tree/master/config/c-code set -e -x # Mount the current directory as /io # Mount the pip cache directory as /cache # `pip cache` requires pip 20.1 echo Setting up caching python --version python -mpip --version LCACHE="$(dirname `python -mpip cache dir`)" echo Sharing pip cache at $LCACHE $(ls -ld $LCACHE) docker run --rm -e GITHUB_ACTIONS -v "$(pwd)":/io -v "$LCACHE:/cache" $DOCKER_IMAGE $PRE_CMD /io/.manylinux-install.sh ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1716554220.0 BTrees-6.0/.readthedocs.yaml0000644000076500000240000000123014624104754014657 0ustar00jensstaff# Generated from: # https://github.com/zopefoundation/meta/tree/master/config/c-code # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the version of Python and other tools you might need build: os: ubuntu-22.04 tools: python: "3.11" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py # We recommend specifying your dependencies to enable reproducible builds: # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - requirements: docs/requirements.txt - method: pip path: . ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052502.0 BTrees-6.0/CHANGES.rst0000644000076500000240000004254714626022126013244 0ustar00jensstaff================== BTrees Changelog ================== 6.0 (2024-05-30) ================ - Drop support for Python 3.7. - Build Windows wheels on GHA. 5.2 (2024-02-07) ================ - Add preliminary support for Python 3.13 as of 3.13a3. 5.1 (2023-10-05) ================ - Drop using ``setup_requires`` due to constant problems on GHA. - Add support for Python 3.12. 5.0 (2023-02-10) ================ - Build Linux binary wheels for Python 3.11. - Drop support for Python 2.7, 3.5, 3.6. 4.11.3 (2022-11-17) =================== - point release to rebuild full set of wheels 4.11.2 (2022-11-16) =================== - Add support for building arm64 wheels on macOS. 4.11.1 (2022-11-09) =================== - Fix macOS wheel build issues on GitHub Actions - We no longer provide 32bit wheels for the Windows platform, only x86_64. 4.11.0 (2022-11-03) =================== - Add support for Python 3.11. 4.10.1 (2022-09-12) =================== - Disable unsafe math optimizations in C code. (`#184 `_) 4.10.0 (2022-03-09) =================== - Add support for Python 3.10. 4.9.2 (2021-06-09) ================== - Fix ``fsBTree.TreeSet`` and ``fsBTree.BTree`` raising ``SystemError``. See `issue 170 `_. - Fix all the ``fsBTree`` objects to provide the correct interfaces and be instances of the appropriate collection ABCs. This was done for the other modules in release 4.8.0. 
- Fix the ``multiunion``, ``union``, ``intersection``, and ``difference`` functions when used with arbitrary iterables. Previously, the iterable had to be pre-sorted, meaning only sequences like ``list`` and ``tuple`` could reliably be used; this was not documented though. If the iterable wasn't sorted, the function would produce garbage output. Now, if the function detects an arbitrary iterable, it automatically sorts a copy.

4.9.1 (2021-05-27)
==================

- Fix setting unknown class attributes on subclasses of BTrees when using the C extension. This prevented subclasses from being decorated with ``@component.adapter()``. See `issue 168 `_.

4.9.0 (2021-05-26)
==================

- Fix the C implementation to match the Python implementation and allow setting custom node sizes for an entire application directly by changing ``BTree.max_leaf_size`` and ``BTree.max_internal_size`` attributes, without having to create a new subclass. These attributes can now also be read from the classes in the C implementation. See `issue 166 `_.
- Add various small performance improvements for storing zope.interface attributes on ``BTree`` and ``TreeSet`` as well as deactivating persistent objects from this package.

4.8.0 (2021-04-14)
==================

- Make Python 2 forbid the use of type objects as keys (unless a custom metaclass is used that implements comparison as required by BTrees.) On Python 3, types are not orderable so they were already forbidden, but on Python 2 types can be ordered by memory address, which makes them unsuitable for use as keys. See `issue `_.
- Make the ``multiunion``, ``union``, ``intersection``, and ``difference`` functions accept arbitrary Python iterables (that iterate across the correct types). Previously, the Python implementation allowed this, but the C implementation only allowed objects (like ``TreeSet`` or ``Bucket``) defined in the same module providing the function. See `issue 24 `_.
- Fix persistency bug in the Python version (`#118 `_).
- Fix ``Tree.__setstate__`` to no longer accept children besides tree or bucket types to prevent crashes. See `PR 143 `_ for details.
- Make BTrees, TreeSet, Set and Buckets implement the ``__and__``, ``__or__`` and ``__sub__`` special methods as shortcuts for ``BTrees.Interfaces.IMerge.intersection``, ``BTrees.Interfaces.IMerge.union`` and ``BTrees.Interfaces.IMerge.difference``.
- Add support for Python 3.9.
- Build and upload aarch64 wheels.
- Make a value of ``0`` in the ``PURE_PYTHON`` environment variable require the C extensions (except on PyPy). Previously, and if this variable is unset, missing or unusable C extensions would be silently ignored. With this variable set to ``0``, an ``ImportError`` will be raised if the C extensions are unavailable. See `issue 156 `_.
- Make the BTree objects (``BTree``, ``TreeSet``, ``Set``, ``Bucket``) of each module actually provide the interfaces defined in ``BTrees.Interfaces``. Previously, they provided no interfaces.
- Make all the BTree and Bucket objects instances of ``collections.abc.MutableMapping`` (that is, ``isinstance(btree, MutableMapping)`` is now true; no actual inheritance has changed). As part of this, they now provide the ``popitem()`` method.
- Make all the TreeSet and Set objects instances of ``collections.abc.MutableSet`` (that is, ``isinstance(tree_set, MutableSet)`` is now true; no actual inheritance has changed).

  As part of this, they now provide several more methods, including ``isdisjoint``, ``discard``, and ``pop``, and support in-place mutation operators such as ``tree_set |= other``, ``tree_set += other``, ``tree_set -= other`` and ``tree_set ^= other``. See `issue 121 `_.

- Update the definitions of ``ISized`` and ``IReadSequence`` to simply be ``zope.interface.common.collections.ISized`` and ``zope.interface.common.sequence.IMinimalSequence`` respectively.
- Remove the ``__nonzero__`` interface method from ``ICollection``. No objects actually implemented such a method; instead, the boolean value is typically taken from ``__len__``.
- Adjust the definition of ``ISet`` to produce the same resolution order under the C3 and legacy orderings. This means that the legacy order has changed slightly, but that this package emits no warnings when ``ZOPE_INTERFACE_LOG_CHANGED_IRO=1``. Note that the legacy order was not being used for these objects because the C3 ordering was still consistent; it could only be obtained using ``ZOPE_INTERFACE_USE_LEGACY_IRO=1``. See `PR 159 `_ for all the interface updates.
- Fix the ``get``, ``setdefault`` and ``pop`` methods, as well as the ``in`` operator, to not suppress ``POSKeyError`` if the object or subobjects are corrupted. Previously, such errors were logged by ZODB, but not propagated. See `issue 161 `_.
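A minimal sketch of the 4.8.0 behavior described above, using the ``OO`` module; the values shown in comments are indicative, not output captured from this release:

.. code-block:: python

    from collections.abc import MutableMapping, MutableSet
    from BTrees.OOBTree import OOBTree, OOTreeSet

    bt = OOBTree({'a': 1, 'b': 2})
    print(isinstance(bt, MutableMapping))   # True; ABC registration only, no inheritance change
    print(bt.popitem())                     # removes and returns some (key, value) pair

    ts = OOTreeSet(['a', 'b'])
    print(isinstance(ts, MutableSet))       # True
    ts |= OOTreeSet(['c'])                  # in-place union
    ts -= OOTreeSet(['a'])                  # in-place difference
    print(list(ts))                         # ['b', 'c']

    # __and__ as a shortcut for IMerge.intersection
    print(list(OOTreeSet(['a', 'b']) & OOTreeSet(['b', 'c'])))   # ['b']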
4.7.2 (2020-04-07)
==================

- Fix more cases of C and Python inconsistency. The C implementation now behaves like the Python implementation when it comes to integer overflow for the integer keys for ``in``, ``get`` and ``has_key``. Now they return False, the default value, and False, respectively in both versions if the tested value would overflow or underflow. Previously, the C implementation would raise ``OverflowError`` or ``KeyError``, while the Python implementation functioned as expected. See `issue 140 `_.

.. note:: The unspecified true return values of ``has_key`` have changed.

4.7.1 (2020-03-22)
==================

- Fix the definitions of ``__all__`` in modules. In 4.7.0, they incorrectly left out names. See `PR 132 `_.
- Ensure the interface resolution order of all objects is consistent. See `issue 137 `_.

4.7.0 (2020-03-17)
==================

- Add unsigned variants of the trees. These use the initial "U" for 32-bit data and "Q" for 64-bit data (for "quad", which is similar to what the C ``printf`` function uses and the Python struct module uses).
- Fix the value for ``BTrees.OIBTree.using64bits`` when using the pure Python implementation (PyPy and when ``PURE_PYTHON`` is in the environment).
- Make the errors that are raised when values are out of range more consistent between Python 2 and Python 3 and between 32-bit and 64-bit variants.
- Make the Bucket types consistent with the BTree types as updated in versions 4.3.2: Querying for keys with default comparisons or that are not integers no longer raises ``TypeError``.

4.6.1 (2019-11-07)
==================

- Add support for Python 3.8.

4.6.0 (2019-07-30)
==================

- Drop support for Python 3.4.
- Fix tests against persistent 4.4.
- Stop accidentally installing the 'terryfy' package in macOS wheels. See `issue 98 `_.
- Fix segmentation fault in ``bucket_repr()``. See `issue 106 `_.

4.5.1 (2018-08-09)
==================

- Produce binary wheels for Python 3.7.
- Use pyproject.toml to specify build dependencies. This requires pip 18 or later to build from source.

4.5.0 (2018-04-23)
==================

- Add support for Python 3.6 and 3.7.
- Drop support for Python 3.3.
- Raise an ``ImportError`` consistently on Python 3 if the C extension for BTrees is used but the ``persistent`` C extension is not available. Previously this could result in an odd ``AttributeError``. See https://github.com/zopefoundation/BTrees/pull/55
- Fix the possibility of a rare crash in the C extension when deallocating items. See https://github.com/zopefoundation/BTrees/issues/75
- Respect the ``PURE_PYTHON`` environment variable at runtime even if the C extensions are available. See https://github.com/zopefoundation/BTrees/issues/78
- Always attempt to build the C extensions, but make their success optional.
- Fix a ``DeprecationWarning`` that could come from I and L objects in Python 2 in pure-Python mode. See https://github.com/zopefoundation/BTrees/issues/79

4.4.1 (2017-01-24)
==================

Fixed a packaging bug that caused extra files to be included (some of which caused problems in some platforms).

4.4.0 (2017-01-11)
==================

- Allow None as a special key (sorted smaller than all others). This is a bit of a return to BTrees 3 behavior in that Nones are allowed as keys again. Other objects with default ordering are still not allowed as keys.

4.3.2 (2017-01-05)
==================

- Make the CPython implementation consistent with the pure-Python implementation and only check object keys for default comparison when setting keys. In Python 2 this makes it possible to remove keys that were added using a less restrictive version of BTrees. (In Python 3 keys that are unorderable still cannot be removed.) Likewise, all versions can unpickle trees that already had such keys. See: https://github.com/zopefoundation/BTrees/issues/53 and https://github.com/zopefoundation/BTrees/issues/51
- Make the Python implementation consistent with the CPython implementation and check object key identity before checking equality and performing comparisons. This can allow fixing trees that have keys that now have broken comparison functions. See https://github.com/zopefoundation/BTrees/issues/50
- Make the CPython implementation consistent with the pure-Python implementation and no longer raise ``TypeError`` for an object key (in object-keyed trees) with default comparison on ``__getitem__``, ``get`` or ``in`` operations. Instead, the results will be a ``KeyError``, the default value, and ``False``, respectively. Previously, CPython raised a ``TypeError`` in those cases, while the Python implementation behaved as specified. Likewise, non-integer keys in integer-keyed trees will raise ``KeyError``, return the default and return ``False``, respectively, in both implementations. Previously, pure-Python raised a ``KeyError``, returned the default, and raised a ``TypeError``, while CPython raised ``TypeError`` in all three cases.

4.3.1 (2016-05-16)
==================

- Packaging: fix password used to automate wheel creation on Travis.

4.3.0 (2016-05-10)
==================

- Fix unexpected ``OverflowError`` when passing 64bit values to long keys / values on Win64. See: https://github.com/zopefoundation/BTrees/issues/32
- When testing ``PURE_PYTHON`` environments under ``tox``, avoid poisoning the user's global wheel cache.
- Ensure that the pure-Python implementation, used on PyPy and when a C compiler isn't available for CPython, pickles identically to the C version. Unpickling will choose the best available implementation. This change prevents interoperability problems and database corruption if both implementations are in use.

  While it is no longer possible to pickle a Python implementation and have it unpickle to the Python implementation if the C implementation is available, existing Python pickles will still unpickle to the Python implementation (until pickled again). See: https://github.com/zopefoundation/BTrees/issues/19

- Avoid creating invalid objects when unpickling empty BTrees in a pure-Python environment.
- Drop support for Python 2.6 and 3.2.

4.2.0 (2015-11-13)
==================

- Add support for Python 3.5.

4.1.4 (2015-06-02)
==================

- Ensure that pure-Python Bucket and Set objects have a human readable ``__repr__`` like the C versions.

4.1.3 (2015-05-19)
==================

- Fix ``_p_changed`` when removing items from small pure-Python BTrees/TreeSets and when adding items to small pure-Python Sets. See: https://github.com/zopefoundation/BTrees/issues/13

4.1.2 (2015-04-07)
==================

- Suppress testing 64-bit values in OLBTrees on 32 bit machines. See: https://github.com/zopefoundation/BTrees/issues/9
- Fix ``_p_changed`` when adding items to small pure-Python BTrees/TreeSets. See: https://github.com/zopefoundation/BTrees/issues/11

4.1.1 (2014-12-27)
==================

- Accommodate long values in pure-Python OLBTrees.

4.1.0 (2014-12-26)
==================

- Add support for PyPy and PyPy3.
- Add support for Python 3.4.
- BTree subclasses can define ``max_leaf_size`` or ``max_internal_size`` to control maximum sizes for Bucket/Set and BTree/TreeSet nodes.
- Detect integer overflow on 32-bit machines correctly under Python 3.
- Update pure-Python and C trees / sets to accept explicit None to indicate max / min value for ``minKey``, ``maxKey``. (PR #3)
- Update pure-Python trees / sets to accept explicit None to indicate open ranges for ``keys``, ``values``, ``items``. (PR #3)

4.0.8 (2013-05-25)
==================

- Fix value-based comparison for objects under Py3k: addresses invalid merges of ``[OLI]OBTrees/OBuckets``.
- Ensure that pure-Python implementation of ``OOBTree.byValue`` matches semantics (reversed-sort) of C implementation.

4.0.7 (2013-05-22)
==================

- Issue #2: compilation error on 32-bit mode of OS/X.
- Test ``PURE_PYTHON`` environment variable support: if set, the C extensions will not be built, imported, or tested.

4.0.6 (2013-05-14)
==================

- Changed the ``ZODB`` extra to require only the real ``ZODB`` package, rather than the ``ZODB3`` metapackage: depending on the version used, the metapackage could pull in stale versions of **this** package and ``persistent``.
- Fixed Python version check in ``setup.py``.

4.0.5 (2013-01-15)
==================

- Fix the ``repr`` of bucket objects, which could contain garbage characters.

4.0.4 (2013-01-12)
==================

- Emulate the (private) iterators used by the C extension modules from pure Python. This change is "cosmetic" only: it prevents the ZCML ``zope.app.security:permission.zcml`` from failing. The emulated classes are **not** functional, and should be considered implementation details.
- Accommodate buildout to the fact that we no longer bundle a copy of 'persistent.h'.
- Fix test failures on Windows: no longer rely on overflows from ``sys.maxint``.

4.0.3 (2013-01-04)
==================

- Added ``setup_requires==['persistent']``.

4.0.2 (2013-01-03)
==================

- Updated Trove classifiers.
- Added explicit support for Python 3.2, Python 3.3, and PyPy. Note that the C extensions are not (yet) available on PyPy.
- Python reference implementations now tested separately from the C versions on all platforms.
- 100% unit test coverage.

4.0.1 (2012-10-21)
==================

- Provide local fallback for persistent C header inclusion if the persistent distribution isn't installed. This makes the winbot happy.

4.0.0 (2012-10-20)
==================

Platform Changes
----------------

- Dropped support for Python < 2.6.
- Factored ``BTrees`` as a separate distribution.

Testing Changes
---------------

- All covered platforms tested under ``tox``.
- Added support for continuous integration using ``tox`` and ``jenkins``.
- Added ``setup.py dev`` alias (installs ``nose`` and ``coverage``).
- Dropped dependency on ``zope.testing`` / ``zope.testrunner``: tests now run with ``setup.py test``.

Documentation Changes
---------------------

- Added API reference, generated via Sphinx's autodoc.
- Added Sphinx documentation based on ZODB Guide (snippets are exercised via 'tox').
- Added ``setup.py docs`` alias (installs ``Sphinx`` and ``repoze.sphinx.autointerface``).

BTrees-6.0/CONTRIBUTING.md

# Contributing to zopefoundation projects

The projects under the zopefoundation GitHub organization are open source and welcome contributions in different forms:

* bug reports
* code improvements and bug fixes
* documentation improvements
* pull request reviews

For any changes in the repository besides trivial typo fixes you are required to sign the contributor agreement. See https://www.zope.dev/developer/becoming-a-committer.html for details.

Please visit our [Developer Guidelines](https://www.zope.dev/developer/guidelines.html) if you'd like to contribute code changes and our [guidelines for reporting bugs](https://www.zope.dev/developer/reporting-bugs.html) if you want to file a bug report.

BTrees-6.0/COPYRIGHT.txt

Zope Foundation and Contributors

BTrees-6.0/LICENSE.txt

Zope Public License (ZPL) Version 2.1

A copyright notice accompanies this license document that identifies the copyright holders.

This license has been certified as open source. It has also been designated as GPL compatible by the Free Software Foundation (FSF).

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions in source code must retain the accompanying copyright notice, this list of conditions, and the following disclaimer.

2. Redistributions in binary form must reproduce the accompanying copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Names of the copyright holders must not be used to endorse or promote products derived from this software without prior written permission from the copyright holders.

4. The right to distribute this software or to use it for any purpose does not give you the right to use Servicemarks (sm) or Trademarks (tm) of the copyright holders. Use of them is covered by separate agreement with the copyright holders.
5. If any files are modified, you must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.

Disclaimer

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

BTrees-6.0/MANIFEST.in

# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/c-code
include *.md
include *.rst
include *.txt
include buildout.cfg
include tox.ini
include .coveragerc
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs *.txt
recursive-include docs Makefile

recursive-include src *.py
include *.yaml
include *.sh

recursive-include docs *.bat
recursive-include docs *.css
recursive-include include/persistent *.h
recursive-include src *.c
recursive-include src *.h

BTrees-6.0/PKG-INFO

Metadata-Version: 2.1
Name: BTrees
Version: 6.0
Summary: Scalable persistent object containers
Home-page: https://github.com/zopefoundation/BTrees
Author: Zope Foundation
Author-email: zodb-dev@zope.org
License: ZPL 2.1
Project-URL: Documentation, https://btrees.readthedocs.io
Project-URL: Issue Tracker, https://github.com/zopefoundation/BTrees/issues
Project-URL: Sources, https://github.com/zopefoundation/BTrees
Platform: any
Classifier: Development Status :: 6 - Mature
Classifier: License :: OSI Approved :: Zope Public License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Framework :: ZODB
Classifier: Topic :: Database
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: Unix
Requires-Python: >=3.8
License-File: LICENSE.txt
Requires-Dist: persistent>=4.1.0
Requires-Dist: zope.interface>=5.0.0
Provides-Extra: test
Requires-Dist: persistent>=4.4.3; extra == "test"
Requires-Dist: transaction; extra == "test"
Requires-Dist: zope.testrunner; extra == "test"
Provides-Extra: zodb
Requires-Dist: ZODB; extra == "zodb"
Provides-Extra: docs
Requires-Dist: Sphinx; extra == "docs"
Requires-Dist: repoze.sphinx.autointerface; extra == "docs"
Requires-Dist: sphinx_rtd_theme; extra == "docs"
extra == "docs" ============================================= ``BTrees``: scalable persistent components ============================================= .. image:: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml/badge.svg :target: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml .. image:: https://ci.appveyor.com/api/projects/status/github/zopefoundation/BTrees?branch=master&svg=true :target: https://ci.appveyor.com/project/mgedmin/BTrees .. image:: https://coveralls.io/repos/github/zopefoundation/BTrees/badge.svg?branch=master :target: https://coveralls.io/github/zopefoundation/BTrees?branch=master .. image:: https://readthedocs.org/projects/btrees/badge/?version=latest :target: https://btrees.readthedocs.io/en/latest/ :alt: Documentation Status .. image:: https://img.shields.io/pypi/v/BTrees.svg :target: https://pypi.org/project/BTrees/ :alt: Current version on PyPI .. image:: https://img.shields.io/pypi/pyversions/BTrees.svg :target: https://pypi.org/project/BTrees/ :alt: Supported Python versions This package contains a set of persistent object containers built around a modified BTree data structure. The trees are optimized for use inside ZODB's "optimistic concurrency" paradigm, and include explicit resolution of conflicts detected by that mechanism. Please see `the Sphinx documentation `_ for further information. ================== BTrees Changelog ================== 6.0 (2024-05-30) ================ - Drop support for Python 3.7. - Build Windows wheels on GHA. 5.2 (2024-02-07) ================ - Add preliminary support for Python 3.13 as of 3.13a3. 5.1 (2023-10-05) ================ - Drop using ``setup_requires`` due to constant problems on GHA. - Add support for Python 3.12. 5.0 (2023-02-10) ================ - Build Linux binary wheels for Python 3.11. - Drop support for Python 2.7, 3.5, 3.6. 4.11.3 (2022-11-17) =================== - point release to rebuild full set of wheels 4.11.2 (2022-11-16) =================== - Add support for building arm64 wheels on macOS. 4.11.1 (2022-11-09) =================== - Fix macOS wheel build issues on GitHub Actions - We no longer provide 32bit wheels for the Windows platform, only x86_64. 4.11.0 (2022-11-03) =================== - Add support for Python 3.11. 4.10.1 (2022-09-12) =================== - Disable unsafe math optimizations in C code. (`#184 `_) 4.10.0 (2022-03-09) =================== - Add support for Python 3.10. 4.9.2 (2021-06-09) ================== - Fix ``fsBTree.TreeSet`` and ``fsBTree.BTree`` raising ``SystemError``. See `issue 170 `_. - Fix all the ``fsBTree`` objects to provide the correct interfaces and be instances of the appropriate collection ABCs. This was done for the other modules in release 4.8.0. - Fix the ``multiunion``, ``union``, ``intersection``, and ``difference`` functions when used with arbitrary iterables. Previously, the iterable had to be pre-sorted, meaning only sequences like ``list`` and ``tuple`` could reliably be used; this was not documented though. If the iterable wasn't sorted, the function would produce garbage output. Now, if the function detects an arbitrary iterable, it automatically sorts a copy. 4.9.1 (2021-05-27) ================== - Fix setting unknown class attributes on subclasses of BTrees when using the C extension. This prevented subclasses from being decorated with ``@component.adapter()``. See `issue 168 `_. 
BTrees-6.0/README.rst

=============================================
 ``BTrees``: scalable persistent components
=============================================

.. image:: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml/badge.svg
   :target: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml

.. image:: https://ci.appveyor.com/api/projects/status/github/zopefoundation/BTrees?branch=master&svg=true
   :target: https://ci.appveyor.com/project/mgedmin/BTrees

.. image:: https://coveralls.io/repos/github/zopefoundation/BTrees/badge.svg?branch=master
   :target: https://coveralls.io/github/zopefoundation/BTrees?branch=master

.. image:: https://readthedocs.org/projects/btrees/badge/?version=latest
   :target: https://btrees.readthedocs.io/en/latest/
   :alt: Documentation Status

.. image:: https://img.shields.io/pypi/v/BTrees.svg
   :target: https://pypi.org/project/BTrees/
   :alt: Current version on PyPI

.. image:: https://img.shields.io/pypi/pyversions/BTrees.svg
   :target: https://pypi.org/project/BTrees/
   :alt: Supported Python versions

This package contains a set of persistent object containers built around a modified BTree data structure. The trees are optimized for use inside ZODB's "optimistic concurrency" paradigm, and include explicit resolution of conflicts detected by that mechanism.

Please see `the Sphinx documentation `_ for further information.
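A minimal usage sketch of the mapping-style containers the README describes; keys are kept sorted, and the object-keyed ``OO`` module is used here purely for illustration:

.. code-block:: python

    from BTrees.OOBTree import OOBTree

    tree = OOBTree()
    tree['apple'] = 1
    tree.update({'banana': 2, 'cherry': 3})

    print(tree['apple'])              # 1
    print(list(tree.keys('b', 'z')))  # ['banana', 'cherry'] -- range search over sorted keys
    print('cherry' in tree, len(tree))  # True 3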
BTrees-6.0/buildout.cfg

[buildout]
eggs =
    persistent
    zope.interface
parts =
    w_persistent
    develop
    test
    test_w_zodb
    scripts

[w_persistent]
# Generate an interpreter w/ persistent installed, so that the
# 'develop' phase doesn't faux-install the eggs in the root and
# screw up the rest of the buildout.
recipe = zc.recipe.egg
interpreter = w_persistent
eggs = persistent

[develop]
recipe = zc.recipe.egg:develop
setup = .
executable = ${buildout:bin-directory}/w_persistent
# Force the :develop recipe to prefer the executable defined in this section.
python = develop

[test]
recipe = zc.recipe.testrunner
eggs =
    BTrees [test]
    ${buildout:eggs}
defaults = ['-sBTrees']

[test_w_zodb]
<= test
eggs =
    BTrees [test,ZODB]
    ${buildout:eggs}

[scripts]
recipe = zc.recipe.egg
eggs = ${test_w_zodb:eggs}
interpreter = py

BTrees-6.0/docs/Makefile

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BTrees.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BTrees.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/BTrees"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BTrees"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

BTrees-6.0/docs/_build/doctest/output.txt

Results of doctest builder run on 2024-05-16 17:25:06
=====================================================

Document: overview
------------------
1 items passed all tests:
  38 tests in default
38 tests in 1 items.
38 passed and 0 failed.
Test passed.

Doctest summary
===============
   38 tests
    0 failures in tests
    0 failures in setup code
    0 failures in cleanup code

BTrees-6.0/docs/_build/html/_sources/api.rst.txt

===============
 API Reference
===============

Protocol APIs
=============

.. module:: BTrees.Interfaces

.. versionchanged:: 4.8.0
   Previously, ``ISized`` was defined here, but now it is imported from
   :mod:`zope.interface.common.collections`. The definition is the same.

   Similarly, ``IReadSequence``, previously defined here, has been replaced
   with :mod:`zope.interface.common.sequence.IMinimalSequence`.

.. caution::
   Before version 4.8.0, most of these interfaces served as documentation
   only, and were *not* implemented by the classes of this package. For
   example, :class:`BTrees.OOBTree.BTree` did *not* implement `IBTree`. (The
   exceptions were the :class:`IBTreeModule` and :class:`IBTreeFamily`
   families of interfaces and implementations.)

   Beginning with version 4.8.0, objects implement the expected interface;
   the ``BTree`` classes implement ``IBTree``, the set classes implement the
   appropriate set interface and so on.

.. autointerface:: ICollection
.. autointerface:: IKeyed
.. autointerface:: ISetMutable
.. autointerface:: IKeySequence
.. autointerface:: IMinimalDictionary
.. autointerface:: IDictionaryIsh
.. autointerface:: IMerge
.. autointerface:: IIMerge
.. autointerface:: IMergeIntegerKey

BTree Family APIs
-----------------

.. autointerface:: ISet
.. autointerface:: ITreeSet
.. autointerface:: IBTree
.. autointerface:: IBTreeFamily

There are two families defined:

.. autodata:: BTrees.family32
.. autodata:: BTrees.family64

Module APIs
-----------

.. autointerface:: IBTreeModule
.. autointerface:: IObjectObjectBTreeModule
.. autointerface:: IIntegerObjectBTreeModule
.. autointerface:: IObjectIntegerBTreeModule
.. autointerface:: IIntegerIntegerBTreeModule
.. autointerface:: IIntegerFloatBTreeModule

Utilities
=========

.. automodule:: BTrees.Length
.. automodule:: BTrees.check

BTree Data Structure Variants
=============================

Integer Keys
------------

Float Values
~~~~~~~~~~~~
.. automodule:: BTrees.IFBTree

Integer Values
~~~~~~~~~~~~~~
.. automodule:: BTrees.IIBTree

Object Values
~~~~~~~~~~~~~
.. automodule:: BTrees.IOBTree

Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.IUBTree

Long Integer Keys
-----------------

Float Values
~~~~~~~~~~~~
.. automodule:: BTrees.LFBTree

Long Integer Values
~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.LLBTree

Object Values
~~~~~~~~~~~~~
.. automodule:: BTrees.LOBTree

Quad Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.LQBTree

Object Keys
-----------

Integer Values
~~~~~~~~~~~~~~
.. automodule:: BTrees.OIBTree

Long Integer Values
~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.OLBTree

Object Values
~~~~~~~~~~~~~
.. automodule:: BTrees.OOBTree

Quad Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.OQBTree

Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.OUBTree

Quad Unsigned Integer Keys
--------------------------

Float Values
~~~~~~~~~~~~
.. automodule:: BTrees.QFBTree

Long Integer Values
~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.QLBTree

Object Values
~~~~~~~~~~~~~
.. automodule:: BTrees.QOBTree

Quad Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.QQBTree

Unsigned Integer Keys
---------------------

Float Values
~~~~~~~~~~~~
.. automodule:: BTrees.UFBTree

Integer Values
~~~~~~~~~~~~~~
.. automodule:: BTrees.UIBTree

Object Values
~~~~~~~~~~~~~
.. automodule:: BTrees.UOBTree

Unsigned Integer Values
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: BTrees.UUBTree
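A brief sketch of the two families and the interface declarations documented above (behavior as of BTrees 4.8.0 and later; printed values are indicative):

.. code-block:: python

    from BTrees import family32, family64
    from BTrees.Interfaces import IBTree

    tree = family64.IO.BTree()      # 64-bit integer keys, arbitrary object values
    tree[2 ** 40] = 'needs a long key'
    print(IBTree.providedBy(tree))  # True -- the classes provide the interfaces since 4.8.0
    print(family32.maxint)          # 2147483647, the largest 32-bit signed key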
include:: ../CHANGES.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_build/html/_sources/development.rst.txt0000644000076500000240000004134414330745562022312 0ustar00jensstaff======================= Developer Information ======================= This document provides information for developers who maintain or extend `BTrees`. Macros ====== `BTrees` are defined using a "template", roughly akin to a C++ template. To create a new family of `BTrees`, create a source file that defines macros used to handle differences in key and value types: Configuration Macros -------------------- ``MASTER_ID`` A string to hold an RCS/CVS Id key to be included in compiled binaries. ``MOD_NAME_PREFIX`` A string (like "IO" or "OO") that provides the prefix used for the module. This gets used to generate type names and the internal module name string. Macros for Keys --------------- ``KEY_TYPE`` The C type declaration for keys (e.g., ``int`` or ``PyObject*``). ``KEY_TYPE_IS_PYOBJECT`` Define if ``KEY_TYPE`` is a ``PyObject*`, else ``undef``. ``KEY_CHECK(K)`` Tests whether the ``PyObject* K`` can be converted to the (``C``) key type (``KEY_TYPE``). The macro should return a boolean (zero for false, non-zero for true). When it returns false, its caller should probably set a ``TypeError`` exception. ``KEY_CHECK_ON_SET(K)`` Like ``KEY_CHECK``, but only checked during ``__setitem__``. ``TEST_KEY_SET_OR(V, K, T)`` Like Python's ``cmp()``. Compares K(ey) to T(arget), where ``K`` and ``T`` are ``C`` values of type `KEY_TYPE`. ``V`` is assigned an `int` value depending on the outcome:: < 0 if K < T == 0 if K == T > 0 if K > T This macro acts like an ``if``, where the following statement is executed only if a Python exception has been raised because the values could not be compared. ``DECREF_KEY(K)`` ``K`` is a value of ``KEY_TYPE``. If ``KEY_TYPE`` is a flavor of ``PyObject*``, write this to do ``Py_DECREF(K)``. Else (e.g., ``KEY_TYPE`` is ``int``) make it a nop. ``INCREF_KEY(K)`` ``K`` is a value of `KEY_TYPE`. If `KEY_TYPE` is a flavor of ``PyObject*``, write this to do ``Py_INCREF(K)``. Else (e.g., `KEY_TYPE` is ``int``) make it a nop. ``COPY_KEY(K, E)`` Like ``K=E``. Copy a key from ``E`` to ``K``, both of ``KEY_TYPE``. Note that this doesn't ``decref K`` or ``incref E`` when ``KEY_TYPE`` is a ``PyObject*``; the caller is responsible for keeping refcounts straight. ``COPY_KEY_TO_OBJECT(O, K)`` Roughly like ``O=K``. ``O`` is a ``PyObject*``, and the macro must build a Python object form of ``K``, assign it to ``O``, and ensure that ``O`` owns the reference to its new value. It may do this by creating a new Python object based on ``K`` (e.g., ``PyInt_FromLong(K)`` when ``KEY_TYPE`` is ``int``), or simply by doing ``Py_INCREF(K)`` if ``KEY_TYPE`` is a ``PyObject*``. ``COPY_KEY_FROM_ARG(TARGET, ARG, STATUS)`` Copy an argument to the target without creating a new reference to ``ARG``. ``ARG`` is a ``PyObject*``, and ``TARGET`` is of type ``KEY_TYPE``. If this can't be done (for example, ``KEY_CHECK(ARG)`` returns false), set a Python error and set status to ``0``. If there is no error, leave status alone. Macros for Values ----------------- ``VALUE_TYPE`` The C type declaration for values (e.g., ``int`` or ``PyObject*``). ``VALUE_TYPE_IS_PYOBJECT`` Define if ``VALUE_TYPE`` is a ``PyObject*``, else ``undef``. ``TEST_VALUE(X, Y)`` Like Python's ``cmp()``. Compares ``X`` to ``Y``, where ``X`` & ``Y`` are ``C`` values of type ``VALUE_TYPE``. 
The macro returns an ``int``, with value:: < 0 if X < Y == 0 if X == Y > 0 if X > Y Bug: There is no provision for determining whether the comparison attempt failed (set a Python exception). ``DECREF_VALUE(K)`` Like ``DECREF_KEY``, except applied to values of ``VALUE_TYPE``. ``INCREF_VALUE(K)`` Like ``INCREF_KEY``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE(K, E)`` Like ``COPY_KEY``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE_TO_OBJECT(O, K)`` Like ``COPY_KEY_TO_OBJECT``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS)`` Like ``COPY_KEY_FROM_ARG``, except applied to values of ``VALUE_TYPE``. ``NORMALIZE_VALUE(V, MIN)`` Normalize the value, ``V``, using the parameter ``MIN``. This is almost certainly a YAGNI. It is a no-op for most types. For integers, ``V`` is replaced by ``V/MIN`` only if ``MIN > 0``. Macros for Set Operations ------------------------- ``MERGE_DEFAULT`` A value of ``VALUE_TYPE`` specifying the value to associate with set elements when sets are merged with mappings via weighed union or weighted intersection. ``MERGE(O1, w1, O2, w2)`` Performs a weighted merge of two values, ``O1`` and ``O2``, using weights ``w1`` and ``w2``. The result must be of ``VALUE_TYPE``. Note that weighted unions and weighted intersections are not enabled if this macro is left undefined. ``MERGE_WEIGHT(O, w)`` Computes a weighted value for ``O``. The result must be of ``VALUE_TYPE``. This is used for "filling out" weighted unions, i.e. to compute a weighted value for keys that appear in only one of the input mappings. If left undefined, ``MERGE_WEIGHT`` defaults to:: #define MERGE_WEIGHT(O, w) (O) ``MULTI_INT_UNION`` The value doesn't matter. If defined, `SetOpTemplate.c` compiles code for a ``multiunion()`` function (compute a union of many input sets at high speed). This currently makes sense only for structures with integer keys. Datatypes ========= There are two tunable values exposed on BTree and TreeSet classes. Their default values are found in ``_datatypes.py`` and shared across C and Python. ``max_leaf_size`` An int giving the maximum bucket size (number of key/value pairs). When a bucket gets larger than this due to an insertion *into a BTREE*, it splits. Inserting into a bucket directly doesn't split, and functions that produce a bucket output (e.g., ``union()``) also have no bound on how large a bucket may get. This used to come from the C macro ``DEFAULT_MAX_BUCKET_SIZE``. ``max_internal_size`` An ``int`` giving the maximum size (number of children) of an internal btree node. This used to come from the C macro ``DEFAULT_MAX_BTREE_SIZE`` BTree Clues =========== More or less random bits of helpful info. + In papers and textbooks, this flavor of BTree is usually called a B+-Tree, where "+" is a superscript. + All keys and all values live in the bucket leaf nodes. Keys in interior (BTree) nodes merely serve to guide a search efficiently toward the correct leaf. + When a key is deleted, it's physically removed from the bucket it's in, but this doesn't propagate back up the tree: since keys in interior nodes only serve to guide searches, it's OK-- and saves time --to leave "stale" keys in interior nodes. + No attempt is made to rebalance the tree after a deletion, unless a bucket thereby becomes entirely empty. "Classic BTrees" do rebalance, keeping all buckets at least half full (provided there are enough keys in the entire tree to fill half a bucket). The tradeoffs are murky. 
Pathological cases in the presence of deletion do exist. Pathologies include trees tending toward only one key per bucket, and buckets at differing depths (all buckets are at the same depth in a classic BTree). + ``max_leaf_size`` and ``max_internal_size`` are chosen mostly to "even out" pickle sizes in storage. That's why, e.g., an `IIBTree` has larger values than an `OOBTree`: pickles store ints more efficiently than they can store arbitrary Python objects. + In a non-empty BTree, every bucket node contains at least one key, and every BTree node contains at least one child and a non-NULL firstbucket pointer. However, a BTree node may not contain any keys. + An empty BTree consists solely of a BTree node with ``len==0`` and ``firstbucket==NULL``. + Although a BTree can become unbalanced under a mix of inserts and deletes (meaning both that there's nothing stronger that can be said about buckets than that they're not empty, and that buckets can appear at different depths), a BTree node always has children of the same kind: they're all buckets, or they're all BTree nodes. The ``BTREE_SEARCH`` Macro ========================== For notational ease, consider a fixed BTree node ``x``, and let :: K(i) mean x->data.key[i] C(i) mean all the keys reachable from x->data.child[i] For each ``i`` in ``0`` to ``x->len-1`` inclusive, :: K(i) <= C(i) < K(i+1) is a BTree node invariant, where we pretend that ``K(0)`` holds a key smaller than any possible key, and ``K(x->len)`` holds a key larger than any possible key. (Note that ``K(x->len)`` doesn't actually exist, and ``K(0)`` is never used although space for it exists in non-empty BTree nodes.) When searching for a key ``k``, then, the child pointer we want to follow is the one at index ``i`` such that ``K(i) <= k < K(i+1)``. There can be at most one such ``i``, since the ``K(i)`` are strictly increasing. And there is at least one such ``i`` provided the tree isn't empty (so that ``0 < len``). For the moment, assume the tree isn't empty (we'll get back to that later). The macro's chief loop invariant is :: K(lo) < k < K(hi) This holds trivially at the start, since ``lo`` is set to ``0``, and ``hi`` to ``x->len``, and we pretend ``K(0)`` is minus infinity and ``K(len)`` is plus infinity. Inside the loop, if ``K(i) < k`` we set ``lo`` to ``i``, and if ``K(i) > k`` we set ``hi`` to ``i``. These obviously preserve the invariant. If ``K(i) == k``, the loop breaks and sets the result to ``i``, and since ``K(i) == k`` in that case ``i`` is obviously the correct result. Other cases depend on how ``i = floor((lo + hi)/2)`` works, exactly. Suppose ``lo + d = hi`` for some ``d >= 0``. Then ``i = floor((lo + lo + d)/2) = floor(lo + d/2) = lo + floor(d/2)``. So: a. ``[d == 0] (lo == i == hi)`` if and only if ``(lo == hi)``. b. ``[d == 1] (lo == i < hi)`` if and only if ``(lo+1 == hi)``. c. ``[d > 1] (lo < i < hi)`` if and only if ``(lo+1 < hi)``. If the node is empty ``(x->len == 0)``, then ``lo==i==hi==0`` at the start, and the loop exits immediately (the first ``i > lo`` test fails), without entering the body. Else ``lo < hi`` at the start, and the invariant ``K(lo) < k < K(hi)`` holds. If ``lo+1 < hi``, we're in case (c): ``i`` is strictly between ``lo`` and ``hi``, so the loop body is entered, and regardless of whether the body sets the new ``lo`` or the new ``hi`` to ``i``, the new ``lo`` is strictly less than the new ``hi``, and the difference between the new ``lo`` and new ``hi`` is strictly less than the difference between the old ``lo`` and old ``hi``. 
So long as the new ``lo + 1`` remains < the new ``hi``, we stay in this case. We can't stay in this case forever, though: because ``hi-lo`` decreases on each trip but remains > ``0``, ``lo+1 == hi`` must eventually become true. (In fact, it becomes true quickly, in about ``log2(x->len)`` trips; the point is more that ``lo`` doesn't equal ``hi`` when the loop ends, it has to end with ``lo+1==hi`` and ``i==lo``). Then we're in case (b): ``i==lo==hi-1`` then, and the loop exits. The invariant still holds, with ``lo==i`` and ``hi==lo+1==i+1``:: K(i) < k < K(i+1) so ``i`` is again the correct answer. Optimization points ------------------- + Division by 2 is done via shift rather than via "/2". These are signed ints, and almost all C compilers treat signed int division as truncating, and shifting is not the same as truncation for signed int division. The compiler has no way to know these values aren't negative, so has to generate longer-winded code for "/2". But we know these values aren't negative, and exploit it. + The order of _cmp comparisons matters. We're in an interior BTree node, and are looking at only a tiny fraction of all the keys that exist. So finding the key exactly in this node is unlikely, and checking ``_cmp == 0`` is a waste of time to the same extent. It doesn't matter whether we check for ``_cmp < 0`` or ``_cmp > 0`` first, so long as we do both before worrying about equality. + At the start of a routine, it's better to run this macro even if ``x->len`` is ``0`` (check for that afterwards). We just called a function and so probably drained the pipeline. If the first thing we do then is read up ``self->len`` and check it against ``0``, we just sit there waiting for the data to get read up, and then another immediate test-and-branch, and for a very unlikely case (BTree nodes are rarely empty). It's better to get into the loop right away so the normal case makes progress ASAP. The ``BUCKET_SEARCH`` Macro =========================== This has a different job than ``BTREE_SEARCH``: the key ``0`` slot is legitimate in a bucket, and we want to find the index at which the key belongs. If the key is larger than the bucket's largest key, a new slot at index len is where it belongs, else it belongs at the smallest ``i`` with ``keys[i]`` >= the key we're looking for. We also need to know whether or not the key is present (``BTREE_SEARCH`` didn't care; it only wanted to find the next node to search). The mechanics of the search are quite similar, though. The primary loop invariant changes to (say we're searching for key ``k``):: K(lo-1) < k < K(hi) where ``K(i)`` means ``keys[i]``, and we pretend ``K(-1)`` is minus infinity and ``K(len)`` is plus infinity. If the bucket is empty, ``lo=hi=i=0`` at the start, the loop body is never entered, and the macro sets ``INDEX`` to 0 and ``ABSENT`` to true. That's why ``_cmp`` is initialized to 1 (``_cmp`` becomes ``ABSENT``). Else the bucket is not empty, lo < hi at the start, and the loop body is entered. If ``K[i] < k``, ``lo`` is set to ``i+1``, preserving that ``K[lo-1] = K[i] < k``. If ``K[i] > k``, ``hi`` is set to ``i``, preserving that ``K[hi] = K[i] > k``. If the loop exits after either of those, ``_cmp != 0``, so ``ABSENT`` becomes true. If ``K[i]=k``, the loop breaks, so that ``INDEX`` becomes ``i``, and ``ABSENT`` becomes false (``_cmp=0`` in this case). The same case analysis for ``BTREE_SEARCH`` on ``lo`` and ``hi`` holds here: a. ``(lo == i == hi)`` if and only if ``(lo == hi)``. b. ``(lo == i < hi)`` if and only if ``(lo+1 == hi)``. c. ``(lo < i < hi)`` if and only if ``(lo+1 < hi)``.
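Before finishing the case analysis, it may help to see the search this section describes written out in ordinary Python. This is only an illustrative sketch of the logic (the function name ``bucket_search`` is invented here; the real implementation is the C ``BUCKET_SEARCH`` macro operating on a bucket's ``keys`` array)::

    def bucket_search(keys, k):
        # Sketch of BUCKET_SEARCH: return (INDEX, ABSENT) for key k.
        # Loop invariant: K(lo-1) < k < K(hi), pretending K(-1) is minus
        # infinity and K(len) is plus infinity.
        lo, hi = 0, len(keys)
        while lo < hi:
            i = (lo + hi) >> 1          # shift, as in the C macro
            if keys[i] < k:
                lo = i + 1              # preserves K(lo-1) = K(i) < k
            elif keys[i] > k:
                hi = i                  # preserves K(hi) = K(i) > k
            else:
                return i, False         # found: INDEX = i, ABSENT = false
        return lo, True                 # absent: key belongs at index lo

For example, ``bucket_search([1, 3, 5], 4)`` gives ``(2, True)``: the key is absent and belongs at index 2; ``bucket_search([1, 3, 5], 3)`` gives ``(1, False)``; and an empty ``keys`` gives ``(0, True)``, matching the ``INDEX`` = 0, ``ABSENT`` = true case above.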
So long as ``lo+1 < hi``, we're in case (c), and either break with equality (in which case the right results are obviously computed) or narrow the range. If equality doesn't obtain, the range eventually narrows to cases (a) or (b). To go from (c) to (a), we must have ``lo+2==hi`` at the start, and ``K[i]=K[lo+1] key``), because when it pays it narrows the range more (we get a little boost from setting ``lo=i+1`` in this case; the other case sets ``hi=i``, which isn't as much of a narrowing). ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_build/html/_sources/index.rst.txt0000644000076500000240000000102514330745562021067 0ustar00jensstaff====================== BTrees Documentation ====================== This package contains a set of persistent object containers built around a modified BTree data structure. The trees are optimized for use inside ZODB's "optimistic concurrency" paradigm, and include explicit resolution of conflicts detected by that mechanism. Contents: .. toctree:: :maxdepth: 2 overview api development changes ==================== Indices and tables ==================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_build/html/_sources/overview.rst.txt0000644000076500000240000005242414330745562021637 0ustar00jensstaff========== Overview ========== When programming with the ZODB, Python dictionaries aren't always what you need. The most important case is where you want to store a very large mapping. When a Python dictionary is accessed in a ZODB, the whole dictionary has to be unpickled and brought into memory. If you're storing something very large, such as a 100,000-entry user database, unpickling such a large object will be slow. BTrees are a balanced tree data structure that behave like a mapping but distribute keys throughout a number of tree nodes. The nodes are stored in sorted order (this has important consequences -- see below). Nodes are then only unpickled and brought into memory as they're accessed, so the entire tree doesn't have to occupy memory (unless you really are touching every single key). Related Data Structures ======================= The BTrees package provides a large collection of related data structures. The most general data structures store arbitrary ordered_ objects. There are variants of the data structures specialized to numbers, which are faster and more memory efficient than those dealing with objects. There are several modules that handle the different variants. The first two letters of the module name specify the types of the keys and values in mappings. For example, the :mod:`BTrees.IOBTree` module provides a mapping with 32-bit integer keys and arbitrary objects as values. .. 
list-table:: Data Type Notation :widths: auto :class: wrapped :header-rows: 1 * - Letter - Mnemonic Device - Data Type - Notes * - O - "Object" - Any sortable Python object - * - I - "Integer" - 32-bit signed integer - Values from ``-2**31 - 1`` through ``2**31 - 1`` (about plus or minus two billion) * - L - "Long integer" - 64-bit signed integer - Values from ``-2**63 - 1`` through ``2**63 - 1`` (about plus or minus nine quintillion) * - F - "Float" - 32-bit C-language ``float`` - New in ZODB 3.4 * - U - "Unsigned" - 32-bit unsigned integer - (New in BTrees 4.7.0) Values from 0 through ``2**32`` (about four billion) * - Q - "Quad" - 64-bit unsigned integer - Values from 0 through ``2**64`` (about 18 quintillion) (New in BTrees 4.7.0) The four data structures provide by each module are a BTree, a Bucket, a TreeSet, and a Set. The BTree and Bucket types are mappings and support all the usual mapping methods, e.g. :func:`~BTrees.Interfaces.ISetMutable.update` and :func:`~BTrees.Interfaces.IKeyed.keys`. The TreeSet and Set types are similar to mappings but they have no values; they support the methods that make sense for a mapping with no keys, e.g. :func:`~BTrees.Interfaces.IKeyed.keys` but not :func:`~BTrees.Interfaces.IMinimalDictionary.items`. The Bucket and Set types are the individual building blocks for BTrees and TreeSets, respectively. A Bucket or Set can be used when you are sure that it will have few elements. If the data structure will grow large, you should use a BTree or TreeSet. Like Python lists, Buckets and Sets are allocated in one contiguous piece, and insertions and deletions can take time proportional to the number of existing elements. Also like Python lists, a Bucket or Set is a single object, and is pickled and unpickled in its entirety. BTrees and TreeSets are multi-level tree structures with much better (logarithmic) worst- case time bounds, and the tree structure is built out of multiple objects, which ZODB can load individually as needed. The two letter prefixes are repeated in the data types names. For example, the :mod:`BTrees.OOBTree` module defines the following types: :class:`BTrees.OOBTree.OOBTree`, :class:`BTrees.OOBTree.OOBucket`, :class:`BTrees.OOBTree.OOSet`, and :class:`BTrees.OOBTree.OOTreeSet`. Similarly, the other modules each define their own variants of those four types. For convenience, BTrees groups the most closely related data structures together into a "family" (defined by :class:`BTrees.Interfaces.IBTreeFamily`). :obj:`BTrees.family32` groups 32-bit data structures, while :obj:`BTrees.family64` contains 64-bit data structures. It is a common practice for code that creates BTrees to be parameterized on the family so that the caller can choose the desired characteristics. Behaviour ========= The `keys`, :func:`values`, and :func:`items` methods on BTree and TreeSet types do not materialize a list with all of the data. Instead, they return lazy sequences that fetch data from the BTree as needed. They also support optional arguments to specify the minimum and maximum values to return, often called "range searching". Because all these types are stored in sorted order, range searching is very efficient. The :func:`keys`, :func:`values`, and :func:`items` methods on Bucket and Set types do return lists with all the data. Starting in ZODB 3.3, there are also :func:`iterkeys`, :func:`itervalues`, and :func:`iteritems` methods that return iterators (in the Python 2.2 sense). Those methods also apply to BTree and TreeSet objects. 
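As a small illustration of the family-based parameterization described above (the helper name ``make_int_index`` is invented for this sketch)::

    >>> import BTrees
    >>> def make_int_index(family=BTrees.family32):
    ...     # integer keys, arbitrary object values, in the requested width
    ...     return family.IO.BTree()
    >>> t = make_int_index()
    >>> t[1] = 'one'
    >>> list(t.items())
    [(1, 'one')]
    >>> t64 = make_int_index(BTrees.family64)
    >>> t64[2**40] = 'big'            # keys this large need the 64-bit family
    >>> list(t64.keys())
    [1099511627776]

Code that never hard-codes a concrete module in this way can switch an application between 32-bit and 64-bit key widths by changing a single argument.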
A BTree object supports all the methods you would expect of a mapping, with a few extensions that exploit the fact that the keys are sorted. The example below demonstrates how some of the methods work. The extra methods are :func:`minKey` and :func:`maxKey`, which find the minimum and maximum key value subject to an optional bound argument, and :func:`byValue`, which should probably be ignored (it's hard to explain exactly what it does, and as a result it's almost never used -- best to consider it deprecated). The various methods for enumerating keys, values and items also accept minimum and maximum key arguments ("range search"), and (new in ZODB 3.3) optional Boolean arguments to control whether a range search is inclusive or exclusive of the range's endpoints. .. doctest:: >>> from BTrees.OOBTree import OOBTree >>> t = OOBTree() >>> t.update({1: "red", 2: "green", 3: "blue", 4: "spades"}) >>> len(t) 4 >>> t[2] 'green' >>> s = t.keys() # this is a "lazy" sequence object >>> s <...TreeItems object at ...> >>> len(s) # it acts like a Python list 4 >>> s[-2] 3 >>> list(s) # materialize the full list [1, 2, 3, 4] >>> list(t.values()) ['red', 'green', 'blue', 'spades'] >>> list(t.values(1, 2)) # values at keys in 1 to 2 inclusive ['red', 'green'] >>> list(t.values(2)) # values at keys >= 2 ['green', 'blue', 'spades'] >>> list(t.values(min=1, max=4)) # keyword args new in ZODB 3.3 ['red', 'green', 'blue', 'spades'] >>> list(t.values(min=1, max=4, excludemin=True, excludemax=True)) ['green', 'blue'] >>> t.minKey() # smallest key 1 >>> t.minKey(1.5) # smallest key >= 1.5 2 >>> [k for k in t.keys()] [1, 2, 3, 4] >>> [k for k in t] # new in ZODB 3.3 [1, 2, 3, 4] >>> [pair for pair in t.iteritems()] # new in ZODB 3.3 [(1, 'red'), (2, 'green'), (3, 'blue'), (4, 'spades')] >>> t.has_key(4) # returns a true value True >>> t.has_key(5) False >>> 4 in t # new in ZODB 3.3 True >>> 5 in t # new in ZODB 3.3 False >>> Each of the modules also defines some functions that operate on BTrees -- :func:`~BTrees.Interfaces.IMerge.difference`, :func:`~BTrees.Interfaces.IMerge.union`, and :func:`~BTrees.Interfaces.IMerge.intersection`. The :func:`~BTrees.Interfaces.IMerge.difference` function returns a Bucket, while the other two methods return a Set. If the keys are integers, then the module also defines :func:`~BTrees.Interfaces.IMergeIntegerKey.multiunion`. If the values are integers or floats, then the module also defines :func:`~BTrees.Interfaces.IIMerge.weightedIntersection` and :func:`~BTrees.Interfaces.IIMerge.weightedUnion`. The function doc strings describe each function briefly. .. % XXX I'm not sure all of the following is actually correct. The .. % XXX set functions have complicated behavior. :mod:`~BTrees.Interfaces` defines the operations, and is the official documentation. Note that the interfaces don't define the concrete types returned by most operations, and you shouldn't rely on the concrete types that happen to be returned: stick to operations guaranteed by the interface. In particular, note that the interfaces don't specify anything about comparison behavior, and so nothing about it is guaranteed. In ZODB 3.3, for example, two BTrees happen to use Python's default object comparison, which amounts to comparing the (arbitrary but fixed) memory addresses of the BTrees. This may or may not be true in future releases. If the interfaces don't specify a behavior, then whether that behavior appears to work, and exactly happens if it does appear to work, are undefined and should not be relied on. .. 
_ordered: Total Ordering and Persistence ============================== The BTree-based data structures differ from Python dicts in several fundamental ways. One of the most important is that while dicts require that keys support hash codes and equality comparison, the BTree-based structures don't use hash codes and require a total ordering on keys. Total ordering means three things: #. Reflexive. For each *x*, ``x == x`` is true. #. Trichotomy. For each *x* and *y*, exactly one of ``x < y``, ``x == y``, and ``x > y`` is true. #. Transitivity. Whenever ``x <= y`` and ``y <= z``, it's also true that ``x <= z``. The default comparison functions for most objects that come with Python satisfy these rules, with some crucial cautions explained later. Complex numbers are an example of an object whose default comparison function does not satisfy these rules: complex numbers only support ``==`` and ``!=`` comparisons, and raise an exception if you try to compare them in any other way. They don't satisfy the trichotomy rule, and must not be used as keys in BTree-based data structures (although note that complex numbers can be used as keys in Python dicts, which do not require a total ordering). Examples of objects that are wholly safe to use as keys in BTree-based structures include ints, longs, floats, 8-bit strings, Unicode strings, and tuples composed (possibly recursively) of objects of wholly safe types. It's important to realize that even if two types satisfy the rules on their own, mixing objects of those types may not. For example, 8-bit strings and Unicode strings both supply total orderings, but mixing the two loses trichotomy; e.g., ``'x' < chr(255)`` and ``u'x' == 'x'``, but trying to compare ``chr(255)`` to ``u'x'`` raises an exception. Partly for this reason (another is given later), it can be dangerous to use keys with multiple types in a single BTree-based structure. Don't try to do that, and you don't have to worry about it. Another potential problem is mutability: when a key is inserted in a BTree- based structure, it must retain the same order relative to the other keys over time. This is easy to run afoul of if you use mutable objects as keys. For example, lists supply a total ordering, and then .. doctest:: >>> L1, L2, L3 = [1], [2], [3] >>> from BTrees.OOBTree import OOSet >>> s = OOSet((L2, L3, L1)) # this is fine, so far >>> list(s.keys()) # note that the lists are in sorted order [[1], [2], [3]] >>> s.has_key([3]) # and [3] is in the set True >>> L2[0] = 5 # horrible -- the set is insane now >>> s.has_key([3]) # for example, it's insane this way False >>> s.__class__ >>> list(s) [[1], [5], [3]] Key lookup relies on that the keys remain in sorted order (an efficient form of binary search is used). By mutating key L2 after inserting it, we destroyed the invariant that the OOSet is sorted. As a result, all future operations on this set are unpredictable. A subtler variant of this problem arises due to persistence: by default, Python does several kinds of comparison by comparing the memory addresses of two objects. Because Python never moves an object in memory, this does supply a usable (albeit arbitrary) total ordering across the life of a program run (an object's memory address doesn't change). But if objects compared in this way are used as keys of a BTree-based structure that's stored in a database, when the objects are loaded from the database again they will almost certainly wind up at different memory addresses. 
There's no guarantee then that if key K1 had a memory address smaller than the memory address of key K2 at the time K1 and K2 were inserted in a BTree, K1's address will also be smaller than K2's when that BTree is loaded from a database later. The result will be an insane BTree, where various operations do and don't work as expected, seemingly at random. Now each of the types identified above as "wholly safe to use" never compares two instances of that type by memory address, so there's nothing to worry about here if you use keys of those types. The most common mistake is to use keys that are instances of a user-defined class that doesn't supply its own :meth:`__cmp__` method. Python compares such instances by memory address. This is fine if such instances are used as keys in temporary BTree-based structures used only in a single program run. It can be disastrous if that BTree-based structure is stored to a database, though. .. doctest:: :options: +SKIP >>> class C: ... pass ... >>> a, b = C(), C() >>> print(a < b) # this may print 0 if you try it True >>> del a, b >>> a, b = C(), C() >>> print(a < b) # and this may print 0 or 1 False >>> That example illustrates that comparison of instances of classes that don't define :meth:`__cmp__` yields arbitrary results (but consistent results within a single program run). Another problem occurs with instances of classes that do define :meth:`__cmp__`, but define it incorrectly. It's possible but rare for a custom :meth:`__cmp__` implementation to violate one of the three required formal properties directly. It's more common for it to "fall back" to address-based comparison by mistake. For example, .. doctest:: >>> class Mine: ... def __cmp__(self, other): ... if other.__class__ is Mine: ... return cmp(self.data, other.data) ... else: ... return cmp(self.data, other) It's quite possible there that the :keyword:`else` clause allows a result to be computed based on memory address. The bug won't show up until a BTree-based structure uses objects of class :class:`Mine` as keys, and also objects of other types as keys, and the structure is loaded from a database, and a sequence of comparisons happens to execute the :keyword:`else` clause in a case where the relative order of object memory addresses happened to change. This is as difficult to track down as it sounds, so best to stay far away from the possibility. You'll stay out of trouble by follwing these rules, violating them only with great care: #. Use objects of simple immutable types as keys in BTree-based data structures. #. Within a single BTree-based data structure, use objects of a single type as keys. Don't use multiple key types in a single structure. #. If you want to use class instances as keys, and there's any possibility that the structure may be stored in a database, it's crucial that the class define a :meth:`__cmp__` method, and that the method is carefully implemented. Any part of a comparison implementation that relies (explicitly or implicitly) on an address-based comparison result will eventually cause serious failure. #. Do not use :class:`~persistent.Persistent` objects as keys, or objects of a subclass of :class:`~persistent.Persistent`. That last item may be surprising. It stems from details of how conflict resolution is implemented: the states passed to conflict resolution do not materialize persistent subobjects (if a persistent object P is a key in a BTree, then P is a subobject of the bucket containing P). 
Instead, if an object O references a persistent subobject P directly, and O is involved in a conflict, the states passed to conflict resolution contain an instance of an internal :class:`~persistent.PersistentReference` stub class everywhere O references P. Two :class:`~persistent.PersistentReference` instances compare equal if and only if they "represent" the same persistent object; when they're not equal, they compare by memory address, and, as explained before, memory-based comparison must never happen in a sane persistent BTree. Note that it doesn't help in this case if your :class:`~persistent.Persistent` subclass defines a sane :meth:`__cmp__` method: conflict resolution doesn't know about your class, and so also doesn't know about its :meth:`__cmp__` method. It only sees instances of the internal :class:`~persistent.PersistentReference` stub class. Iteration and Mutation ====================== As with a Python dictionary or list, you should not mutate a BTree-based data structure while iterating over it, except that it's fine to replace the value associated with an existing key while iterating. You won't create internal damage in the structure if you try to remove, or add new keys, while iterating, but the results are undefined and unpredictable. A weak attempt is made to raise :exc:`RuntimeError` if the size of a BTree-based structure changes while iterating, but it doesn't catch most such cases, and is also unreliable. Example .. doctest:: :options: +SKIP >>> from BTrees.IIBTree import IISet >>> s = IISet(range(10)) >>> list(s) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> for i in s: # the output is undefined ... print(i) ... s.remove(i) 0 2 4 6 8 Traceback (most recent call last): File "", line 1, in ? RuntimeError: the bucket being iterated changed size >>> list(s) # this output is also undefined [1, 3, 5, 7, 9] >>> Also as with Python dictionaries and lists, the safe and predictable way to mutate a BTree-based structure while iterating over it is to iterate over a copy of the keys. Example .. doctest:: >>> from BTrees.IIBTree import IISet >>> s = IISet(range(10)) >>> for i in list(s.keys()): # this is well defined ... print(i) ... s.remove(i) 0 1 2 3 4 5 6 7 8 9 >>> list(s) [] >>> BTree node sizes ================ BTrees (and TreeSets) are made up of a tree of Buckets (and Sets) and internal nodes. There are maximum sizes of these notes configured for the various key and value types (unsigned and quad unsigned follow integer and long, respectively): ======== ========== ========================== ============================= Key Type Value Type Maximum Bucket or Set Size Maximum BTree or TreeSet Size ======== ========== ========================== ============================= Integer Float 120 500 Integer Integer 120 500 Integer Object 60 500 Long Float 120 500 Long Long 120 500 Long Object 60 500 Object Integer 60 250 Object Long 60 250 Object Object 30 250 ======== ========== ========================== ============================= For your application, especially when using object keys or values, you may want to override the default sizes. You can do this by subclassing any of the BTree (or TreeSet) classes and specifying new values for ``max_leaf_size`` or ``max_internal_size`` in your subclass:: >>> import BTrees.OOBTree >>> class MyBTree(BTrees.OOBTree.BTree): ... max_leaf_size = 500 ... max_internal_size = 1000 As of version 4.9, you can also set these values directly on an existing BTree class if you wish to tune them across your entire application. 
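For example (a sketch only; the particular numbers are arbitrary, not recommendations)::

    >>> import BTrees.OOBTree
    >>> BTrees.OOBTree.BTree.max_leaf_size = 60        # default for OO is 30
    >>> BTrees.OOBTree.BTree.max_internal_size = 500   # default for OO is 250

Assigning on the shared class affects every user of that class in the process, whereas the subclassing approach shown above keeps the change local to your own code.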
``max_leaf_size`` is used for leaf nodes in a BTree, either Buckets or Sets. ``max_internal_size`` is used for internal nodes, either BTrees or TreeSets. BTree Diagnostic Tools ====================== A BTree (or TreeSet) is a complex data structure, really a graph of variable- size nodes, connected in multiple ways via three distinct kinds of C pointers. There are some tools available to help check internal consistency of a BTree as a whole. Most generally useful is the :mod:`~BTrees.check` module. The :func:`~BTrees.check.check` function examines a BTree (or Bucket, Set, or TreeSet) for value-based consistency, such as that the keys are in strictly increasing order. See the function docstring for details. The :func:`~BTrees.check.display` function displays the internal structure of a BTree. BTrees and TreeSets also have a :meth:`_check` method. This verifies that the (possibly many) internal pointers in a BTree or TreeSet are mutually consistent, and raises :exc:`AssertionError` if they're not. If a :func:`~BTrees.check.check` or :meth:`_check` call fails, it may point to a bug in the implementation of BTrees or conflict resolution, or may point to database corruption. Repairing a damaged BTree is usually best done by making a copy of it. For example, if *self.data* is bound to a corrupted IOBTree, .. doctest:: :options: +SKIP >>> self.data = IOBTree(self.data) usually suffices. If object identity needs to be preserved, .. doctest:: :options: +SKIP >>> acopy = IOBTree(self.data) >>> self.data.clear() >>> self.data.update(acopy) does the same, but leaves *self.data* bound to the same object. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4828496 BTrees-6.0/docs/_build/html/_static/0000755000076500000240000000000014626041203016203 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1715873103.0 BTrees-6.0/docs/_build/html/_static/basic.css0000644000076500000240000003536614621422517020021 0ustar00jensstaff/* * basic.css * ~~~~~~~~~ * * Sphinx stylesheet -- basic theme. * * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ /* -- main layout ----------------------------------------------------------- */ div.clearer { clear: both; } div.section::after { display: block; content: ''; clear: left; } /* -- relbar ---------------------------------------------------------------- */ div.related { width: 100%; font-size: 90%; } div.related h3 { display: none; } div.related ul { margin: 0; padding: 0 0 0 10px; list-style: none; } div.related li { display: inline; } div.related li.right { float: right; margin-right: 5px; } /* -- sidebar --------------------------------------------------------------- */ div.sphinxsidebarwrapper { padding: 10px 5px 0 10px; } div.sphinxsidebar { float: left; width: 230px; margin-left: -100%; font-size: 90%; word-wrap: break-word; overflow-wrap : break-word; } div.sphinxsidebar ul { list-style: none; } div.sphinxsidebar ul ul, div.sphinxsidebar ul.want-points { margin-left: 20px; list-style: square; } div.sphinxsidebar ul ul { margin-top: 0; margin-bottom: 0; } div.sphinxsidebar form { margin-top: 10px; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } div.sphinxsidebar #searchbox form.search { overflow: hidden; } div.sphinxsidebar #searchbox input[type="text"] { float: left; width: 80%; padding: 0.25em; box-sizing: border-box; } div.sphinxsidebar #searchbox input[type="submit"] { float: left; width: 20%; border-left: none; padding: 0.25em; box-sizing: border-box; } img { border: 0; max-width: 100%; } /* -- search page ----------------------------------------------------------- */ ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li p.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* -- index page ------------------------------------------------------------ */ table.contentstable { width: 90%; margin-left: auto; margin-right: auto; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } /* -- general index --------------------------------------------------------- */ table.indextable { width: 100%; } table.indextable td { text-align: left; vertical-align: top; } table.indextable ul { margin-top: 0; margin-bottom: 0; list-style-type: none; } table.indextable > tbody > tr > td > ul { padding-left: 0em; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } div.modindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } div.genindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } /* -- domain module index --------------------------------------------------- */ table.modindextable td { padding: 2px; border-collapse: collapse; } /* -- general body styles --------------------------------------------------- */ div.body { min-width: 360px; max-width: 800px; } div.body p, div.body dd, div.body li, div.body blockquote { -moz-hyphens: auto; -ms-hyphens: auto; -webkit-hyphens: auto; hyphens: auto; } a.headerlink { visibility: hidden; } a:visited { color: #551A8B; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > 
a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink, caption:hover > a.headerlink, p.caption:hover > a.headerlink, div.code-block-caption:hover > a.headerlink { visibility: visible; } div.body p.caption { text-align: inherit; } div.body td { text-align: left; } .first { margin-top: 0 !important; } p.rubric { margin-top: 30px; font-weight: bold; } img.align-left, figure.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } img.align-right, figure.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } img.align-center, figure.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; } img.align-default, figure.align-default, .figure.align-default { display: block; margin-left: auto; margin-right: auto; } .align-left { text-align: left; } .align-center { text-align: center; } .align-default { text-align: center; } .align-right { text-align: right; } /* -- sidebars -------------------------------------------------------------- */ div.sidebar, aside.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; padding: 7px; background-color: #ffe; width: 40%; float: right; clear: right; overflow-x: auto; } p.sidebar-title { font-weight: bold; } nav.contents, aside.topic, div.admonition, div.topic, blockquote { clear: left; } /* -- topics ---------------------------------------------------------------- */ nav.contents, aside.topic, div.topic { border: 1px solid #ccc; padding: 7px; margin: 10px 0 10px 0; } p.topic-title { font-size: 1.1em; font-weight: bold; margin-top: 10px; } /* -- admonitions ----------------------------------------------------------- */ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; } div.admonition dt { font-weight: bold; } p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; } div.body p.centered { text-align: center; margin-top: 25px; } /* -- content of sidebars/topics/admonitions -------------------------------- */ div.sidebar > :last-child, aside.sidebar > :last-child, nav.contents > :last-child, aside.topic > :last-child, div.topic > :last-child, div.admonition > :last-child { margin-bottom: 0; } div.sidebar::after, aside.sidebar::after, nav.contents::after, aside.topic::after, div.topic::after, div.admonition::after, blockquote::after { display: block; content: ''; clear: both; } /* -- tables ---------------------------------------------------------------- */ table.docutils { margin-top: 10px; margin-bottom: 10px; border: 0; border-collapse: collapse; } table.align-center { margin-left: auto; margin-right: auto; } table.align-default { margin-left: auto; margin-right: auto; } table caption span.caption-number { font-style: italic; } table caption span.caption-text { } table.docutils td, table.docutils th { padding: 1px 8px 1px 5px; border-top: 0; border-left: 0; border-right: 0; border-bottom: 1px solid #aaa; } th { text-align: left; padding-right: 5px; } table.citation { border-left: solid 1px gray; margin-left: 1px; } table.citation td { border-bottom: none; } th > :first-child, td > :first-child { margin-top: 0px; } th > :last-child, td > :last-child { margin-bottom: 0px; } /* -- figures --------------------------------------------------------------- */ div.figure, figure { margin: 0.5em; padding: 0.5em; } div.figure p.caption, figcaption { padding: 0.3em; } div.figure p.caption span.caption-number, figcaption span.caption-number { font-style: italic; } 
div.figure p.caption span.caption-text, figcaption span.caption-text { } /* -- field list styles ----------------------------------------------------- */ table.field-list td, table.field-list th { border: 0 !important; } .field-list ul { margin: 0; padding-left: 1em; } .field-list p { margin: 0; } .field-name { -moz-hyphens: manual; -ms-hyphens: manual; -webkit-hyphens: manual; hyphens: manual; } /* -- hlist styles ---------------------------------------------------------- */ table.hlist { margin: 1em 0; } table.hlist td { vertical-align: top; } /* -- object description styles --------------------------------------------- */ .sig { font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; } .sig-name, code.descname { background-color: transparent; font-weight: bold; } .sig-name { font-size: 1.1em; } code.descname { font-size: 1.2em; } .sig-prename, code.descclassname { background-color: transparent; } .optional { font-size: 1.3em; } .sig-paren { font-size: larger; } .sig-param.n { font-style: italic; } /* C++ specific styling */ .sig-inline.c-texpr, .sig-inline.cpp-texpr { font-family: unset; } .sig.c .k, .sig.c .kt, .sig.cpp .k, .sig.cpp .kt { color: #0033B3; } .sig.c .m, .sig.cpp .m { color: #1750EB; } .sig.c .s, .sig.c .sc, .sig.cpp .s, .sig.cpp .sc { color: #067D17; } /* -- other body styles ----------------------------------------------------- */ ol.arabic { list-style: decimal; } ol.loweralpha { list-style: lower-alpha; } ol.upperalpha { list-style: upper-alpha; } ol.lowerroman { list-style: lower-roman; } ol.upperroman { list-style: upper-roman; } :not(li) > ol > li:first-child > :first-child, :not(li) > ul > li:first-child > :first-child { margin-top: 0px; } :not(li) > ol > li:last-child > :last-child, :not(li) > ul > li:last-child > :last-child { margin-bottom: 0px; } ol.simple ol p, ol.simple ul p, ul.simple ol p, ul.simple ul p { margin-top: 0; } ol.simple > li:not(:first-child) > p, ul.simple > li:not(:first-child) > p { margin-top: 0; } ol.simple p, ul.simple p { margin-bottom: 0; } aside.footnote > span, div.citation > span { float: left; } aside.footnote > span:last-of-type, div.citation > span:last-of-type { padding-right: 0.5em; } aside.footnote > p { margin-left: 2em; } div.citation > p { margin-left: 4em; } aside.footnote > p:last-of-type, div.citation > p:last-of-type { margin-bottom: 0em; } aside.footnote > p:last-of-type:after, div.citation > p:last-of-type:after { content: ""; clear: both; } dl.field-list { display: grid; grid-template-columns: fit-content(30%) auto; } dl.field-list > dt { font-weight: bold; word-break: break-word; padding-left: 0.5em; padding-right: 5px; } dl.field-list > dd { padding-left: 0.5em; margin-top: 0em; margin-left: 0em; margin-bottom: 0em; } dl { margin-bottom: 15px; } dd > :first-child { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .sig dd { margin-top: 0px; margin-bottom: 0px; } .sig dl { margin-top: 0px; margin-bottom: 0px; } dl > dd:last-child, dl > dd:last-child > :last-child { margin-bottom: 0; } dt:target, span.highlighted { background-color: #fbe54e; } rect.highlighted { fill: #fbe54e; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } .versionmodified { font-style: italic; } .system-message { background-color: #fda; padding: 5px; border: 3px solid red; } .footnote:target { background-color: #ffa; } .line-block { display: block; margin-top: 1em; margin-bottom: 1em; } .line-block .line-block { margin-top: 0; 
margin-bottom: 0; margin-left: 1.5em; } .guilabel, .menuselection { font-family: sans-serif; } .accelerator { text-decoration: underline; } .classifier { font-style: oblique; } .classifier:before { font-style: normal; margin: 0 0.5em; content: ":"; display: inline-block; } abbr, acronym { border-bottom: dotted 1px; cursor: help; } .translated { background-color: rgba(207, 255, 207, 0.2) } .untranslated { background-color: rgba(255, 207, 207, 0.2) } /* -- code displays --------------------------------------------------------- */ pre { overflow: auto; overflow-y: hidden; /* fixes display issues on Chrome browsers */ } pre, div[class*="highlight-"] { clear: both; } span.pre { -moz-hyphens: none; -ms-hyphens: none; -webkit-hyphens: none; hyphens: none; white-space: nowrap; } div[class*="highlight-"] { margin: 1em 0; } td.linenos pre { border: 0; background-color: transparent; color: #aaa; } table.highlighttable { display: block; } table.highlighttable tbody { display: block; } table.highlighttable tr { display: flex; } table.highlighttable td { margin: 0; padding: 0; } table.highlighttable td.linenos { padding-right: 0.5em; } table.highlighttable td.code { flex: 1; overflow: hidden; } .highlight .hll { display: block; } div.highlight pre, table.highlighttable pre { margin: 0; } div.code-block-caption + div { margin-top: 0; } div.code-block-caption { margin-top: 1em; padding: 2px 5px; font-size: small; } div.code-block-caption code { background-color: transparent; } table.highlighttable td.linenos, span.linenos, div.highlight span.gp { /* gp: Generic.Prompt */ user-select: none; -webkit-user-select: text; /* Safari fallback only */ -webkit-user-select: none; /* Chrome/Safari */ -moz-user-select: none; /* Firefox */ -ms-user-select: none; /* IE10+ */ } div.code-block-caption span.caption-number { padding: 0.1em 0.3em; font-style: italic; } div.code-block-caption span.caption-text { } div.literal-block-wrapper { margin: 1em 0; } code.xref, a code { background-color: transparent; font-weight: bold; } h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { background-color: transparent; } .viewcode-link { float: right; } .viewcode-back { float: right; font-family: sans-serif; } div.viewcode-block:target { margin: -1px -10px; padding: 0 10px; } /* -- math display ---------------------------------------------------------- */ img.math { vertical-align: middle; } div.body div.math p { text-align: center; } span.eqno { float: right; } span.eqno a.headerlink { position: absolute; z-index: 1; } div.math:hover a.headerlink { visibility: visible; } /* -- printout stylesheet --------------------------------------------------- */ @media print { div.document, div.documentwrapper, div.bodywrapper { margin: 0 !important; width: 100%; } div.sphinxsidebar, div.related, div.footer, #top-link { display: none; } }././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1717060227.483261 BTrees-6.0/docs/_build/html/_static/css/0000755000076500000240000000000014626041203016773 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1681980427.0 BTrees-6.0/docs/_build/html/_static/css/badge_only.css0000644000076500000240000000623514420176013021616 0ustar00jensstaff.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) 
format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1696510495.0 BTrees-6.0/docs/_build/html/_static/css/theme.css0000644000076500000240000041022214507531037020617 
0ustar00jensstaff
input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group 
.wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em 
.625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_build/html/_static/custom.css0000644000076500000240000000055314330745562020245 0ustar00jensstaff/* sphinx_rtd_theme pulls from wyrm, which applies white-space: nowrap to tables. https://github.com/snide/wyrm/blob/fd41b56978f009e8c33cb26f384dd0dfaf430a7d/sass/wyrm_core/_table.sass#L144 That makes it hard to have a table with more than three columns without pointless scrolling. 
*/ .wrapped td, .wrapped th { white-space: normal !important; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_build/html/_static/placeholder.txt0000644000076500000240000000000014330745562021227 0ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1715873103.0 BTrees-6.0/docs/_build/html/_static/pygments.css0000644000076500000240000001150114621422517020567 0ustar00jensstaffpre { line-height: 125%; } td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } .highlight .hll { background-color: #ffffcc } .highlight { background: #eeffcc; } .highlight .c { color: #408090; font-style: italic } /* Comment */ .highlight .err { border: 1px solid #FF0000 } /* Error */ .highlight .k { color: #007020; font-weight: bold } /* Keyword */ .highlight .o { color: #666666 } /* Operator */ .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ .highlight .cp { color: #007020 } /* Comment.Preproc */ .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #A00000 } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ .highlight .gr { color: #FF0000 } /* Generic.Error */ .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .highlight .gi { color: #00A000 } /* Generic.Inserted */ .highlight .go { color: #333333 } /* Generic.Output */ .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ .highlight .gt { color: #0044DD } /* Generic.Traceback */ .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ .highlight .kp { color: #007020 } /* Keyword.Pseudo */ .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ .highlight .kt { color: #902000 } /* Keyword.Type */ .highlight .m { color: #208050 } /* Literal.Number */ .highlight .s { color: #4070a0 } /* Literal.String */ .highlight .na { color: #4070a0 } /* Name.Attribute */ .highlight .nb { color: #007020 } /* Name.Builtin */ .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ .highlight .no { color: #60add5 } /* Name.Constant */ .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ .highlight .ne { color: #007020 } /* Name.Exception */ .highlight .nf { color: #06287e } /* Name.Function */ .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ .highlight .nn { color: #0e84b5; font-weight: bold } /* 
Name.Namespace */ .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #bb60d5 } /* Name.Variable */ .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ .highlight .w { color: #bbbbbb } /* Text.Whitespace */ .highlight .mb { color: #208050 } /* Literal.Number.Bin */ .highlight .mf { color: #208050 } /* Literal.Number.Float */ .highlight .mh { color: #208050 } /* Literal.Number.Hex */ .highlight .mi { color: #208050 } /* Literal.Number.Integer */ .highlight .mo { color: #208050 } /* Literal.Number.Oct */ .highlight .sa { color: #4070a0 } /* Literal.String.Affix */ .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ .highlight .sc { color: #4070a0 } /* Literal.String.Char */ .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ .highlight .sx { color: #c65d09 } /* Literal.String.Other */ .highlight .sr { color: #235388 } /* Literal.String.Regex */ .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ .highlight .ss { color: #517918 } /* Literal.String.Symbol */ .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #06287e } /* Name.Function.Magic */ .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4836717 BTrees-6.0/docs/_static/0000755000076500000240000000000014626041203014001 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_static/custom.css0000644000076500000240000000055314330745562016043 0ustar00jensstaff/* sphinx_rtd_theme pulls from wyrm, which applies white-space: nowrap to tables. https://github.com/snide/wyrm/blob/fd41b56978f009e8c33cb26f384dd0dfaf430a7d/sass/wyrm_core/_table.sass#L144 That makes it hard to have a table with more than three columns without pointless scrolling. */ .wrapped td, .wrapped th { white-space: normal !important; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_static/placeholder.txt0000644000076500000240000000000014330745562017025 0ustar00jensstaff././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1717060227.483758 BTrees-6.0/docs/_templates/0000755000076500000240000000000014626041203014510 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/_templates/placeholder.txt0000644000076500000240000000000014330745562017534 0ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/api.rst0000644000076500000240000000701714330745562013676 0ustar00jensstaff=============== API Reference =============== Protocol APIs ============= .. module:: BTrees.Interfaces .. 
versionchanged:: 4.8.0 Previously, ``ISized`` was defined here, but now it is imported from :mod:`zope.interface.common.collections`. The definition is the same. Similarly, ``IReadSequence``, previously defined here, has been replaced with :mod:`zope.interface.common.sequence.IMinimalSequence `. .. caution:: Before version 4.8.0, most of these interfaces served as documentation only, and were *not* implemented by the classes of this package. For example, :class:`BTrees.OOBTree.BTree` did *not* implement `IBTree`. (The exceptions were the :class:`IBTreeModule` and :class:`IBTreeFamily` families of interfaces and implementations.) Beginning with version 4.8.0, objects implement the expected interface; the ``BTree`` classes implement ``IBTree``, the set classes implement the appropriate set interface and so on. .. autointerface:: ICollection .. autointerface:: IKeyed .. autointerface:: ISetMutable .. autointerface:: IKeySequence .. autointerface:: IMinimalDictionary .. autointerface:: IDictionaryIsh .. autointerface:: IMerge .. autointerface:: IIMerge .. autointerface:: IMergeIntegerKey BTree Family APIs ----------------- .. autointerface:: ISet .. autointerface:: ITreeSet .. autointerface:: IBTree .. autointerface:: IBTreeFamily There are two families defined: .. autodata:: BTrees.family32 .. autodata:: BTrees.family64 Module APIs ----------- .. autointerface:: IBTreeModule .. autointerface:: IObjectObjectBTreeModule .. autointerface:: IIntegerObjectBTreeModule .. autointerface:: IObjectIntegerBTreeModule .. autointerface:: IIntegerIntegerBTreeModule .. autointerface:: IIntegerFloatBTreeModule Utilities ========= .. automodule:: BTrees.Length .. automodule:: BTrees.check BTree Data Structure Variants ============================= Integer Keys ------------ Float Values ~~~~~~~~~~~~ .. automodule:: BTrees.IFBTree Integer Values ~~~~~~~~~~~~~~ .. automodule:: BTrees.IIBTree Object Values ~~~~~~~~~~~~~ .. automodule:: BTrees.IOBTree Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.IUBTree Long Integer Keys ----------------- Float Values ~~~~~~~~~~~~ .. automodule:: BTrees.LFBTree Long Integer Values ~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.LLBTree Object Values ~~~~~~~~~~~~~ .. automodule:: BTrees.LOBTree Quad Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.LQBTree Object Keys ----------- Integer Values ~~~~~~~~~~~~~~ .. automodule:: BTrees.OIBTree Long Integer Values ~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.OLBTree Object Values ~~~~~~~~~~~~~ .. automodule:: BTrees.OOBTree Quad Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.OQBTree Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.OUBTree Quad Unsigned Integer Keys -------------------------- Float Values ~~~~~~~~~~~~ .. automodule:: BTrees.QFBTree Long Integer Values ~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.QLBTree Object Values ~~~~~~~~~~~~~ .. automodule:: BTrees.QOBTree Quad Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: BTrees.QQBTree Unsigned Integer Keys --------------------- Float Values ~~~~~~~~~~~~ .. automodule:: BTrees.UFBTree Integer Values ~~~~~~~~~~~~~~ .. automodule:: BTrees.UIBTree Object Values ~~~~~~~~~~~~~ .. automodule:: BTrees.UOBTree Unsigned Integer Values ~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: BTrees.UUBTree ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/changes.rst0000644000076500000240000000003414330745562014525 0ustar00jensstaff.. include:: ../CHANGES.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1715872922.0 BTrees-6.0/docs/conf.py0000644000076500000240000002152714621422232013661 0ustar00jensstaff# -*- coding: utf-8 -*- # # BTrees documentation build configuration file, created by # sphinx-quickstart on Thu Oct 18 00:16:24 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. #import sys, os import sys import os import pkg_resources # We actually have slightly better results by documenting the # C objects right now. # os.environ['PURE_PYTHON'] = '1' sys.path.append(os.path.abspath('../')) rqmt = pkg_resources.require('BTrees')[0] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.8' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'repoze.sphinx.autointerface', ] # Sphinx 1.8+ prefers this to `autodoc_default_flags`. It's documented that # either True or None mean the same thing as just setting the flag, but # only None works in 1.8 (True works in 2.0) autodoc_default_options = { 'members': None, 'show-inheritance': None, 'special-members': None, 'undoc-members': None, } autodoc_member_order = 'groupwise' autoclass_content = 'both' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'BTrees' copyright = u'2012, Zope Foundation Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '%s.%s' % tuple(map(int, rqmt.version.split('.')[:2])) # The full version, including alpha/beta/rc tags. release = rqmt.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. 
default_role = "obj" # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = [ 'custom.css' ] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'BTreesdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
#'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'BTrees.tex', u'BTrees Documentation', u'Zope Foundation Contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'btrees', u'BTrees Documentation', [u'Zope Foundation Contributors'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'BTrees', u'BTrees Documentation', u'Zope Foundation Contributors', 'BTrees', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'persistent': ("https://persistent.readthedocs.io/en/latest/", None), 'zodb': ("https://zodb.org/en/latest/", None), 'zopeinterface': ("https://zopeinterface.readthedocs.io/en/latest/", None), } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/development.rst0000644000076500000240000004134414330745562015450 0ustar00jensstaff======================= Developer Information ======================= This document provides information for developers who maintain or extend `BTrees`. Macros ====== `BTrees` are defined using a "template", roughly akin to a C++ template. To create a new family of `BTrees`, create a source file that defines macros used to handle differences in key and value types: Configuration Macros -------------------- ``MASTER_ID`` A string to hold an RCS/CVS Id key to be included in compiled binaries. ``MOD_NAME_PREFIX`` A string (like "IO" or "OO") that provides the prefix used for the module. This gets used to generate type names and the internal module name string. Macros for Keys --------------- ``KEY_TYPE`` The C type declaration for keys (e.g., ``int`` or ``PyObject*``). ``KEY_TYPE_IS_PYOBJECT`` Define if ``KEY_TYPE`` is a ``PyObject*`, else ``undef``. ``KEY_CHECK(K)`` Tests whether the ``PyObject* K`` can be converted to the (``C``) key type (``KEY_TYPE``). The macro should return a boolean (zero for false, non-zero for true). 
When it returns false, its caller should probably set a ``TypeError`` exception. ``KEY_CHECK_ON_SET(K)`` Like ``KEY_CHECK``, but only checked during ``__setitem__``. ``TEST_KEY_SET_OR(V, K, T)`` Like Python's ``cmp()``. Compares K(ey) to T(arget), where ``K`` and ``T`` are ``C`` values of type `KEY_TYPE`. ``V`` is assigned an `int` value depending on the outcome:: < 0 if K < T == 0 if K == T > 0 if K > T This macro acts like an ``if``, where the following statement is executed only if a Python exception has been raised because the values could not be compared. ``DECREF_KEY(K)`` ``K`` is a value of ``KEY_TYPE``. If ``KEY_TYPE`` is a flavor of ``PyObject*``, write this to do ``Py_DECREF(K)``. Else (e.g., ``KEY_TYPE`` is ``int``) make it a nop. ``INCREF_KEY(K)`` ``K`` is a value of `KEY_TYPE`. If `KEY_TYPE` is a flavor of ``PyObject*``, write this to do ``Py_INCREF(K)``. Else (e.g., `KEY_TYPE` is ``int``) make it a nop. ``COPY_KEY(K, E)`` Like ``K=E``. Copy a key from ``E`` to ``K``, both of ``KEY_TYPE``. Note that this doesn't ``decref K`` or ``incref E`` when ``KEY_TYPE`` is a ``PyObject*``; the caller is responsible for keeping refcounts straight. ``COPY_KEY_TO_OBJECT(O, K)`` Roughly like ``O=K``. ``O`` is a ``PyObject*``, and the macro must build a Python object form of ``K``, assign it to ``O``, and ensure that ``O`` owns the reference to its new value. It may do this by creating a new Python object based on ``K`` (e.g., ``PyInt_FromLong(K)`` when ``KEY_TYPE`` is ``int``), or simply by doing ``Py_INCREF(K)`` if ``KEY_TYPE`` is a ``PyObject*``. ``COPY_KEY_FROM_ARG(TARGET, ARG, STATUS)`` Copy an argument to the target without creating a new reference to ``ARG``. ``ARG`` is a ``PyObject*``, and ``TARGET`` is of type ``KEY_TYPE``. If this can't be done (for example, ``KEY_CHECK(ARG)`` returns false), set a Python error and set status to ``0``. If there is no error, leave status alone. Macros for Values ----------------- ``VALUE_TYPE`` The C type declaration for values (e.g., ``int`` or ``PyObject*``). ``VALUE_TYPE_IS_PYOBJECT`` Define if ``VALUE_TYPE`` is a ``PyObject*``, else ``undef``. ``TEST_VALUE(X, Y)`` Like Python's ``cmp()``. Compares ``X`` to ``Y``, where ``X`` & ``Y`` are ``C`` values of type ``VALUE_TYPE``. The macro returns an ``int``, with value:: < 0 if X < Y == 0 if X == Y > 0 if X > Y Bug: There is no provision for determining whether the comparison attempt failed (set a Python exception). ``DECREF_VALUE(K)`` Like ``DECREF_KEY``, except applied to values of ``VALUE_TYPE``. ``INCREF_VALUE(K)`` Like ``INCREF_KEY``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE(K, E)`` Like ``COPY_KEY``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE_TO_OBJECT(O, K)`` Like ``COPY_KEY_TO_OBJECT``, except applied to values of ``VALUE_TYPE``. ``COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS)`` Like ``COPY_KEY_FROM_ARG``, except applied to values of ``VALUE_TYPE``. ``NORMALIZE_VALUE(V, MIN)`` Normalize the value, ``V``, using the parameter ``MIN``. This is almost certainly a YAGNI. It is a no-op for most types. For integers, ``V`` is replaced by ``V/MIN`` only if ``MIN > 0``. Macros for Set Operations ------------------------- ``MERGE_DEFAULT`` A value of ``VALUE_TYPE`` specifying the value to associate with set elements when sets are merged with mappings via weighed union or weighted intersection. ``MERGE(O1, w1, O2, w2)`` Performs a weighted merge of two values, ``O1`` and ``O2``, using weights ``w1`` and ``w2``. The result must be of ``VALUE_TYPE``. 
Note that weighted unions and weighted intersections are not enabled if this macro is left undefined. ``MERGE_WEIGHT(O, w)`` Computes a weighted value for ``O``. The result must be of ``VALUE_TYPE``. This is used for "filling out" weighted unions, i.e. to compute a weighted value for keys that appear in only one of the input mappings. If left undefined, ``MERGE_WEIGHT`` defaults to:: #define MERGE_WEIGHT(O, w) (O) ``MULTI_INT_UNION`` The value doesn't matter. If defined, `SetOpTemplate.c` compiles code for a ``multiunion()`` function (compute a union of many input sets at high speed). This currently makes sense only for structures with integer keys. Datatypes ========= There are two tunable values exposed on BTree and TreeSet classes. Their default values are found in ``_datatypes.py`` and shared across C and Python. ``max_leaf_size`` An int giving the maximum bucket size (number of key/value pairs). When a bucket gets larger than this due to an insertion *into a BTREE*, it splits. Inserting into a bucket directly doesn't split, and functions that produce a bucket output (e.g., ``union()``) also have no bound on how large a bucket may get. This used to come from the C macro ``DEFAULT_MAX_BUCKET_SIZE``. ``max_internal_size`` An ``int`` giving the maximum size (number of children) of an internal btree node. This used to come from the C macro ``DEFAULT_MAX_BTREE_SIZE`` BTree Clues =========== More or less random bits of helpful info. + In papers and textbooks, this flavor of BTree is usually called a B+-Tree, where "+" is a superscript. + All keys and all values live in the bucket leaf nodes. Keys in interior (BTree) nodes merely serve to guide a search efficiently toward the correct leaf. + When a key is deleted, it's physically removed from the bucket it's in, but this doesn't propagate back up the tree: since keys in interior nodes only serve to guide searches, it's OK-- and saves time --to leave "stale" keys in interior nodes. + No attempt is made to rebalance the tree after a deletion, unless a bucket thereby becomes entirely empty. "Classic BTrees" do rebalance, keeping all buckets at least half full (provided there are enough keys in the entire tree to fill half a bucket). The tradeoffs are murky. Pathological cases in the presence of deletion do exist. Pathologies include trees tending toward only one key per bucket, and buckets at differing depths (all buckets are at the same depth in a classic BTree). + ``max_leaf_size`` and ``max_internal_size`` are chosen mostly to "even out" pickle sizes in storage. That's why, e.g., an `IIBTree` has larger values than an `OOBTree`: pickles store ints more efficiently than they can store arbitrary Python objects. + In a non-empty BTree, every bucket node contains at least one key, and every BTree node contains at least one child and a non-NULL firstbucket pointer. However, a BTree node may not contain any keys. + An empty BTree consists solely of a BTree node with ``len==0`` and ``firstbucket==NULL``. + Although a BTree can become unbalanced under a mix of inserts and deletes (meaning both that there's nothing stronger that can be said about buckets than that they're not empty, and that buckets can appear at different depths), a BTree node always has children of the same kind: they're all buckets, or they're all BTree nodes. 
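To make the ``max_leaf_size`` and ``max_internal_size`` notes above concrete, both tunables can be read from Python and, assuming the installed BTrees version supports overriding them in a subclass, adjusted per application. The subclass name and the override values below are invented purely for illustration::

    from BTrees.OOBTree import OOBTree

    # The shared defaults live in BTrees/_datatypes.py and apply to both
    # the C and the pure-Python implementations.
    print(OOBTree.max_leaf_size, OOBTree.max_internal_size)

    class TinyNodeBTree(OOBTree):
        # Hypothetical override: smaller nodes, hence more (but smaller)
        # pickles in storage.
        max_leaf_size = 8
        max_internal_size = 16

    t = TinyNodeBTree()
    t.update({i: str(i) for i in range(100)})  # insertions split full buckets
    print(len(t))                              # -> 100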
The ``BTREE_SEARCH`` Macro ========================== For notational ease, consider a fixed BTree node ``x``, and let :: K(i) mean x->data.key[i] C(i) mean all the keys reachable from x->data.child[i] For each ``i`` in ``0`` to ``x->len-1`` inclusive, :: K(i) <= C(i) < K(i+1) is a BTree node invariant, where we pretend that ``K(0)`` holds a key smaller than any possible key, and ``K(x->len)`` holds a key larger than any possible key. (Note that ``K(x->len)`` doesn't actually exist, and ``K(0)`` is never used although space for it exists in non-empty BTree nodes.) When searching for a key ``k``, then, the child pointer we want to follow is the one at index ``i`` such that ``K(i) <= k < K(i+1)``. There can be at most one such ``i``, since the ``K(i)`` are strictly increasing. And there is at least one such ``i`` provided the tree isn't empty (so that ``0 < len``). For the moment, assume the tree isn't empty (we'll get back to that later). The macro's chief loop invariant is :: K(lo) < k < K(hi) This holds trivially at the start, since ``lo`` is set to ``0``, and ``hi`` to ``x->len``, and we pretend ``K(0)`` is minus infinity and ``K(len)`` is plus infinity. Inside the loop, if ``K(i) < k`` we set ``lo`` to ``i``, and if ``K(i) > k`` we set ``hi`` to ``i``. These obviously preserve the invariant. If ``K(i) == k``, the loop breaks and sets the result to ``i``, and since ``K(i) == k`` in that case ``i`` is obviously the correct result. Other cases depend on how ``i = floor((lo + hi)/2)`` works, exactly. Suppose ``lo + d = hi`` for some ``d >= 0``. Then ``i = floor((lo + lo + d)/2) = floor(lo + d/2) = lo + floor(d/2)``. So: a. ``[d == 0] (lo == i == hi)`` if and only if ``(lo == hi)``. b. ``[d == 1] (lo == i < hi)`` if and only if ``(lo+1 == hi)``. c. ``[d > 1] (lo < i < hi)`` if and only if ``(lo+1 < hi)``. If the node is empty ``(x->len == 0)``, then ``lo==i==hi==0`` at the start, and the loop exits immediately (the first ``i > lo`` test fails), without entering the body. Else ``lo < hi`` at the start, and the invariant ``K(lo) < k < K(hi)`` holds. If ``lo+1 < hi``, we're in case (c): ``i`` is strictly between ``lo`` and ``hi``, so the loop body is entered, and regardless of whether the body sets the new ``lo`` or the new ``hi`` to ``i``, the new ``lo`` is strictly less than the new ``hi``, and the difference between the new ``lo`` and new ``hi`` is strictly less than the difference between the old ``lo`` and old ``hi``. So long as the new ``lo + 1`` remains < the new ``hi``, we stay in this case. We can't stay in this case forever, though: because ``hi-lo`` decreases on each trip but remains > ``0``, ``lo+1 == hi`` must eventually become true. (In fact, it becomes true quickly, in about ``log2(x->len)`` trips; the point is more that ``lo`` doesn't equal ``hi`` when the loop ends, it has to end with ``lo+1==hi`` and ``i==lo``). Then we're in case (b): ``i==lo==hi-1`` then, and the loop exits. The invariant still holds, with ``lo==i`` and ``hi==lo+1==i+1``:: K(i) < k < K(i+1) so ``i`` is again the correct answer. Optimization points ------------------- + Division by 2 is done via shift rather via "/2". These are signed ints, and almost all C compilers treat signed int division as truncating, and shifting is not the same as truncation for signed int division. The compiler has no way to know these values aren't negative, so has to generate longer-winded code for "/2". But we know these values aren't negative, and exploit it. + The order of _cmp comparisons matters. 
We're in an interior BTree node, and are looking at only a tiny fraction of all the keys that exist. So finding the key exactly in this node is unlikely, and checking ``_cmp == 0`` is a waste of time to the same extent. It doesn't matter whether we check for ``_cmp < 0`` or ``_cmp > 0`` first, so long as we do both before worrying about equality. + At the start of a routine, it's better to run this macro even if ``x->len`` is ``0`` (check for that afterwards). We just called a function and so probably drained the pipeline. If the first thing we do then is read up ``self->len`` and check it against ``0``, we just sit there waiting for the data to get read up, and then another immediate test-and-branch, and for a very unlikely case (BTree nodes are rarely empty). It's better to get into the loop right away so the normal case makes progress ASAP.
The ``BUCKET_SEARCH`` Macro =========================== This has a different job than ``BTREE_SEARCH``: the key ``0`` slot is legitimate in a bucket, and we want to find the index at which the key belongs. If the key is larger than the bucket's largest key, a new slot at index len is where it belongs, else it belongs at the smallest ``i`` with ``keys[i]`` >= the key we're looking for. We also need to know whether or not the key is present (``BTREE_SEARCH`` didn't care; it only wanted to find the next node to search). The mechanics of the search are quite similar, though. The primary loop invariant changes to (say we're searching for key ``k``):: K(lo-1) < k < K(hi) where ``K(i)`` means ``keys[i]``, and we pretend ``K(-1)`` is minus infinity and ``K(len)`` is plus infinity. If the bucket is empty, ``lo=hi=i=0`` at the start, the loop body is never entered, and the macro sets ``INDEX`` to 0 and ``ABSENT`` to true. That's why ``_cmp`` is initialized to 1 (``_cmp`` becomes ``ABSENT``). Else the bucket is not empty, ``lo < hi`` at the start, and the invariant ``K(lo-1) < k < K(hi)`` holds whenever the loop body is entered. If ``K[i] < k``, ``lo`` is set to ``i+1``, preserving that ``K(lo-1) = K[i] < k``; if ``K[i] > k``, ``hi`` is set to ``i``, preserving that ``K[hi] = K[i] > k``. If the loop exits after either of those, ``_cmp != 0``, so ``ABSENT`` becomes true. If ``K[i]=k``, the loop breaks, so that ``INDEX`` becomes ``i``, and ``ABSENT`` becomes false (``_cmp=0`` in this case). The same case analysis for ``BTREE_SEARCH`` on ``lo`` and ``hi`` holds here: a. ``(lo == i == hi)`` if and only if ``(lo == hi)``. b. ``(lo == i < hi)`` if and only if ``(lo+1 == hi)``. c. ``(lo < i < hi)`` if and only if ``(lo+1 < hi)``. So long as ``lo+1 < hi``, we're in case (c), and either break with equality (in which case the right results are obviously computed) or narrow the range. If equality doesn't obtain, the range eventually narrows to cases (a) or (b). To go from (c) to (a), we must have ``lo+2==hi`` at the start, and ``K[i]=K[lo+1] < k``; then the new ``lo`` is ``i+1 == hi``, the loop exits with ``lo == hi`` and ``_cmp < 0``, ``ABSENT`` is true, and the key belongs at slot ``hi``. Otherwise case (c) falls into case (b), with ``lo+1 == hi`` and ``i == lo``: if ``K[i] < k`` the new ``lo`` becomes ``i+1 == hi`` and the key belongs at ``hi``; if ``K[i] > k`` the new ``hi`` becomes ``i == lo`` and the key belongs at ``lo``; either way ``ABSENT`` is true. The optimization points are the same as for ``BTREE_SEARCH``, except that here it pays to check ``K[i] < key`` before checking ``K[i] > key``, because when it pays it narrows the range more (we get a little boost from setting ``lo=i+1`` in this case; the other case sets ``hi=i``, which isn't as much of a narrowing). ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/index.rst0000644000076500000240000000102514330745562014225 0ustar00jensstaff====================== BTrees Documentation ====================== This package contains a set of persistent object containers built around a modified BTree data structure. The trees are optimized for use inside ZODB's "optimistic concurrency" paradigm, and include explicit resolution of conflicts detected by that mechanism. Contents: .. 
toctree:: :maxdepth: 2 overview api development changes ==================== Indices and tables ==================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/make.bat0000644000076500000240000001175014330745562013777 0ustar00jensstaff@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\BTrees.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\BTrees.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. 
goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/docs/overview.rst0000644000076500000240000005242414330745562014775 0ustar00jensstaff========== Overview ========== When programming with the ZODB, Python dictionaries aren't always what you need. The most important case is where you want to store a very large mapping. When a Python dictionary is accessed in a ZODB, the whole dictionary has to be unpickled and brought into memory. If you're storing something very large, such as a 100,000-entry user database, unpickling such a large object will be slow. BTrees are a balanced tree data structure that behave like a mapping but distribute keys throughout a number of tree nodes. The nodes are stored in sorted order (this has important consequences -- see below). Nodes are then only unpickled and brought into memory as they're accessed, so the entire tree doesn't have to occupy memory (unless you really are touching every single key). Related Data Structures ======================= The BTrees package provides a large collection of related data structures. The most general data structures store arbitrary ordered_ objects. There are variants of the data structures specialized to numbers, which are faster and more memory efficient than those dealing with objects. There are several modules that handle the different variants. The first two letters of the module name specify the types of the keys and values in mappings. For example, the :mod:`BTrees.IOBTree` module provides a mapping with 32-bit integer keys and arbitrary objects as values. .. 
list-table:: Data Type Notation :widths: auto :class: wrapped :header-rows: 1 * - Letter - Mnemonic Device - Data Type - Notes * - O - "Object" - Any sortable Python object - * - I - "Integer" - 32-bit signed integer - Values from ``-2**31 - 1`` through ``2**31 - 1`` (about plus or minus two billion) * - L - "Long integer" - 64-bit signed integer - Values from ``-2**63 - 1`` through ``2**63 - 1`` (about plus or minus nine quintillion) * - F - "Float" - 32-bit C-language ``float`` - New in ZODB 3.4 * - U - "Unsigned" - 32-bit unsigned integer - (New in BTrees 4.7.0) Values from 0 through ``2**32`` (about four billion) * - Q - "Quad" - 64-bit unsigned integer - Values from 0 through ``2**64`` (about 18 quintillion) (New in BTrees 4.7.0) The four data structures provide by each module are a BTree, a Bucket, a TreeSet, and a Set. The BTree and Bucket types are mappings and support all the usual mapping methods, e.g. :func:`~BTrees.Interfaces.ISetMutable.update` and :func:`~BTrees.Interfaces.IKeyed.keys`. The TreeSet and Set types are similar to mappings but they have no values; they support the methods that make sense for a mapping with no keys, e.g. :func:`~BTrees.Interfaces.IKeyed.keys` but not :func:`~BTrees.Interfaces.IMinimalDictionary.items`. The Bucket and Set types are the individual building blocks for BTrees and TreeSets, respectively. A Bucket or Set can be used when you are sure that it will have few elements. If the data structure will grow large, you should use a BTree or TreeSet. Like Python lists, Buckets and Sets are allocated in one contiguous piece, and insertions and deletions can take time proportional to the number of existing elements. Also like Python lists, a Bucket or Set is a single object, and is pickled and unpickled in its entirety. BTrees and TreeSets are multi-level tree structures with much better (logarithmic) worst- case time bounds, and the tree structure is built out of multiple objects, which ZODB can load individually as needed. The two letter prefixes are repeated in the data types names. For example, the :mod:`BTrees.OOBTree` module defines the following types: :class:`BTrees.OOBTree.OOBTree`, :class:`BTrees.OOBTree.OOBucket`, :class:`BTrees.OOBTree.OOSet`, and :class:`BTrees.OOBTree.OOTreeSet`. Similarly, the other modules each define their own variants of those four types. For convenience, BTrees groups the most closely related data structures together into a "family" (defined by :class:`BTrees.Interfaces.IBTreeFamily`). :obj:`BTrees.family32` groups 32-bit data structures, while :obj:`BTrees.family64` contains 64-bit data structures. It is a common practice for code that creates BTrees to be parameterized on the family so that the caller can choose the desired characteristics. Behaviour ========= The `keys`, :func:`values`, and :func:`items` methods on BTree and TreeSet types do not materialize a list with all of the data. Instead, they return lazy sequences that fetch data from the BTree as needed. They also support optional arguments to specify the minimum and maximum values to return, often called "range searching". Because all these types are stored in sorted order, range searching is very efficient. The :func:`keys`, :func:`values`, and :func:`items` methods on Bucket and Set types do return lists with all the data. Starting in ZODB 3.3, there are also :func:`iterkeys`, :func:`itervalues`, and :func:`iteritems` methods that return iterators (in the Python 2.2 sense). Those methods also apply to BTree and TreeSet objects. 
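For instance, the range arguments work the same way for ``keys()`` as they do for ``values()`` and ``items()``. The short doctest below is purely illustrative; the keys and values are arbitrary.

.. doctest::

   >>> from BTrees.OOBTree import OOBTree
   >>> t = OOBTree()
   >>> t.update({10: 'a', 20: 'b', 30: 'c', 40: 'd'})
   >>> list(t.keys(15, 30))      # keys k with 15 <= k <= 30
   [20, 30]
   >>> list(t.keys(min=15, max=30, excludemax=True))
   [20]
   >>> list(t.iterkeys())        # an iterator over all the keys
   [10, 20, 30, 40]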
A BTree object supports all the methods you would expect of a mapping, with
a few extensions that exploit the fact that the keys are sorted.  The
example below demonstrates how some of the methods work.  The extra methods
are :func:`minKey` and :func:`maxKey`, which find the minimum and maximum
key value subject to an optional bound argument, and :func:`byValue`, which
should probably be ignored (it's hard to explain exactly what it does, and
as a result it's almost never used -- best to consider it deprecated).  The
various methods for enumerating keys, values and items also accept minimum
and maximum key arguments ("range search"), and (new in ZODB 3.3) optional
Boolean arguments to control whether a range search is inclusive or
exclusive of the range's endpoints.

.. doctest::

   >>> from BTrees.OOBTree import OOBTree
   >>> t = OOBTree()
   >>> t.update({1: "red", 2: "green", 3: "blue", 4: "spades"})
   >>> len(t)
   4
   >>> t[2]
   'green'
   >>> s = t.keys()  # this is a "lazy" sequence object
   >>> s
   <...TreeItems object at ...>
   >>> len(s)  # it acts like a Python list
   4
   >>> s[-2]
   3
   >>> list(s)  # materialize the full list
   [1, 2, 3, 4]
   >>> list(t.values())
   ['red', 'green', 'blue', 'spades']
   >>> list(t.values(1, 2))  # values at keys in 1 to 2 inclusive
   ['red', 'green']
   >>> list(t.values(2))  # values at keys >= 2
   ['green', 'blue', 'spades']
   >>> list(t.values(min=1, max=4))  # keyword args new in ZODB 3.3
   ['red', 'green', 'blue', 'spades']
   >>> list(t.values(min=1, max=4, excludemin=True, excludemax=True))
   ['green', 'blue']
   >>> t.minKey()  # smallest key
   1
   >>> t.minKey(1.5)  # smallest key >= 1.5
   2
   >>> [k for k in t.keys()]
   [1, 2, 3, 4]
   >>> [k for k in t]  # new in ZODB 3.3
   [1, 2, 3, 4]
   >>> [pair for pair in t.iteritems()]  # new in ZODB 3.3
   [(1, 'red'), (2, 'green'), (3, 'blue'), (4, 'spades')]
   >>> t.has_key(4)  # returns a true value
   True
   >>> t.has_key(5)
   False
   >>> 4 in t  # new in ZODB 3.3
   True
   >>> 5 in t  # new in ZODB 3.3
   False
   >>>

Each of the modules also defines some functions that operate on BTrees --
:func:`~BTrees.Interfaces.IMerge.difference`,
:func:`~BTrees.Interfaces.IMerge.union`, and
:func:`~BTrees.Interfaces.IMerge.intersection`.  The
:func:`~BTrees.Interfaces.IMerge.difference` function returns a Bucket,
while the other two methods return a Set.  If the keys are integers, then
the module also defines
:func:`~BTrees.Interfaces.IMergeIntegerKey.multiunion`.  If the values are
integers or floats, then the module also defines
:func:`~BTrees.Interfaces.IIMerge.weightedIntersection` and
:func:`~BTrees.Interfaces.IIMerge.weightedUnion`.  The function doc strings
describe each function briefly.  (A short example appears at the end of this
section.)

.. % XXX I'm not sure all of the following is actually correct.  The
.. % XXX set functions have complicated behavior.

:mod:`~BTrees.Interfaces` defines the operations, and is the official
documentation.  Note that the interfaces don't define the concrete types
returned by most operations, and you shouldn't rely on the concrete types
that happen to be returned: stick to operations guaranteed by the interface.
In particular, note that the interfaces don't specify anything about
comparison behavior, and so nothing about it is guaranteed.  In ZODB 3.3,
for example, two BTrees happen to use Python's default object comparison,
which amounts to comparing the (arbitrary but fixed) memory addresses of the
BTrees.  This may or may not be true in future releases.  If the interfaces
don't specify a behavior, then whether that behavior appears to work, and
exactly what happens if it does appear to work, are undefined and should not
be relied on.
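For instance, here is a minimal sketch of the merge functions at work (the
set contents are made up purely for illustration):

.. doctest::

   >>> from BTrees.OOBTree import OOSet, union, intersection, difference
   >>> a = OOSet(['ann', 'bob', 'eve'])
   >>> b = OOSet(['bob', 'dan'])
   >>> list(union(a, b))
   ['ann', 'bob', 'dan', 'eve']
   >>> list(intersection(a, b))
   ['bob']
   >>> list(difference(a, b))  # members of the first argument not in the second
   ['ann', 'eve']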
.. _ordered:

Total Ordering and Persistence
==============================

The BTree-based data structures differ from Python dicts in several
fundamental ways.  One of the most important is that while dicts require
that keys support hash codes and equality comparison, the BTree-based
structures don't use hash codes and require a total ordering on keys.

Total ordering means three things:

#. Reflexive.  For each *x*, ``x == x`` is true.

#. Trichotomy.  For each *x* and *y*, exactly one of ``x < y``, ``x == y``,
   and ``x > y`` is true.

#. Transitivity.  Whenever ``x <= y`` and ``y <= z``, it's also true that
   ``x <= z``.

The default comparison functions for most objects that come with Python
satisfy these rules, with some crucial cautions explained later.  Complex
numbers are an example of an object whose default comparison function does
not satisfy these rules: complex numbers only support ``==`` and ``!=``
comparisons, and raise an exception if you try to compare them in any other
way.  They don't satisfy the trichotomy rule, and must not be used as keys
in BTree-based data structures (although note that complex numbers can be
used as keys in Python dicts, which do not require a total ordering).

Examples of objects that are wholly safe to use as keys in BTree-based
structures include ints, longs, floats, 8-bit strings, Unicode strings, and
tuples composed (possibly recursively) of objects of wholly safe types.

It's important to realize that even if two types satisfy the rules on their
own, mixing objects of those types may not.  For example, 8-bit strings and
Unicode strings both supply total orderings, but mixing the two loses
trichotomy; e.g., ``'x' < chr(255)`` and ``u'x' == 'x'``, but trying to
compare ``chr(255)`` to ``u'x'`` raises an exception.  Partly for this
reason (another is given later), it can be dangerous to use keys with
multiple types in a single BTree-based structure.  Don't try to do that, and
you don't have to worry about it.

Another potential problem is mutability: when a key is inserted in a
BTree-based structure, it must retain the same order relative to the other
keys over time.  This is easy to run afoul of if you use mutable objects as
keys.  For example, lists supply a total ordering, and then:

.. doctest::

   >>> L1, L2, L3 = [1], [2], [3]
   >>> from BTrees.OOBTree import OOSet
   >>> s = OOSet((L2, L3, L1))  # this is fine, so far
   >>> list(s.keys())           # note that the lists are in sorted order
   [[1], [2], [3]]
   >>> s.has_key([3])           # and [3] is in the set
   True
   >>> L2[0] = 5                # horrible -- the set is insane now
   >>> s.has_key([3])           # for example, it's insane this way
   False
   >>> s.__class__
   <class 'BTrees.OOBTree.OOSet...'>
   >>> list(s)
   [[1], [5], [3]]

Key lookup relies on the keys remaining in sorted order (an efficient form
of binary search is used).  By mutating key L2 after inserting it, we
destroyed the invariant that the OOSet is sorted.  As a result, all future
operations on this set are unpredictable.

A subtler variant of this problem arises due to persistence: by default,
Python does several kinds of comparison by comparing the memory addresses
of two objects.  Because Python never moves an object in memory, this does
supply a usable (albeit arbitrary) total ordering across the life of a
program run (an object's memory address doesn't change).  But if objects
compared in this way are used as keys of a BTree-based structure that's
stored in a database, when the objects are loaded from the database again
they will almost certainly wind up at different memory addresses.
There's no guarantee then that if key K1 had a memory address smaller than
the memory address of key K2 at the time K1 and K2 were inserted in a BTree,
K1's address will also be smaller than K2's when that BTree is loaded from a
database later.  The result will be an insane BTree, where various
operations do and don't work as expected, seemingly at random.

Now each of the types identified above as "wholly safe to use" never
compares two instances of that type by memory address, so there's nothing to
worry about here if you use keys of those types.  The most common mistake is
to use keys that are instances of a user-defined class that doesn't supply
its own :meth:`__cmp__` method.  Python compares such instances by memory
address.  This is fine if such instances are used as keys in temporary
BTree-based structures used only in a single program run.  It can be
disastrous if that BTree-based structure is stored to a database, though.

.. doctest::
   :options: +SKIP

   >>> class C:
   ...     pass
   ...
   >>> a, b = C(), C()
   >>> print(a < b)  # this may print 0 if you try it
   True
   >>> del a, b
   >>> a, b = C(), C()
   >>> print(a < b)  # and this may print 0 or 1
   False
   >>>

That example illustrates that comparison of instances of classes that don't
define :meth:`__cmp__` yields arbitrary results (but consistent results
within a single program run).

Another problem occurs with instances of classes that do define
:meth:`__cmp__`, but define it incorrectly.  It's possible but rare for a
custom :meth:`__cmp__` implementation to violate one of the three required
formal properties directly.  It's more common for it to "fall back" to
address-based comparison by mistake.  For example,

.. doctest::

   >>> class Mine:
   ...     def __cmp__(self, other):
   ...         if other.__class__ is Mine:
   ...             return cmp(self.data, other.data)
   ...         else:
   ...             return cmp(self.data, other)

It's quite possible there that the :keyword:`else` clause allows a result to
be computed based on memory address.  The bug won't show up until a
BTree-based structure uses objects of class :class:`Mine` as keys, and also
objects of other types as keys, and the structure is loaded from a database,
and a sequence of comparisons happens to execute the :keyword:`else` clause
in a case where the relative order of object memory addresses happened to
change.  This is as difficult to track down as it sounds, so best to stay
far away from the possibility.

You'll stay out of trouble by following these rules, violating them only
with great care:

#. Use objects of simple immutable types as keys in BTree-based data
   structures.

#. Within a single BTree-based data structure, use objects of a single type
   as keys.  Don't use multiple key types in a single structure.

#. If you want to use class instances as keys, and there's any possibility
   that the structure may be stored in a database, it's crucial that the
   class define a :meth:`__cmp__` method, and that the method is carefully
   implemented.  Any part of a comparison implementation that relies
   (explicitly or implicitly) on an address-based comparison result will
   eventually cause serious failure.

#. Do not use :class:`~persistent.Persistent` objects as keys, or objects of
   a subclass of :class:`~persistent.Persistent`.

That last item may be surprising.  It stems from details of how conflict
resolution is implemented: the states passed to conflict resolution do not
materialize persistent subobjects (if a persistent object P is a key in a
BTree, then P is a subobject of the bucket containing P).
Instead, if an object O references a persistent subobject P directly, and O
is involved in a conflict, the states passed to conflict resolution contain
an instance of an internal :class:`~persistent.PersistentReference` stub
class everywhere O references P.  Two
:class:`~persistent.PersistentReference` instances compare equal if and only
if they "represent" the same persistent object; when they're not equal, they
compare by memory address, and, as explained before, memory-based comparison
must never happen in a sane persistent BTree.  Note that it doesn't help in
this case if your :class:`~persistent.Persistent` subclass defines a sane
:meth:`__cmp__` method: conflict resolution doesn't know about your class,
and so also doesn't know about its :meth:`__cmp__` method.  It only sees
instances of the internal :class:`~persistent.PersistentReference` stub
class.

Iteration and Mutation
======================

As with a Python dictionary or list, you should not mutate a BTree-based
data structure while iterating over it, except that it's fine to replace the
value associated with an existing key while iterating.  You won't create
internal damage in the structure if you try to remove or add keys while
iterating, but the results are undefined and unpredictable.  A weak attempt
is made to raise :exc:`RuntimeError` if the size of a BTree-based structure
changes while iterating, but it doesn't catch most such cases, and is also
unreliable.  Example:

.. doctest::
   :options: +SKIP

   >>> from BTrees.IIBTree import IISet
   >>> s = IISet(range(10))
   >>> list(s)
   [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
   >>> for i in s:  # the output is undefined
   ...     print(i)
   ...     s.remove(i)
   0
   2
   4
   6
   8
   Traceback (most recent call last):
     File "<stdin>", line 1, in ?
   RuntimeError: the bucket being iterated changed size
   >>> list(s)  # this output is also undefined
   [1, 3, 5, 7, 9]
   >>>

Also as with Python dictionaries and lists, the safe and predictable way to
mutate a BTree-based structure while iterating over it is to iterate over a
copy of the keys.  Example:

.. doctest::

   >>> from BTrees.IIBTree import IISet
   >>> s = IISet(range(10))
   >>> for i in list(s.keys()):  # this is well defined
   ...     print(i)
   ...     s.remove(i)
   0
   1
   2
   3
   4
   5
   6
   7
   8
   9
   >>> list(s)
   []
   >>>

BTree node sizes
================

BTrees (and TreeSets) are made up of a tree of Buckets (and Sets) and
internal nodes.  There are maximum sizes of these nodes configured for the
various key and value types (unsigned and quad unsigned follow integer and
long, respectively):

======== ========== ========================== =============================
Key Type Value Type Maximum Bucket or Set Size Maximum BTree or TreeSet Size
======== ========== ========================== =============================
Integer  Float      120                        500
Integer  Integer    120                        500
Integer  Object     60                         500
Long     Float      120                        500
Long     Long       120                        500
Long     Object     60                         500
Object   Integer    60                         250
Object   Long       60                         250
Object   Object     30                         250
======== ========== ========================== =============================

For your application, especially when using object keys or values, you may
want to override the default sizes.  You can do this by subclassing any of
the BTree (or TreeSet) classes and specifying new values for
``max_leaf_size`` or ``max_internal_size`` in your subclass::

    >>> import BTrees.OOBTree
    >>> class MyBTree(BTrees.OOBTree.BTree):
    ...     max_leaf_size = 500
    ...     max_internal_size = 1000

As of version 4.9, you can also set these values directly on an existing
BTree class if you wish to tune them across your entire application.
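For example, a minimal sketch of that direct, per-class tuning (the numbers
here are purely illustrative, not recommendations)::

    >>> import BTrees.OOBTree
    >>> BTrees.OOBTree.OOBTree.max_leaf_size = 60
    >>> BTrees.OOBTree.OOBTree.max_internal_size = 500

Because the assignment is made on the class itself rather than on a
subclass, it applies to the trees created from that class throughout the
application.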
``max_leaf_size`` is used for leaf nodes in a BTree, either Buckets or Sets. ``max_internal_size`` is used for internal nodes, either BTrees or TreeSets. BTree Diagnostic Tools ====================== A BTree (or TreeSet) is a complex data structure, really a graph of variable- size nodes, connected in multiple ways via three distinct kinds of C pointers. There are some tools available to help check internal consistency of a BTree as a whole. Most generally useful is the :mod:`~BTrees.check` module. The :func:`~BTrees.check.check` function examines a BTree (or Bucket, Set, or TreeSet) for value-based consistency, such as that the keys are in strictly increasing order. See the function docstring for details. The :func:`~BTrees.check.display` function displays the internal structure of a BTree. BTrees and TreeSets also have a :meth:`_check` method. This verifies that the (possibly many) internal pointers in a BTree or TreeSet are mutually consistent, and raises :exc:`AssertionError` if they're not. If a :func:`~BTrees.check.check` or :meth:`_check` call fails, it may point to a bug in the implementation of BTrees or conflict resolution, or may point to database corruption. Repairing a damaged BTree is usually best done by making a copy of it. For example, if *self.data* is bound to a corrupted IOBTree, .. doctest:: :options: +SKIP >>> self.data = IOBTree(self.data) usually suffices. If object identity needs to be preserved, .. doctest:: :options: +SKIP >>> acopy = IOBTree(self.data) >>> self.data.clear() >>> self.data.update(acopy) does the same, but leaves *self.data* bound to the same object. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1696497143.0 BTrees-6.0/docs/requirements.txt0000644000076500000240000000011014507476767015660 0ustar00jensstaffSphinx repoze.sphinx.autointerface docutils < 0.19 sphinx_rtd_theme > 1 ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1717060227.476507 BTrees-6.0/include/0000755000076500000240000000000014626041203013046 5ustar00jensstaff././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4765491 BTrees-6.0/include/persistent/0000755000076500000240000000000014626041203015246 5ustar00jensstaff././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4842432 BTrees-6.0/include/persistent/persistent/0000755000076500000240000000000014626041203017446 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1696497143.0 BTrees-6.0/include/persistent/persistent/_compat.h0000644000076500000240000000267014507476767021277 0ustar00jensstaff/***************************************************************************** Copyright (c) 2012 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #ifndef PERSISTENT__COMPAT_H #define PERSISTENT__COMPAT_H #include "Python.h" #define INTERN PyUnicode_InternFromString #define INTERN_INPLACE PyUnicode_InternInPlace #define NATIVE_CHECK_EXACT PyUnicode_CheckExact #define NATIVE_FROM_STRING_AND_SIZE PyUnicode_FromStringAndSize #define Py_TPFLAGS_HAVE_RICHCOMPARE 0 #define INT_FROM_LONG(x) PyLong_FromLong(x) #define INT_CHECK(x) PyLong_Check(x) #define INT_AS_LONG(x) PyLong_AsLong(x) #define CAPI_CAPSULE_NAME "persistent.cPersistence.CAPI" #else #define INTERN PyString_InternFromString #define INTERN_INPLACE PyString_InternInPlace #define NATIVE_CHECK_EXACT PyString_CheckExact #define NATIVE_FROM_STRING_AND_SIZE PyString_FromStringAndSize #define INT_FROM_LONG(x) PyInt_FromLong(x) #define INT_CHECK(x) PyInt_Check(x) #define INT_AS_LONG(x) PyInt_AS_LONG(x) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1696497143.0 BTrees-6.0/include/persistent/persistent/cPersistence.h0000644000076500000240000001176114507476767022305 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #ifndef CPERSISTENCE_H #define CPERSISTENCE_H #include "_compat.h" #include "bytesobject.h" #include "ring.h" #define CACHE_HEAD \ PyObject_HEAD \ CPersistentRing ring_home; \ int non_ghost_count; \ Py_ssize_t total_estimated_size; struct ccobject_head_struct; typedef struct ccobject_head_struct PerCache; /* How big is a persistent object? 12 PyGC_Head is two pointers and an int 8 PyObject_HEAD is an int and a pointer 12 jar, oid, cache pointers 8 ring struct 8 serialno 4 state + extra 4 size info (56) so far 4 dict ptr 4 weaklist ptr ------------------------- 68 only need 62, but obmalloc rounds up to multiple of eight Even a ghost requires 64 bytes. It's possible to make a persistent instance with slots and no dict, which changes the storage needed. */ #define cPersistent_HEAD \ PyObject_HEAD \ PyObject *jar; \ PyObject *oid; \ PerCache *cache; \ CPersistentRing ring; \ char serial[8]; \ signed state:8; \ unsigned estimated_size:24; /* We recently added estimated_size. We originally added it as a new unsigned long field after a signed char state field and a 3-character reserved field. This didn't work because there are packages in the wild that have their own copies of cPersistence.h that didn't see the update. To get around this, we used the reserved space by making estimated_size a 24-bit bit field in the space occupied by the old 3-character reserved field. To fit in 24 bits, we made the units of estimated_size 64-character blocks. This allows is to handle up to a GB. 
We should never see that, but to be paranoid, we also truncate sizes greater than 1GB. We also set the minimum size to 64 bytes. We use the _estimated_size_in_24_bits and _estimated_size_in_bytes macros both to avoid repetition and to make intent a little clearer. */ #define _estimated_size_in_24_bits(I) ((I) > 1073741696 ? 16777215 : (I)/64+1) #define _estimated_size_in_bytes(I) ((I)*64) #define cPersistent_GHOST_STATE -1 #define cPersistent_UPTODATE_STATE 0 #define cPersistent_CHANGED_STATE 1 #define cPersistent_STICKY_STATE 2 typedef struct { cPersistent_HEAD } cPersistentObject; typedef void (*percachedelfunc)(PerCache *, PyObject *); typedef struct { PyTypeObject *pertype; getattrofunc getattro; setattrofunc setattro; int (*changed)(cPersistentObject*); void (*accessed)(cPersistentObject*); void (*ghostify)(cPersistentObject*); int (*setstate)(PyObject*); percachedelfunc percachedel; int (*readCurrent)(cPersistentObject*); } cPersistenceCAPIstruct; #define cPersistenceType cPersistenceCAPI->pertype #ifndef DONT_USE_CPERSISTENCECAPI static cPersistenceCAPIstruct *cPersistenceCAPI; #endif #define cPersistanceModuleName "cPersistence" #define PER_TypeCheck(O) PyObject_TypeCheck((O), cPersistenceCAPI->pertype) #define PER_USE_OR_RETURN(O,R) {if((O)->state==cPersistent_GHOST_STATE && cPersistenceCAPI->setstate((PyObject*)(O)) < 0) return (R); else if ((O)->state==cPersistent_UPTODATE_STATE) (O)->state=cPersistent_STICKY_STATE;} #define PER_CHANGED(O) (cPersistenceCAPI->changed((cPersistentObject*)(O))) #define PER_READCURRENT(O, E) \ if (cPersistenceCAPI->readCurrent((cPersistentObject*)(O)) < 0) { E; } #define PER_GHOSTIFY(O) (cPersistenceCAPI->ghostify((cPersistentObject*)(O))) /* If the object is sticky, make it non-sticky, so that it can be ghostified. The value is not meaningful */ #define PER_ALLOW_DEACTIVATION(O) ((O)->state==cPersistent_STICKY_STATE && ((O)->state=cPersistent_UPTODATE_STATE)) #define PER_PREVENT_DEACTIVATION(O) ((O)->state==cPersistent_UPTODATE_STATE && ((O)->state=cPersistent_STICKY_STATE)) /* Make a persistent object usable from C by: - Making sure it is not a ghost - Making it sticky. IMPORTANT: If you call this and don't call PER_ALLOW_DEACTIVATION, your object will not be ghostified. PER_USE returns a 1 on success and 0 failure, where failure means error. */ #define PER_USE(O) \ (((O)->state != cPersistent_GHOST_STATE \ || (cPersistenceCAPI->setstate((PyObject*)(O)) >= 0)) \ ? (((O)->state==cPersistent_UPTODATE_STATE) \ ? ((O)->state=cPersistent_STICKY_STATE) : 1) : 0) #define PER_ACCESSED(O) (cPersistenceCAPI->accessed((cPersistentObject*)(O))) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1696497143.0 BTrees-6.0/include/persistent/persistent/ring.h0000644000076500000240000000512014507476767020605 0ustar00jensstaff/***************************************************************************** Copyright (c) 2003 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ /* Support routines for the doubly-linked list of cached objects. 
The cache stores a headed, doubly-linked, circular list of persistent objects, with space for the pointers allocated in the objects themselves. The cache stores the distinguished head of the list, which is not a valid persistent object. The other list members are non-ghost persistent objects, linked in LRU (least-recently used) order. The r_next pointers traverse the ring starting with the least recently used object. The r_prev pointers traverse the ring starting with the most recently used object. Obscure: While each object is pointed at twice by list pointers (once by its predecessor's r_next, again by its successor's r_prev), the refcount on the object is bumped only by 1. This leads to some possibly surprising sequences of incref and decref code. Note that since the refcount is bumped at least once, the list does hold a strong reference to each object in it. */ typedef struct CPersistentRing_struct { struct CPersistentRing_struct *r_prev; struct CPersistentRing_struct *r_next; } CPersistentRing; /* The list operations here take constant time independent of the * number of objects in the list: */ /* Add elt as the most recently used object. elt must not already be * in the list, although this isn't checked. */ void ring_add(CPersistentRing *ring, CPersistentRing *elt); /* Remove elt from the list. elt must already be in the list, although * this isn't checked. */ void ring_del(CPersistentRing *elt); /* elt must already be in the list, although this isn't checked. It's * unlinked from its current position, and relinked into the list as the * most recently used object (which is arguably the tail of the list * instead of the head -- but the name of this function could be argued * either way). This is equivalent to * * ring_del(elt); * ring_add(ring, elt); * * but may be a little quicker. */ void ring_move_to_head(CPersistentRing *ring, CPersistentRing *elt); ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/pyproject.toml0000644000076500000240000000013014330745562014344 0ustar00jensstaff[build-system] requires = ["setuptools", "wheel", "persistent"] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4974287 BTrees-6.0/setup.cfg0000644000076500000240000000110514626041203013241 0ustar00jensstaff[zest.releaser] create-wheel = no [flake8] doctests = 1 per-file-ignores = src/BTrees/check.py: F401 [check-manifest] ignore = .editorconfig .meta.toml docs/_build/html/_sources/* docs/_build/doctest/* docs/_build/html/_static/* docs/_build/html/_static/*/* [isort] force_single_line = True combine_as_imports = True sections = FUTURE,STDLIB,THIRDPARTY,ZOPE,FIRSTPARTY,LOCALFOLDER known_third_party = docutils, pkg_resources, pytz known_zope = known_first_party = default_section = ZOPE line_length = 79 lines_after_imports = 2 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052530.0 BTrees-6.0/setup.py0000644000076500000240000001507214626022162013145 0ustar00jensstaff############################################################################## # # Copyright (c) 2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os import sys from distutils.errors import CCompilerError from distutils.errors import DistutilsExecError from distutils.errors import DistutilsPlatformError from setuptools import Extension from setuptools import find_packages from setuptools import setup from setuptools.command.build_ext import build_ext version = '6.0' def _read(fname): here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, fname)) as f: return f.read() README = _read("README.rst") + '\n\n' + _read('CHANGES.rst') class optional_build_ext(build_ext): """This class subclasses build_ext and allows the building of C extensions to fail. """ def run(self): try: build_ext.run(self) except DistutilsPlatformError as e: self._unavailable(e) def build_extension(self, ext): try: build_ext.build_extension(self, ext) except (CCompilerError, DistutilsExecError, OSError) as e: self._unavailable(e) def _unavailable(self, e): print('*' * 80) print("""WARNING: An optional code optimization (C extension) could not be compiled. Optimizations for this package will not be available!""") print() print(e) print('*' * 80) if 'bdist_wheel' in sys.argv and not os.environ.get("PURE_PYTHON"): # pip uses bdist_wheel by default, and hides the error output. # Let this error percolate up so the user can see it. # pip will then go ahead and run 'setup.py install' directly. raise # Set up dependencies for the BTrees package base_btrees_depends = [ "src/BTrees/BTreeItemsTemplate.c", "src/BTrees/BTreeModuleTemplate.c", "src/BTrees/BTreeTemplate.c", "src/BTrees/BucketTemplate.c", "src/BTrees/MergeTemplate.c", "src/BTrees/SetOpTemplate.c", "src/BTrees/SetTemplate.c", "src/BTrees/TreeSetTemplate.c", "src/BTrees/sorters.c", ] FLAVORS = { "O": "object", "F": "float", "I": "int", # Signed 32-bit "L": "int", # Signed 64-bit "U": "int", # Unsigned 32-bit "Q": "int" # Unsigned 64-bit (from the printf "q" modifier for quad_t) } # XXX should 'fs' be in ZODB instead? 
FAMILIES = ( # Signed 32-bit keys "IO", # object value "II", # self value "IF", # float value "IU", # opposite sign value # Unsigned 32-bit keys "UO", # object value "UU", # self value "UF", # float value "UI", # opposite sign value # Signed 64-bit keys "LO", # object value "LL", # self value "LF", # float value "LQ", # opposite sign value # Unsigned 64-bit keys "QO", # object value "QQ", # self value "QF", # float value "QL", # opposite sign value # Object keys "OO", # object "OI", # 32-bit signed "OU", # 32-bit unsigned "OL", # 64-bit signed "OQ", # 64-bit unsigned "fs", ) KEY_H = "src/BTrees/%skeymacros.h" VALUE_H = "src/BTrees/%svaluemacros.h" def BTreeExtension(family): key = family[0] value = family[1] name = "BTrees._%sBTree" % family sources = ["src/BTrees/_%sBTree.c" % family] kwargs = {"include_dirs": [os.path.join('include', 'persistent')]} if family != "fs": kwargs["depends"] = (base_btrees_depends + [KEY_H % FLAVORS[key], VALUE_H % FLAVORS[value]]) else: kwargs["depends"] = base_btrees_depends if key != "O": kwargs["define_macros"] = [('EXCLUDE_INTSET_SUPPORT', None)] return Extension(name, sources, **kwargs) ext_modules = [BTreeExtension(family) for family in FAMILIES] REQUIRES = [ # 4.1.0 is the first version that PURE_PYTHON can run # ZODB tests 'persistent >= 4.1.0', # 5.0.0 added zope.interface.common.collections 'zope.interface >= 5.0.0', ] TESTS_REQUIRE = [ # Our tests check for the new repr strings # generated in persistent 4.4. 'persistent >= 4.4.3', 'transaction', 'zope.testrunner', ] setup( name='BTrees', version=version, description='Scalable persistent object containers', long_description=README, classifiers=[ "Development Status :: 6 - Mature", "License :: OSI Approved :: Zope Public License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Framework :: ZODB", "Topic :: Database", "Topic :: Software Development :: Libraries :: Python Modules", "Operating System :: Microsoft :: Windows", "Operating System :: Unix", ], author="Zope Foundation", author_email="zodb-dev@zope.org", url="https://github.com/zopefoundation/BTrees", project_urls={ 'Documentation': 'https://btrees.readthedocs.io', 'Issue Tracker': 'https://github.com/zopefoundation/BTrees/issues', 'Sources': 'https://github.com/zopefoundation/BTrees', }, license="ZPL 2.1", platforms=["any"], packages=find_packages('src'), package_dir={'': 'src'}, include_package_data=True, zip_safe=False, ext_modules=ext_modules, extras_require={ 'test': TESTS_REQUIRE, 'ZODB': [ 'ZODB', ], 'docs': [ 'Sphinx', 'repoze.sphinx.autointerface', 'sphinx_rtd_theme', ], }, test_suite="BTrees.tests", tests_require=TESTS_REQUIRE, python_requires='>=3.8', install_requires=REQUIRES, cmdclass={ 'build_ext': optional_build_ext, }, entry_points="""\ """ ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4766927 BTrees-6.0/src/0000755000076500000240000000000014626041203012212 5ustar00jensstaff././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4940567 BTrees-6.0/src/BTrees/0000755000076500000240000000000014626041203013376 
5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/BTreeItemsTemplate.c0000644000076500000240000005773714355020716017271 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #define BTREEITEMSTEMPLATE_C "$Id$\n" /* A BTreeItems struct is returned from calling .items(), .keys() or * .values() on a BTree-based data structure, and is also the result of * taking slices of those. It represents a contiguous slice of a BTree. * * The start of the slice is in firstbucket, at offset first. The end of * the slice is in lastbucket, at offset last. Both endpoints are inclusive. * It must possible to get from firstbucket to lastbucket via following * bucket 'next' pointers zero or more times. firstbucket, first, lastbucket, * and last are readonly after initialization. An empty slice is represented * by firstbucket == lastbucket == currentbucket == NULL. * * 'kind' determines whether this slice represents 'k'eys alone, 'v'alues * alone, or 'i'items (key+value pairs). 'kind' is also readonly after * initialization. * * The combination of currentbucket, currentoffset and pseudoindex acts as * a search finger. Offset currentoffset in bucket currentbucket is at index * pseudoindex, where pseudoindex==0 corresponds to offset first in bucket * firstbucket, and pseudoindex==-1 corresponds to offset last in bucket * lastbucket. The function BTreeItems_seek() can be used to set this combo * correctly for any in-bounds index, and uses this combo on input to avoid * needing to search from the start (or end) on each call. Calling * BTreeItems_seek() with consecutive larger positions is very efficent. * Calling it with consecutive smaller positions is more efficient than if * a search finger weren't being used at all, but is still quadratic time * in the number of buckets in the slice. 
*/ typedef struct { PyObject_HEAD Bucket *firstbucket; /* First bucket */ Bucket *currentbucket; /* Current bucket (search finger) */ Bucket *lastbucket; /* Last bucket */ int currentoffset; /* Offset in currentbucket */ int pseudoindex; /* search finger index */ int first; /* Start offset in firstbucket */ int last; /* End offset in lastbucket */ char kind; /* 'k', 'v', 'i' */ } BTreeItems; #define ITEMS(O)((BTreeItems*)(O)) static PyObject * newBTreeItems(char kind, Bucket *lowbucket, int lowoffset, Bucket *highbucket, int highoffset); static void BTreeItems_dealloc(BTreeItems *self) { Py_XDECREF(self->firstbucket); Py_XDECREF(self->lastbucket); Py_XDECREF(self->currentbucket); PyObject_DEL(self); } static Py_ssize_t BTreeItems_length_or_nonzero(BTreeItems *self, int nonzero) { Py_ssize_t r; Bucket *b, *next; b = self->firstbucket; if (b == NULL) return 0; r = self->last + 1 - self->first; if (nonzero && r > 0) /* Short-circuit if all we care about is nonempty */ return 1; if (b == self->lastbucket) return r; Py_INCREF(b); PER_USE_OR_RETURN(b, -1); while ((next = b->next)) { r += b->len; if (nonzero && r > 0) /* Short-circuit if all we care about is nonempty */ break; if (next == self->lastbucket) break; /* we already counted the last bucket */ Py_INCREF(next); PER_UNUSE(b); Py_DECREF(b); b = next; PER_USE_OR_RETURN(b, -1); } PER_UNUSE(b); Py_DECREF(b); return r >= 0 ? r : 0; } static Py_ssize_t BTreeItems_length(BTreeItems *self) { return BTreeItems_length_or_nonzero(self, 0); } /* ** BTreeItems_seek ** ** Find the ith position in the BTreeItems. ** ** Arguments: self The BTree ** i the index to seek to, in 0 .. len(self)-1, or in ** -len(self) .. -1, as for indexing a Python sequence. ** ** ** Returns 0 if successful, -1 on failure to seek (like out-of-bounds). ** Upon successful return, index i is at offset self->currentoffset in bucket ** self->currentbucket. */ static int BTreeItems_seek(BTreeItems *self, Py_ssize_t i) { int delta, pseudoindex, currentoffset; Bucket *b, *currentbucket; int error; pseudoindex = self->pseudoindex; currentoffset = self->currentoffset; currentbucket = self->currentbucket; if (currentbucket == NULL) goto no_match; delta = i - pseudoindex; while (delta > 0) /* move right */ { int max; /* Want to move right delta positions; the most we can move right in * this bucket is currentbucket->len - currentoffset - 1 positions. */ PER_USE_OR_RETURN(currentbucket, -1); max = currentbucket->len - currentoffset - 1; b = currentbucket->next; PER_UNUSE(currentbucket); if (delta <= max) { currentoffset += delta; pseudoindex += delta; if (currentbucket == self->lastbucket && currentoffset > self->last) goto no_match; break; } /* Move to start of next bucket. */ if (currentbucket == self->lastbucket || b == NULL) goto no_match; currentbucket = b; pseudoindex += max + 1; delta -= max + 1; currentoffset = 0; } while (delta < 0) /* move left */ { int status; /* Want to move left -delta positions; the most we can move left in * this bucket is currentoffset positions. */ if ((-delta) <= currentoffset) { currentoffset += delta; pseudoindex += delta; if (currentbucket == self->firstbucket && currentoffset < self->first) goto no_match; break; } /* Move to end of previous bucket. 
*/ if (currentbucket == self->firstbucket) goto no_match; status = PreviousBucket(¤tbucket, self->firstbucket); if (status == 0) goto no_match; else if (status < 0) return -1; pseudoindex -= currentoffset + 1; delta += currentoffset + 1; PER_USE_OR_RETURN(currentbucket, -1); currentoffset = currentbucket->len - 1; PER_UNUSE(currentbucket); } assert(pseudoindex == i); /* Alas, the user may have mutated the bucket since the last time we * were called, and if they deleted stuff, we may be pointing into * trash memory now. */ PER_USE_OR_RETURN(currentbucket, -1); error = currentoffset < 0 || currentoffset >= currentbucket->len; PER_UNUSE(currentbucket); if (error) { PyErr_SetString(PyExc_RuntimeError, "the bucket being iterated changed size"); return -1; } Py_INCREF(currentbucket); Py_DECREF(self->currentbucket); self->currentbucket = currentbucket; self->currentoffset = currentoffset; self->pseudoindex = pseudoindex; return 0; no_match: IndexError(i); return -1; } /* Return the right kind ('k','v','i') of entry from bucket b at offset i. * b must be activated. Returns NULL on error. */ static PyObject * getBucketEntry(Bucket *b, int i, char kind) { PyObject *result = NULL; assert(b); assert(0 <= i && i < b->len); switch (kind) { case 'k': COPY_KEY_TO_OBJECT(result, b->keys[i]); break; case 'v': COPY_VALUE_TO_OBJECT(result, b->values[i]); break; case 'i': { PyObject *key; PyObject *value;; COPY_KEY_TO_OBJECT(key, b->keys[i]); if (!key) break; COPY_VALUE_TO_OBJECT(value, b->values[i]); if (!value) { Py_DECREF(key); break; } result = PyTuple_New(2); if (result) { PyTuple_SET_ITEM(result, 0, key); PyTuple_SET_ITEM(result, 1, value); } else { Py_DECREF(key); Py_DECREF(value); } break; } default: PyErr_SetString(PyExc_AssertionError, "getBucketEntry: unknown kind"); break; } return result; } /* ** BTreeItems_item ** ** Arguments: self a BTreeItems structure ** i Which item to inspect ** ** Returns: the BTreeItems_item_BTree of self->kind, i ** (ie pulls the ith item out) */ static PyObject * BTreeItems_item(BTreeItems *self, Py_ssize_t i) { PyObject *result; if (BTreeItems_seek(self, i) < 0) return NULL; PER_USE_OR_RETURN(self->currentbucket, NULL); result = getBucketEntry(self->currentbucket, self->currentoffset, self->kind); PER_UNUSE(self->currentbucket); return result; } /* ** BTreeItems_slice ** ** Creates a new BTreeItems structure representing the slice ** between the low and high range ** ** Arguments: self The old BTreeItems structure ** ilow The start index ** ihigh The end index ** ** Returns: BTreeItems item */ static PyObject * BTreeItems_slice(BTreeItems *self, Py_ssize_t ilow, Py_ssize_t ihigh) { Bucket *lowbucket; Bucket *highbucket; int lowoffset; int highoffset; Py_ssize_t length = -1; /* len(self), but computed only if needed */ /* Complications: * A Python slice never raises IndexError, but BTreeItems_seek does. * Python did only part of index normalization before calling this: * ilow may be < 0 now, and ihigh may be arbitrarily large. It's * our responsibility to clip them. * A Python slice is exclusive of the high index, but a BTreeItems * struct is inclusive on both ends. */ /* First adjust ilow and ihigh to be legit endpoints in the Python * sense (ilow inclusive, ihigh exclusive). This block duplicates the * logic from Python's list_slice function (slicing for builtin lists). 
*/ if (ilow < 0) ilow = 0; else { if (length < 0) length = BTreeItems_length(self); if (ilow > length) ilow = length; } if (ihigh < ilow) ihigh = ilow; else { if (length < 0) length = BTreeItems_length(self); if (ihigh > length) ihigh = length; } assert(0 <= ilow && ilow <= ihigh); assert(length < 0 || ihigh <= length); /* Now adjust for that our struct is inclusive on both ends. This is * easy *except* when the slice is empty: there's no good way to spell * that in an inclusive-on-both-ends scheme. For example, if the * slice is btree.items([:0]), ilow == ihigh == 0 at this point, and if * we were to subtract 1 from ihigh that would get interpreted by * BTreeItems_seek as meaning the *entire* set of items. Setting ilow==1 * and ihigh==0 doesn't work either, as BTreeItems_seek raises IndexError * if we attempt to seek to ilow==1 when the underlying sequence is empty. * It seems simplest to deal with empty slices as a special case here. */ if (ilow == ihigh) /* empty slice */ { lowbucket = highbucket = NULL; lowoffset = 1; highoffset = 0; } else { assert(ilow < ihigh); --ihigh; /* exclusive -> inclusive */ if (BTreeItems_seek(self, ilow) < 0) return NULL; lowbucket = self->currentbucket; lowoffset = self->currentoffset; if (BTreeItems_seek(self, ihigh) < 0) return NULL; highbucket = self->currentbucket; highoffset = self->currentoffset; } return newBTreeItems(self->kind, lowbucket, lowoffset, highbucket, highoffset); } static PyObject * BTreeItems_subscript(BTreeItems *self, PyObject* subscript) { Py_ssize_t len = BTreeItems_length_or_nonzero(self, 0); if (PyIndex_Check(subscript)) { Py_ssize_t i = PyNumber_AsSsize_t(subscript, PyExc_IndexError); if (i == -1 && PyErr_Occurred()) return NULL; if (i < 0) i += len; return BTreeItems_item(self, i); } if (PySlice_Check(subscript)) { Py_ssize_t start, stop, step, slicelength; #define SLICEOBJ(x) (x) if (PySlice_GetIndicesEx(SLICEOBJ(subscript), len, &start, &stop, &step, &slicelength) < 0) { return NULL; } if (step != 1) { PyErr_SetString(PyExc_RuntimeError, "slices must have step size of 1"); return NULL; } return BTreeItems_slice(self, start, stop); } PyErr_SetString(PyExc_RuntimeError, "Unknown index type: must be int or slice"); return NULL; } /* Py3K doesn't honor sequence slicing, so implement via mapping */ static PyMappingMethods BTreeItems_as_mapping = { (lenfunc)BTreeItems_length, /* mp_length */ (binaryfunc)BTreeItems_subscript, /* mp_subscript */ }; static PySequenceMethods BTreeItems_as_sequence = { (lenfunc) BTreeItems_length, /* sq_length */ (binaryfunc)0, /* sq_concat */ (ssizeargfunc)0, /* sq_repeat */ (ssizeargfunc) BTreeItems_item, /* sq_item */ }; /* Number Method items (just for nb_nonzero!) 
*/ static int BTreeItems_nonzero(BTreeItems *self) { return BTreeItems_length_or_nonzero(self, 1); } static PyNumberMethods BTreeItems_as_number_for_nonzero = { 0, /* nb_add */ 0, /* nb_subtract */ 0, /* nb_multiply */ 0, /* nb_remainder */ 0, /* nb_divmod */ 0, /* nb_power */ 0, /* nb_negative */ 0, /* nb_positive */ 0, /* nb_absolute */ (inquiry)BTreeItems_nonzero /* nb_nonzero */ }; static PyTypeObject BTreeItemsType = { PyVarObject_HEAD_INIT(NULL, 0) MOD_NAME_PREFIX "BTreeItems", /* tp_name */ sizeof(BTreeItems), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) BTreeItems_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* obsolete tp_getattr */ 0, /* obsolete tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ &BTreeItems_as_number_for_nonzero, /* tp_as_number */ &BTreeItems_as_sequence, /* tp_as_sequence */ &BTreeItems_as_mapping, /* tp_as_mapping */ (hashfunc)0, /* tp_hash */ (ternaryfunc)0, /* tp_call */ (reprfunc)0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ /* Space for future expansion */ 0L,0L, "Sequence type used to iterate over BTree items." /* Documentation string */ }; /* Returns a new BTreeItems object representing the contiguous slice from * offset lowoffset in bucket lowbucket through offset highoffset in bucket * highbucket, inclusive. Pass lowbucket == NULL for an empty slice. * The currentbucket is set to lowbucket, currentoffset ot lowoffset, and * pseudoindex to 0. kind is 'k', 'v' or 'i' (see BTreeItems struct docs). */ static PyObject * newBTreeItems(char kind, Bucket *lowbucket, int lowoffset, Bucket *highbucket, int highoffset) { BTreeItems *self; UNLESS (self = PyObject_NEW(BTreeItems, &BTreeItemsType)) return NULL; self->kind=kind; self->first=lowoffset; self->last=highoffset; if (! lowbucket || ! 
highbucket || (lowbucket == highbucket && lowoffset > highoffset)) { self->firstbucket = 0; self->lastbucket = 0; self->currentbucket = 0; } else { Py_INCREF(lowbucket); self->firstbucket = lowbucket; Py_INCREF(highbucket); self->lastbucket = highbucket; Py_INCREF(lowbucket); self->currentbucket = lowbucket; } self->currentoffset = lowoffset; self->pseudoindex = 0; return OBJECT(self); } static int nextBTreeItems(SetIteration *i) { if (i->position >= 0) { if (i->position) { DECREF_KEY(i->key); DECREF_VALUE(i->value); } if (BTreeItems_seek(ITEMS(i->set), i->position) >= 0) { Bucket *currentbucket; currentbucket = BUCKET(ITEMS(i->set)->currentbucket); UNLESS(PER_USE(currentbucket)) { /* Mark iteration terminated, so that finiSetIteration doesn't * try to redundantly decref the key and value */ i->position = -1; return -1; } COPY_KEY(i->key, currentbucket->keys[ITEMS(i->set)->currentoffset]); INCREF_KEY(i->key); COPY_VALUE(i->value, currentbucket->values[ITEMS(i->set)->currentoffset]); INCREF_VALUE(i->value); i->position ++; PER_UNUSE(currentbucket); } else { i->position = -1; PyErr_Clear(); } } return 0; } static int nextTreeSetItems(SetIteration *i) { if (i->position >= 0) { if (i->position) { DECREF_KEY(i->key); } if (BTreeItems_seek(ITEMS(i->set), i->position) >= 0) { Bucket *currentbucket; currentbucket = BUCKET(ITEMS(i->set)->currentbucket); UNLESS(PER_USE(currentbucket)) { /* Mark iteration terminated, so that finiSetIteration doesn't * try to redundantly decref the key and value */ i->position = -1; return -1; } COPY_KEY(i->key, currentbucket->keys[ITEMS(i->set)->currentoffset]); INCREF_KEY(i->key); i->position ++; PER_UNUSE(currentbucket); } else { i->position = -1; PyErr_Clear(); } } return 0; } /* Support for the iteration protocol */ static PyTypeObject BTreeIter_Type; /* The type of iterator objects, returned by e.g. iter(IIBTree()). */ typedef struct { PyObject_HEAD /* We use a BTreeItems object because it's convenient and flexible. * We abuse it two ways: * 1. We set currentbucket to NULL when the iteration is finished. * 2. We don't bother keeping pseudoindex in synch. */ BTreeItems *pitems; } BTreeIter; /* Return a new iterator object, to traverse the keys and/or values * represented by pitems. pitems must not be NULL. Returns NULL if error. */ static BTreeIter * BTreeIter_new(BTreeItems *pitems) { BTreeIter *result; assert(pitems != NULL); result = PyObject_New(BTreeIter, &BTreeIter_Type); if (result) { Py_INCREF(pitems); result->pitems = pitems; } return result; } /* The iterator's tp_dealloc slot. */ static void BTreeIter_dealloc(BTreeIter *bi) { Py_DECREF(bi->pitems); PyObject_Del(bi); } /* The implementation of the iterator's tp_iternext slot. Returns "the next" * item; returns NULL if error; returns NULL without setting an error if the * iteration is exhausted (that's the way to terminate the iteration protocol). */ static PyObject * BTreeIter_next(BTreeIter *bi, PyObject *args) { PyObject *result = NULL; /* until proven innocent */ BTreeItems *items = bi->pitems; int i = items->currentoffset; Bucket *bucket = items->currentbucket; if (bucket == NULL) /* iteration termination is sticky */ return NULL; PER_USE_OR_RETURN(bucket, NULL); if (i >= bucket->len) { /* We never leave this routine normally with i >= len: somebody * else mutated the current bucket. */ PyErr_SetString(PyExc_RuntimeError, "the bucket being iterated changed size"); /* Arrange for that this error is sticky too. 
*/ items->currentoffset = INT_MAX; goto Done; } /* Build the result object, from bucket at offset i. */ result = getBucketEntry(bucket, i, items->kind); /* Advance position for next call. */ if (bucket == items->lastbucket && i >= items->last) { /* Next call should terminate the iteration. */ Py_DECREF(items->currentbucket); items->currentbucket = NULL; } else { ++i; if (i >= bucket->len) { Py_XINCREF(bucket->next); items->currentbucket = bucket->next; Py_DECREF(bucket); i = 0; } items->currentoffset = i; } Done: PER_UNUSE(bucket); return result; } static PyObject * BTreeIter_getiter(PyObject *it) { Py_INCREF(it); return it; } static PyTypeObject BTreeIter_Type = { PyVarObject_HEAD_INIT(NULL, 0) MODULE_NAME MOD_NAME_PREFIX "TreeIterator", /* tp_name */ sizeof(BTreeIter), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor)BTreeIter_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /*PyObject_GenericGetAttr,*/ /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)BTreeIter_getiter, /* tp_iter */ (iternextfunc)BTreeIter_next, /* tp_iternext */ 0, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/BTreeModuleTemplate.c0000644000076500000240000005677014355020716017431 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #include "Python.h" /* include structmember.h for offsetof */ #include "structmember.h" #include "bytesobject.h" #ifdef PERSISTENT #include "persistent/cPersistence.h" #else #define PER_USE_OR_RETURN(self, NULL) #define PER_ALLOW_DEACTIVATION(self) #define PER_PREVENT_DEACTIVATION(self) #define PER_DEL(self) #define PER_USE(O) 1 #define PER_ACCESSED(O) 1 #endif #include "_compat.h" /* So sue me. This pair gets used all over the place, so much so that it * interferes with understanding non-persistence parts of algorithms. * PER_UNUSE can be used after a successul PER_USE or PER_USE_OR_RETURN. * It allows the object to become ghostified, and tells the persistence * machinery that the object's fields were used recently. */ #define PER_UNUSE(OBJ) do { \ PER_ALLOW_DEACTIVATION(OBJ); \ PER_ACCESSED(OBJ); \ } while (0) /* The tp_name slots of the various BTree types contain the fully * qualified names of the types, e.g. zodb.btrees.OOBTree.OOBTree. * The full name is usd to support pickling and because it is not * possible to modify the __module__ slot of a type dynamically. (This * may be a bug in Python 2.2). 
* * The MODULE_NAME here used to be "BTrees._". We actually want the module * name to point to the Python module rather than the C, so the underline * is now removed. */ #define MODULE_NAME "BTrees." MOD_NAME_PREFIX "BTree." static PyObject *sort_str, *reverse_str, *__setstate___str; static PyObject *_bucket_type_str, *max_internal_size_str, *max_leaf_size_str; static PyObject *__slotnames__str; static PyObject *ConflictError = NULL; static void PyVar_Assign(PyObject **v, PyObject *e) { Py_XDECREF(*v); *v=e;} #define ASSIGN(V,E) PyVar_Assign(&(V),(E)) #define UNLESS(E) if (!(E)) #define OBJECT(O) ((PyObject*)(O)) #define MIN_BUCKET_ALLOC 16 #define SameType_Check(O1, O2) (Py_TYPE((O1))==Py_TYPE((O2))) #define ASSERT(C, S, R) if (! (C)) { \ PyErr_SetString(PyExc_AssertionError, (S)); return (R); } #ifdef NEED_LONG_LONG_SUPPORT /* Helper code used to support long long instead of int. */ #ifndef PY_LONG_LONG #error "PY_LONG_LONG required but not defined" #endif static int longlong_handle_overflow(PY_LONG_LONG result, int overflow) { if (overflow) { PyErr_Clear(); /* Python 3 tends to have an exception already set. We want to consistently raise a TypeError. */ PyErr_SetString(PyExc_TypeError, "couldn't convert integer to C long long"); return 0; } else if (result == -1 && PyErr_Occurred()) /* An exception has already been raised. */ return 0; return 1; } #ifdef NEED_LONG_LONG_KEYS #if defined(ZODB_UNSIGNED_VALUE_INTS) || defined(ZODB_UNSIGNED_KEY_INTS) static int ulonglong_check(PyObject *ob) { if (!PyLong_Check(ob)) { return 0; } if (PyLong_AsUnsignedLongLong(ob) == (unsigned long long)-1 && PyErr_Occurred()) { return 0; } return 1; } #endif /* defined(ZODB_UNSIGNED_VALUE_INTS) || defined(ZODB_UNSIGNED_KEY_INTS) */ static int longlong_check(PyObject *ob) { if (PyLong_Check(ob)) { int overflow; PY_LONG_LONG result; result = PyLong_AsLongLongAndOverflow(ob, &overflow); return longlong_handle_overflow(result, overflow); } return 0; } #endif #if defined(ZODB_UNSIGNED_VALUE_INTS) || defined(ZODB_UNSIGNED_KEY_INTS) static PyObject * ulonglong_as_object(unsigned PY_LONG_LONG val) { if ((val > LONG_MAX)) return PyLong_FromUnsignedLongLong(val); return UINT_FROM_LONG((unsigned long)val); } static int ulonglong_convert(PyObject *ob, unsigned PY_LONG_LONG *value) { unsigned PY_LONG_LONG val; if (!PyLong_Check(ob)) { PyErr_SetString(PyExc_TypeError, "expected integer key"); return 0; } val = PyLong_AsUnsignedLongLong(ob); if (val == (unsigned long long)-1 && PyErr_Occurred()) { if (PyErr_ExceptionMatches(PyExc_OverflowError)) { PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "overflow error converting int to C long long"); } return 0; } (*value) = val; return 1; } #endif /* defined(ZODB_UNSIGNED_VALUE_INTS) || defined(ZODB_UNSIGNED_KEY_INTS) */ static PyObject * longlong_as_object(PY_LONG_LONG val) { if ((val > LONG_MAX) || (val < LONG_MIN)) return PyLong_FromLongLong(val); return INT_FROM_LONG((long)val); } static int longlong_convert(PyObject *ob, PY_LONG_LONG *value) { PY_LONG_LONG val; int overflow; if (!PyLong_Check(ob)) { PyErr_SetString(PyExc_TypeError, "expected integer key"); return 0; } val = PyLong_AsLongLongAndOverflow(ob, &overflow); if (!longlong_handle_overflow(val, overflow)) { return 0; } (*value) = val; return 1; } #endif /* NEED_LONG_LONG_SUPPORT */ /* Various kinds of BTree and Bucket structs are instances of * "sized containers", and have a common initial layout: * The stuff needed for all Python objects, or all Persistent objects. 
* int size: The maximum number of things that could be contained * without growing the container. * int len: The number of things currently contained. * * Invariant: 0 <= len <= size. * * A sized container typically goes on to declare one or more pointers * to contiguous arrays with 'size' elements each, the initial 'len' of * which are currently in use. */ #ifdef PERSISTENT #define sizedcontainer_HEAD \ cPersistent_HEAD \ int size; \ int len; #else #define sizedcontainer_HEAD \ PyObject_HEAD \ int size; \ int len; #endif /* Nothing is actually of type Sized, but (pointers to) BTree nodes and * Buckets can be cast to Sized* in contexts that only need to examine * the members common to all sized containers. */ typedef struct Sized_s { sizedcontainer_HEAD } Sized; #define SIZED(O) ((Sized*)(O)) /* A Bucket wraps contiguous vectors of keys and values. Keys are unique, * and stored in sorted order. The 'values' pointer may be NULL if the * Bucket is used to implement a set. Buckets serving as leafs of BTrees * are chained together via 'next', so that the entire BTree contents * can be traversed in sorted order quickly and easily. */ typedef struct Bucket_s { sizedcontainer_HEAD struct Bucket_s *next; /* the bucket with the next-larger keys */ KEY_TYPE *keys; /* 'len' keys, in increasing order */ VALUE_TYPE *values; /* 'len' corresponding values; NULL if a set */ } Bucket; #define BUCKET(O) ((Bucket*)(O)) /* A BTree is complicated. See Maintainer.txt. */ typedef struct BTreeItem_s { KEY_TYPE key; Sized *child; /* points to another BTree, or to a Bucket of some sort */ } BTreeItem; typedef struct BTree_s { sizedcontainer_HEAD /* firstbucket points to the bucket containing the smallest key in * the BTree. This is found by traversing leftmost child pointers * (data[0].child) until reaching a Bucket. */ Bucket *firstbucket; /* The BTree points to 'len' children, via the "child" fields of the data * array. There are len-1 keys in the 'key' fields, stored in increasing * order. data[0].key is unused. For i in 0 .. len-1, all keys reachable * from data[i].child are >= data[i].key and < data[i+1].key, at the * endpoints pretending that data[0].key is minus infinity and * data[len].key is positive infinity. */ BTreeItem *data; long max_internal_size; long max_leaf_size; } BTree; static PyTypeObject BTreeTypeType; static PyTypeObject BTreeType; static PyTypeObject BucketType; #define BTREE(O) ((BTree*)(O)) /* Use BTREE_SEARCH to find which child pointer to follow. * RESULT An int lvalue to hold the index i such that SELF->data[i].child * is the correct node to search next. * SELF A pointer to a BTree node. * KEY The key you're looking for, of type KEY_TYPE. * ONERROR What to do if key comparison raises an exception; for example, * perhaps 'return NULL'. * * See Maintainer.txt for discussion: this is optimized in subtle ways. * It's recommended that you call this at the start of a routine, waiting * to check for self->len == 0 after. */ #define BTREE_SEARCH(RESULT, SELF, KEY, ONERROR) { \ int _lo = 0; \ int _hi = (SELF)->len; \ int _i, _cmp; \ for (_i = _hi >> 1; _i > _lo; _i = (_lo + _hi) >> 1) { \ TEST_KEY_SET_OR(_cmp, (SELF)->data[_i].key, (KEY)) \ ONERROR; \ if (_cmp < 0) _lo = _i; \ else if (_cmp > 0) _hi = _i; \ else /* equal */ break; \ } \ (RESULT) = _i; \ } /* SetIteration structs are used in the internal set iteration protocol. * When you want to iterate over a set or bucket or BTree (even an * individual key!), * 1. 
Declare a new iterator: * SetIteration si = {0,0,0}; * Using "{0,0,0}" or "{0,0}" appear most common. Only one {0} is * necssary. At least one must be given so that finiSetIteration() works * correctly even if you don't get around to calling initSetIteration(). * 2. Initialize it via * initSetIteration(&si, PyObject *s, useValues) * It's an error if that returns an int < 0. In case of error on the * init call, calling finiSetIteration(&si) is optional. But if the * init call succeeds, you must eventually call finiSetIteration(), * and whether or not subsequent calls to si.next() fail. * 3. Get the first element: * if (si.next(&si) < 0) { there was an error } * If the set isn't empty, this sets si.position to an int >= 0, * si.key to the element's key (of type KEY_TYPE), and maybe si.value to * the element's value (of type VALUE_TYPE). si.value is defined * iff si.usesValue is true. * 4. Process all the elements: * while (si.position >= 0) { * do something with si.key and/or si.value; * if (si.next(&si) < 0) { there was an error; } * } * 5. Finalize the SetIterator: * finiSetIteration(&si); * This is mandatory! si may contain references to iterator objects, * keys and values, and they must be cleaned up else they'll leak. If * this were C++ we'd hide that in the destructor, but in C you have to * do it by hand. */ typedef struct SetIteration_s { PyObject *set; /* the set, bucket, BTree, ..., being iterated */ int position; /* initialized to 0; set to -1 by next() when done */ int usesValue; /* true iff 'set' has values & we iterate them */ KEY_TYPE key; /* next() sets to next key */ VALUE_TYPE value; /* next() may set to next value */ int (*next)(struct SetIteration_s*); /* function to get next key+value */ } SetIteration; /* Finish the set iteration protocol. This MUST be called by everyone * who starts a set iteration, unless the initial call to initSetIteration * failed; in that case, and only that case, calling finiSetIteration is * optional. */ static void finiSetIteration(SetIteration *i) { assert(i != NULL); if (i->set == NULL) return; Py_DECREF(i->set); i->set = NULL; /* so it doesn't hurt to call this again */ if (i->position > 0) { /* next() was called at least once, but didn't finish iterating * (else position would be negative). So the cached key and * value need to be cleaned up. */ DECREF_KEY(i->key); if (i->usesValue) { DECREF_VALUE(i->value); } } i->position = -1; /* stop any stray next calls from doing harm */ } static PyObject * IndexError(int i) { PyObject *v; v = INT_FROM_LONG(i); if (!v) { v = Py_None; Py_INCREF(v); } PyErr_SetObject(PyExc_IndexError, v); Py_DECREF(v); return NULL; } /* Search for the bucket immediately preceding *current, in the bucket chain * starting at first. current, *current and first must not be NULL. 
* * Return: * 1 *current holds the correct bucket; this is a borrowed reference * 0 no such bucket exists; *current unaltered * -1 error; *current unaltered */ static int PreviousBucket(Bucket **current, Bucket *first) { Bucket *trailing = NULL; /* first travels; trailing follows it */ int result = 0; assert(current && *current && first); if (first == *current) return 0; do { trailing = first; PER_USE_OR_RETURN(first, -1); first = first->next; ((trailing)->state==cPersistent_STICKY_STATE && ((trailing)->state=cPersistent_UPTODATE_STATE)); PER_ACCESSED(trailing); if (first == *current) { *current = trailing; result = 1; break; } } while (first); return result; } static void * BTree_Malloc(size_t sz) { void *r; ASSERT(sz > 0, "non-positive size malloc", NULL); r = malloc(sz); if (r) return r; PyErr_NoMemory(); return NULL; } static void * BTree_Realloc(void *p, size_t sz) { void *r; ASSERT(sz > 0, "non-positive size realloc", NULL); if (p) r = realloc(p, sz); else r = malloc(sz); UNLESS (r) PyErr_NoMemory(); return r; } /* Shared keyword-argument list for BTree/Bucket * (iter)?(keys|values|items) */ static char *search_keywords[] = {"min", "max", "excludemin", "excludemax", 0}; /** * Call this instead of using `PyErr_ExceptionMatches(PyExc_KeyError)` * when you intend to suppress the KeyError. * * We want to match only exactly ``PyExc_KeyError``, and not subclasses * such as ``ZODB.POSException.POSKeyError``. */ static int BTree_ShouldSuppressKeyError() { PyObject* exc_type = PyErr_Occurred(); /* Returns borrowed reference. */ if (exc_type && exc_type == PyExc_KeyError) { return 1; } return 0; } #include "BTreeItemsTemplate.c" #include "BucketTemplate.c" #include "SetTemplate.c" #include "BTreeTemplate.c" #include "TreeSetTemplate.c" #include "SetOpTemplate.c" #include "MergeTemplate.c" static struct PyMethodDef module_methods[] = { {"difference", (PyCFunction) difference_m, METH_VARARGS, "difference(o1, o2)\n" "compute the difference between o1 and o2" }, {"union", (PyCFunction) union_m, METH_VARARGS, "union(o1, o2)\ncompute the union of o1 and o2\n" }, {"intersection", (PyCFunction) intersection_m, METH_VARARGS, "intersection(o1, o2)\n" "compute the intersection of o1 and o2" }, #ifdef MERGE {"weightedUnion", (PyCFunction) wunion_m, METH_VARARGS, "weightedUnion(o1, o2 [, w1, w2])\ncompute the union of o1 and o2\n" "\nw1 and w2 are weights." }, {"weightedIntersection", (PyCFunction) wintersection_m, METH_VARARGS, "weightedIntersection(o1, o2 [, w1, w2])\n" "compute the intersection of o1 and o2\n" "\nw1 and w2 are weights." }, #endif #ifdef MULTI_INT_UNION {"multiunion", (PyCFunction) multiunion_m, METH_VARARGS, "multiunion(seq)\ncompute union of a sequence of integer sets.\n" "\n" "Each element of seq must be an integer set, or convertible to one\n" "via the set iteration protocol. The union returned is an IISet." }, #endif {NULL, NULL} /* sentinel */ }; static char BTree_module_documentation[] = "\n" MASTER_ID BTREEITEMSTEMPLATE_C "$Id$\n" BTREETEMPLATE_C BUCKETTEMPLATE_C KEYMACROS_H MERGETEMPLATE_C SETOPTEMPLATE_C SETTEMPLATE_C TREESETTEMPLATE_C VALUEMACROS_H BTREEITEMSTEMPLATE_C ; static int init_type_with_meta_base(PyTypeObject *type, PyTypeObject* meta, PyTypeObject* base) { int result; PyObject* slotnames; ((PyObject*)type)->ob_type = meta; type->tp_base = base; if (PyType_Ready(type) < 0) return 0; /* persistent looks for __slotnames__ in the dict at deactivation time, and if it's not present, calls ``copyreg._slotnames``, which itself looks in the dict again. 
Then it does some computation, and tries to store the object in the dict --- which for built-in types, it can't. So we can save some runtime if we store an empty slotnames for these classes. */ slotnames = PyTuple_New(0); if (!slotnames) { return 0; } result = PyDict_SetItem(type->tp_dict, __slotnames__str, slotnames); Py_DECREF(slotnames); return result < 0 ? 0 : 1; } int /* why isn't this static? */ init_persist_type(PyTypeObject* type) { return init_type_with_meta_base(type, &PyType_Type, cPersistenceCAPI->pertype); } static int init_tree_type(PyTypeObject* type, PyTypeObject* bucket_type) { if (!init_type_with_meta_base(type, &BTreeTypeType, cPersistenceCAPI->pertype)) { return 0; } if (PyDict_SetItem(type->tp_dict, _bucket_type_str, (PyObject*)bucket_type) < 0) { return 0; } return 1; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_" MOD_NAME_PREFIX "BTree", /* m_name */ BTree_module_documentation, /* m_doc */ -1, /* m_size */ module_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; static PyObject* module_init(void) { PyObject *module, *mod_dict, *interfaces, *conflicterr; #ifdef KEY_TYPE_IS_PYOBJECT object_ = PyTuple_GetItem(Py_TYPE(Py_None)->tp_bases, 0); if (object_ == NULL) return NULL; #endif sort_str = INTERN("sort"); if (!sort_str) return NULL; reverse_str = INTERN("reverse"); if (!reverse_str) return NULL; __setstate___str = INTERN("__setstate__"); if (!__setstate___str) return NULL; _bucket_type_str = INTERN("_bucket_type"); if (!_bucket_type_str) return NULL; max_internal_size_str = INTERN("max_internal_size"); if (! max_internal_size_str) return NULL; max_leaf_size_str = INTERN("max_leaf_size"); if (! max_leaf_size_str) return NULL; __slotnames__str = INTERN("__slotnames__"); if (!__slotnames__str) return NULL; BTreeType_setattro_allowed_names = PyTuple_Pack( 5, /* BTree attributes */ max_internal_size_str, max_leaf_size_str, /* zope.interface attributes */ /* Technically, INTERNING directly here leaks references, but since we can't be unloaded, it's not a problem. */ INTERN("__implemented__"), INTERN("__providedBy__"), INTERN("__provides__") ); /* Grab the ConflictError class */ interfaces = PyImport_ImportModule("BTrees.Interfaces"); if (interfaces != NULL) { conflicterr = PyObject_GetAttrString(interfaces, "BTreesConflictError"); if (conflicterr != NULL) ConflictError = conflicterr; Py_DECREF(interfaces); } if (ConflictError == NULL) { Py_INCREF(PyExc_ValueError); ConflictError=PyExc_ValueError; } /* Initialize the PyPersist_C_API and the type objects. */ cPersistenceCAPI = (cPersistenceCAPIstruct *)PyCapsule_Import( "persistent.cPersistence.CAPI", 0); if (cPersistenceCAPI == NULL) { /* The Capsule API attempts to import 'persistent' and then * walk down to the specified attribute using getattr. If the C * extensions aren't available, this can result in an * AttributeError being raised. Let that percolate up as an * ImportError so it can be caught in the expected way. 
*/ if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_ImportError)) { PyErr_SetString(PyExc_ImportError, "persistent C extension unavailable"); } return NULL; } #define _SET_TYPE(typ) ((PyObject*)(&typ))->ob_type = &PyType_Type _SET_TYPE(BTreeItemsType); _SET_TYPE(BTreeIter_Type); BTreeIter_Type.tp_getattro = PyObject_GenericGetAttr; BucketType.tp_new = PyType_GenericNew; SetType.tp_new = PyType_GenericNew; BTreeType.tp_new = PyType_GenericNew; TreeSetType.tp_new = PyType_GenericNew; if (!init_persist_type(&BucketType)) return NULL; if (!init_type_with_meta_base(&BTreeTypeType, &PyType_Type, &PyType_Type)) { return NULL; } if (!init_tree_type(&BTreeType, &BucketType)) { return NULL; } if (!init_persist_type(&SetType)) return NULL; if (!init_tree_type(&TreeSetType, &SetType)) { return NULL; } /* Create the module and add the functions */ module = PyModule_Create(&moduledef); /* Add some symbolic constants to the module */ mod_dict = PyModule_GetDict(module); if (PyDict_SetItemString(mod_dict, MOD_NAME_PREFIX "Bucket", (PyObject *)&BucketType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, MOD_NAME_PREFIX "BTree", (PyObject *)&BTreeType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, MOD_NAME_PREFIX "Set", (PyObject *)&SetType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, MOD_NAME_PREFIX "TreeSet", (PyObject *)&TreeSetType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, MOD_NAME_PREFIX "TreeIterator", (PyObject *)&BTreeIter_Type) < 0) return NULL; /* We also want to be able to access these constants without the prefix * so that code can more easily exchange modules (particularly the integer * and long modules, but also others). The TreeIterator is only internal, * so we don't bother to expose that. */ if (PyDict_SetItemString(mod_dict, "Bucket", (PyObject *)&BucketType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, "BTree", (PyObject *)&BTreeType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, "Set", (PyObject *)&SetType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, "TreeSet", (PyObject *)&TreeSetType) < 0) return NULL; if (PyDict_SetItemString(mod_dict, "TreeItems", (PyObject *)&BTreeItemsType) < 0) return NULL; #if defined(ZODB_64BIT_INTS) && defined(NEED_LONG_LONG_SUPPORT) if (PyDict_SetItemString(mod_dict, "using64bits", Py_True) < 0) return NULL; #else if (PyDict_SetItemString(mod_dict, "using64bits", Py_False) < 0) return NULL; #endif return module; } PyMODINIT_FUNC INITMODULE(void) { return module_init(); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/BTreeTemplate.c0000644000076500000240000022323614355020716016254 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #include "_compat.h" #define BTREETEMPLATE_C "$Id$\n" static long _get_max_size(BTree *self, PyObject *name, long default_max) { PyObject *size; long isize; size = PyObject_GetAttr(OBJECT(OBJECT(self)->ob_type), name); if (size == NULL) { PyErr_Clear(); return default_max; } isize = PyLong_AsLong(size); Py_DECREF(size); if (isize <= 0 && ! PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, "non-positive max size in BTree subclass"); return -1; } return isize; } static int _max_internal_size(BTree *self) { long isize; if (self->max_internal_size > 0) return self->max_internal_size; isize = _get_max_size(self, max_internal_size_str, -1); self->max_internal_size = isize; return isize; } static int _max_leaf_size(BTree *self) { long isize; if (self->max_leaf_size > 0) return self->max_leaf_size; isize = _get_max_size(self, max_leaf_size_str, -1); self->max_leaf_size = isize; return isize; } /* Sanity-check a BTree. This is a private helper for BTree_check. Return: * -1 Error. If it's an internal inconsistency in the BTree, * AssertionError is set. * 0 No problem found. * * nextbucket is the bucket "one beyond the end" of the BTree; the last bucket * directly reachable from following right child pointers *should* be linked * to nextbucket (and this is checked). */ static int BTree_check_inner(BTree *self, Bucket *nextbucket) { int i; Bucket *bucketafter; Sized *child; char *errormsg = "internal error"; /* someone should have overriden */ Sized *activated_child = NULL; int result = -1; /* until proved innocent */ #define CHECK(CONDITION, ERRORMSG) \ if (!(CONDITION)) { \ errormsg = (ERRORMSG); \ goto Error; \ } PER_USE_OR_RETURN(self, -1); CHECK(self->len >= 0, "BTree len < 0"); CHECK(self->len <= self->size, "BTree len > size"); if (self->len == 0) /* Empty BTree. */ { CHECK(self->firstbucket == NULL, "Empty BTree has non-NULL firstbucket"); result = 0; goto Done; } /* Non-empty BTree. */ CHECK(self->firstbucket != NULL, "Non-empty BTree has NULL firstbucket"); /* Obscure: The first bucket is pointed to at least by self->firstbucket * and data[0].child of whichever BTree node it's a child of. However, * if persistence is enabled then the latter BTree node may be a ghost * at this point, and so its pointers "don't count": we can only rely * on self's pointers being intact. */ #ifdef PERSISTENT CHECK(Py_REFCNT(self->firstbucket) >= 1, "Non-empty BTree firstbucket has refcount < 1"); #else CHECK(Py_REFCNT(self->firstbucket) >= 2, "Non-empty BTree firstbucket has refcount < 2"); #endif for (i = 0; i < self->len; ++i) { CHECK(self->data[i].child != NULL, "BTree has NULL child"); } if (SameType_Check(self, self->data[0].child)) { /* Our children are also BTrees. 
*/ child = self->data[0].child; UNLESS (PER_USE(child)) goto Done; activated_child = child; CHECK(self->firstbucket == BTREE(child)->firstbucket, "BTree has firstbucket different than " "its first child's firstbucket"); PER_ALLOW_DEACTIVATION(child); activated_child = NULL; for (i = 0; i < self->len; ++i) { child = self->data[i].child; CHECK(SameType_Check(self, child), "BTree children have different types"); if (i == self->len - 1) bucketafter = nextbucket; else { BTree *child2 = BTREE(self->data[i+1].child); UNLESS (PER_USE(child2)) goto Done; bucketafter = child2->firstbucket; PER_ALLOW_DEACTIVATION(child2); } if (BTree_check_inner(BTREE(child), bucketafter) < 0) goto Done; } } else /* Our children are buckets. */ { CHECK(self->firstbucket == BUCKET(self->data[0].child), "Bottom-level BTree node has inconsistent firstbucket belief"); for (i = 0; i < self->len; ++i) { child = self->data[i].child; UNLESS (PER_USE(child)) goto Done; activated_child = child; CHECK(!SameType_Check(self, child), "BTree children have different types"); CHECK(child->len >= 1, "Bucket length < 1");/* no empty buckets! */ CHECK(child->len <= child->size, "Bucket len > size"); #ifdef PERSISTENT CHECK(Py_REFCNT(child) >= 1, "Bucket has refcount < 1"); #else CHECK(Py_REFCNT(child) >= 2, "Bucket has refcount < 2"); #endif if (i == self->len - 1) bucketafter = nextbucket; else bucketafter = BUCKET(self->data[i+1].child); CHECK(BUCKET(child)->next == bucketafter, "Bucket next pointer is damaged"); PER_ALLOW_DEACTIVATION(child); activated_child = NULL; } } result = 0; goto Done; Error: PyErr_SetString(PyExc_AssertionError, errormsg); result = -1; Done: /* No point updating access time -- this isn't a "real" use. */ PER_ALLOW_DEACTIVATION(self); if (activated_child) { PER_ALLOW_DEACTIVATION(activated_child); } return result; #undef CHECK } /* Sanity-check a BTree. This is the ._check() method. Return: * NULL Error. If it's an internal inconsistency in the BTree, * AssertionError is set. * Py_None No problem found. */ static PyObject* BTree_check(BTree *self) { PyObject *result = NULL; int i = BTree_check_inner(self, NULL); if (i >= 0) { result = Py_None; Py_INCREF(result); } return result; } #define _BGET_REPLACE_TYPE_ERROR 1 #define _BGET_ALLOW_TYPE_ERROR 0 /* ** _BTree_get ** ** Search a BTree. ** ** Arguments ** self a pointer to a BTree ** keyarg the key to search for, as a Python object ** has_key true/false; when false, try to return the associated ** value; when true, return a boolean ** replace_type_err true/false: When true, ignore the TypeError from ** a key conversion issue, instead ** transforming it into a KeyError set. If ** you are just reading/searching, set to ** true. If you will be adding/updating, ** however, set to false. Or use ** _BGET_REPLACE_TYPE_ERROR ** and _BGET_ALLOW_TYPE_ERROR, respectively. ** Return ** When has_key false: ** If key exists, its associated value. ** If key doesn't exist, NULL and KeyError is set. ** When has_key true: ** A Python int is returned in any case. ** If key exists, the depth of the bucket in which it was found. ** If key doesn't exist, 0. 
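**
** An illustrative caller, shown only as a sketch (the real wrappers are
** BTree_get, BTree_getm and BTree_contains below); `fallback` is a
** hypothetical default object owned by the caller, and a missing key is
** treated as "use the default":
**
**   PyObject *v = _BTree_get(tree, keyarg, 0, _BGET_REPLACE_TYPE_ERROR);
**   if (v == NULL && BTree_ShouldSuppressKeyError())
**   {
**       PyErr_Clear();
**       v = fallback;
**       Py_INCREF(v);
**   }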
*/ static PyObject * _BTree_get(BTree *self, PyObject *keyarg, int has_key, int replace_type_err) { KEY_TYPE key; PyObject *result = NULL; /* guilty until proved innocent */ int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); UNLESS (copied) { if (replace_type_err && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); PyErr_SetObject(PyExc_KeyError, keyarg); } return NULL; } PER_USE_OR_RETURN(self, NULL); if (self->len == 0) { /* empty BTree */ if (has_key) result = INT_FROM_LONG(0); else PyErr_SetObject(PyExc_KeyError, keyarg); } else { for (;;) { int i; Sized *child; BTREE_SEARCH(i, self, key, goto Done); child = self->data[i].child; has_key += has_key != 0; /* bump depth counter, maybe */ if (SameType_Check(self, child)) { PER_UNUSE(self); self = BTREE(child); PER_USE_OR_RETURN(self, NULL); } else { result = _bucket_get(BUCKET(child), keyarg, has_key); break; } } } Done: PER_UNUSE(self); return result; } static PyObject * BTree_get(BTree *self, PyObject *key) { return _BTree_get(self, key, 0, _BGET_REPLACE_TYPE_ERROR); } /* Create a new bucket for the BTree or TreeSet using the class attribute _bucket_type, which is normally initialized to BucketType or SetType as appropriate. */ static Sized * BTree_newBucket(BTree *self) { PyObject *factory; Sized *result; /* _bucket_type_str defined in BTreeModuleTemplate.c */ factory = PyObject_GetAttr((PyObject *)Py_TYPE(self), _bucket_type_str); if (factory == NULL) return NULL; /* TODO: Should we check that the factory actually returns something of the appropriate type? How? The C code here is going to depend on any custom bucket type having the same layout at the C level. */ result = SIZED(PyObject_CallObject(factory, NULL)); Py_DECREF(factory); return result; } /* * Move data from the current BTree, from index onward, to the newly created * BTree 'next'. self and next must both be activated. If index is OOB (< 0 * or >= self->len), use self->len / 2 as the index (i.e., split at the * midpoint). self must have at least 2 children on entry, and index must * be such that self and next each have at least one child at exit. self's * accessed time is updated. * * Return: * -1 error * 0 OK */ static int BTree_split(BTree *self, int index, BTree *next) { int next_size; Sized *child; if (index < 0 || index >= self->len) index = self->len / 2; next_size = self->len - index; ASSERT(index > 0, "split creates empty tree", -1); ASSERT(next_size > 0, "split creates empty tree", -1); next->data = BTree_Malloc(sizeof(BTreeItem) * next_size); if (!next->data) return -1; memcpy(next->data, self->data + index, sizeof(BTreeItem) * next_size); next->size = next_size; /* but don't set len until we succeed */ /* Set next's firstbucket. self->firstbucket is still correct. */ child = next->data[0].child; if (SameType_Check(self, child)) { PER_USE_OR_RETURN(child, -1); next->firstbucket = BTREE(child)->firstbucket; PER_UNUSE(child); } else next->firstbucket = BUCKET(child); Py_INCREF(next->firstbucket); next->len = next_size; self->len = index; return PER_CHANGED(self) >= 0 ? 0 : -1; } /* Fwd decl -- BTree_grow and BTree_split_root reference each other. */ static int BTree_grow(BTree *self, int index, int noval); /* Split the root. This is a little special because the root isn't a child * of anything else, and the root needs to retain its object identity. So * this routine moves the root's data into a new child, and splits the * latter. This leaves the root with two children. * * Return: * 0 OK * -1 error * * CAUTION: The caller must call PER_CHANGED on self. 
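 *
 * Informally, the shape change is (a sketch, not literal code):
 *
 *   before:  self holds all key/child pairs (len >= 2 * max_internal_size)
 *   after:   self->data[0].child is a new BTree holding those pairs,
 *            self->len == 1 and self->size == 2, and the final
 *            BTree_grow(self, 0, noval) call then splits that child in half.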
*/ static int BTree_split_root(BTree *self, int noval) { BTree *child; BTreeItem *d; /* Create a child BTree, and a new data vector for self. */ child = BTREE(PyObject_CallObject(OBJECT(Py_TYPE(self)), NULL)); if (!child) return -1; d = BTree_Malloc(sizeof(BTreeItem) * 2); if (!d) { Py_DECREF(child); return -1; } /* Move our data to new BTree. */ child->size = self->size; child->len = self->len; child->data = self->data; child->firstbucket = self->firstbucket; Py_INCREF(child->firstbucket); /* Point self to child and split the child. */ self->data = d; self->len = 1; self->size = 2; self->data[0].child = SIZED(child); /* transfers reference ownership */ return BTree_grow(self, 0, noval); } /* ** BTree_grow ** ** Grow a BTree ** ** Arguments: self The BTree ** index self->data[index].child needs to be split. index ** must be 0 if self is empty (len == 0), and a new ** empty bucket is created then. ** noval Boolean; is this a set (true) or mapping (false)? ** ** Returns: 0 on success ** -1 on failure ** ** CAUTION: If self is empty on entry, this routine adds an empty bucket. ** That isn't a legitimate BTree; if the caller doesn't put something in ** in the bucket (say, because of a later error), the BTree must be cleared ** to get rid of the empty bucket. */ static int BTree_grow(BTree *self, int index, int noval) { int i; Sized *v, *e = 0; BTreeItem *d; if (self->len == self->size) { if (self->size) { d = BTree_Realloc(self->data, sizeof(BTreeItem) * self->size * 2); if (d == NULL) return -1; self->data = d; self->size *= 2; } else { d = BTree_Malloc(sizeof(BTreeItem) * 2); if (d == NULL) return -1; self->data = d; self->size = 2; } } if (self->len) { long max_size = _max_internal_size(self); if (max_size < 0) return -1; d = self->data + index; v = d->child; /* Create a new object of the same type as the target value */ e = (Sized *)PyObject_CallObject((PyObject *)Py_TYPE(v), NULL); if (e == NULL) return -1; UNLESS(PER_USE(v)) { Py_DECREF(e); return -1; } /* Now split between the original (v) and the new (e) at the midpoint*/ if (SameType_Check(self, v)) i = BTree_split((BTree *)v, -1, (BTree *)e); else i = bucket_split((Bucket *)v, -1, (Bucket *)e); PER_ALLOW_DEACTIVATION(v); if (i < 0) { Py_DECREF(e); assert(PyErr_Occurred()); return -1; } index++; d++; if (self->len > index) /* Shift up the old values one array slot */ memmove(d+1, d, sizeof(BTreeItem)*(self->len-index)); if (SameType_Check(self, v)) { COPY_KEY(d->key, BTREE(e)->data->key); /* We take the unused reference from e, so there's no reason to INCREF! */ /* INCREF_KEY(self->data[1].key); */ } else { COPY_KEY(d->key, BUCKET(e)->keys[0]); INCREF_KEY(d->key); } d->child = e; self->len++; if (self->len >= max_size * 2) /* the root is huge */ return BTree_split_root(self, noval); } else { /* The BTree is empty. Create an empty bucket. See CAUTION in * the comments preceding. */ assert(index == 0); d = self->data; d->child = BTree_newBucket(self); if (d->child == NULL) return -1; self->len = 1; Py_INCREF(d->child); self->firstbucket = (Bucket *)d->child; } return 0; } /* Return the rightmost bucket reachable from following child pointers * from self. The caller gets a new reference to this bucket. Note that * bucket 'next' pointers are not followed: if self is an interior node * of a BTree, this returns the rightmost bucket in that node's subtree. * In case of error, returns NULL. * * self must not be a ghost; this isn't checked. The result may be a ghost. 
* * Pragmatics: Note that the rightmost bucket's last key is the largest * key in self's subtree. */ static Bucket * BTree_lastBucket(BTree *self) { Sized *pchild; Bucket *result; UNLESS (self->data && self->len) { IndexError(-1); /* is this the best action to take? */ return NULL; } pchild = self->data[self->len - 1].child; if (SameType_Check(self, pchild)) { self = BTREE(pchild); PER_USE_OR_RETURN(self, NULL); result = BTree_lastBucket(self); PER_UNUSE(self); } else { Py_INCREF(pchild); result = BUCKET(pchild); } return result; } static int BTree_deleteNextBucket(BTree *self) { Bucket *b; UNLESS (PER_USE(self)) return -1; b = BTree_lastBucket(self); if (b == NULL) goto err; if (Bucket_deleteNextBucket(b) < 0) goto err; Py_DECREF(b); PER_UNUSE(self); return 0; err: Py_XDECREF(b); PER_ALLOW_DEACTIVATION(self); return -1; } /* ** _BTree_clear ** ** Clears out all of the values in the BTree (firstbucket, keys, and children); ** leaving self an empty BTree. ** ** Arguments: self The BTree ** ** Returns: 0 on success ** -1 on failure ** ** Internal: Deallocation order is important. The danger is that a long ** list of buckets may get freed "at once" via decref'ing the first bucket, ** in which case a chain of consequenct Py_DECREF calls may blow the stack. ** Luckily, every bucket has a refcount of at least two, one due to being a ** BTree node's child, and another either because it's not the first bucket in ** the chain (so the preceding bucket points to it), or because firstbucket ** points to it. By clearing in the natural depth-first, left-to-right ** order, the BTree->bucket child pointers prevent Py_DECREF(bucket->next) ** calls from freeing bucket->next, and the maximum stack depth is equal ** to the height of the tree. **/ static int _BTree_clear(BTree *self) { const int len = self->len; if (self->firstbucket) { /* Obscure: The first bucket is pointed to at least by * self->firstbucket and data[0].child of whichever BTree node it's * a child of. However, if persistence is enabled then the latter * BTree node may be a ghost at this point, and so its pointers "don't * count": we can only rely on self's pointers being intact. */ #ifdef PERSISTENT ASSERT(Py_REFCNT(self->firstbucket) > 0, "Invalid firstbucket pointer", -1); #else ASSERT(Py_REFCNT(self->firstbucket) > 1, "Invalid firstbucket pointer", -1); #endif Py_DECREF(self->firstbucket); self->firstbucket = NULL; } if (self->data) { int i; if (len > 0) /* 0 is special because key 0 is trash */ { Py_DECREF(self->data[0].child); } for (i = 1; i < len; i++) { #ifdef KEY_TYPE_IS_PYOBJECT DECREF_KEY(self->data[i].key); #endif Py_DECREF(self->data[i].child); } free(self->data); self->data = NULL; } self->len = self->size = 0; return 0; } /* Set (value != 0) or delete (value=0) a tree item. If unique is non-zero, then only change if the key is new. If noval is non-zero, then don't set a value (the tree is a set). Return: -1 error 0 successful, and number of entries didn't change >0 successful, and number of entries did change Internal There are two distinct return values > 0: 1 Successful, number of entries changed, but firstbucket did not go away. 2 Successful, number of entries changed, firstbucket did go away. This can only happen on a delete (value == NULL). The caller may need to change its own firstbucket pointer, and in any case *someone* needs to adjust the 'next' pointer of the bucket immediately preceding the bucket that went away (it needs to point to the bucket immediately following the bucket that went away). 
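   A minimal caller sketch (illustrative only; BTree_setitem below is such a
   caller, and external callers can ignore the 1-versus-2 distinction, which
   only matters to the recursive internal call):

       int status = _BTree_set(tree, keyarg, value, 0, 0);
       if (status < 0)
           return -1;      (an exception is already set)
       if (status == 2)
           fix up this level's firstbucket and relink the preceding
           bucket's 'next' pointer, as described above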
*/ static int _BTree_set(BTree *self, PyObject *keyarg, PyObject *value, int unique, int noval) { int changed = 0; /* did I mutate? */ int min; /* index of child I searched */ BTreeItem *d; /* self->data[min] */ int childlength; /* len(self->data[min].child) */ int status; /* our return value; and return value from callee */ int self_was_empty; /* was self empty at entry? */ KEY_TYPE key; int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); if (!copied) return -1; #ifdef KEY_CHECK_ON_SET if (value && !KEY_CHECK_ON_SET(keyarg)) return -1; #endif PER_USE_OR_RETURN(self, -1); self_was_empty = self->len == 0; if (self_was_empty) { /* We're empty. Make room. */ if (value) { if (BTree_grow(self, 0, noval) < 0) goto Error; } else { /* Can't delete a key from an empty BTree. */ PyErr_SetObject(PyExc_KeyError, keyarg); goto Error; } } /* Find the right child to search, and hand the work off to it. */ BTREE_SEARCH(min, self, key, goto Error); d = self->data + min; #ifdef PERSISTENT PER_READCURRENT(self, goto Error); #endif if (SameType_Check(self, d->child)) status = _BTree_set(BTREE(d->child), keyarg, value, unique, noval); else { int bucket_changed = 0; status = _bucket_set(BUCKET(d->child), keyarg, value, unique, noval, &bucket_changed); #ifdef PERSISTENT /* If a BTree contains only a single bucket, BTree.__getstate__() * includes the bucket's entire state, and the bucket doesn't get * an oid of its own. So if we have a single oid-less bucket that * changed, it's *our* oid that should be marked as changed -- the * bucket doesn't have one. */ if (bucket_changed && self->len == 1 && self->data[0].child->oid == NULL) { changed = 1; } #endif } if (status == 0) goto Done; if (status < 0) goto Error; assert(status == 1 || status == 2); /* The child changed size. Get its new size. Note that since the tree * rooted at the child changed size, so did the tree rooted at self: * our status must be >= 1 too. */ UNLESS(PER_USE(d->child)) goto Error; childlength = d->child->len; PER_UNUSE(d->child); if (value) { /* A bucket got bigger -- if it's "too big", split it. */ int toobig; assert(status == 1); /* can be 2 only on deletes */ if (SameType_Check(self, d->child)) { long max_size = _max_internal_size(self); if (max_size < 0) return -1; toobig = childlength > max_size; } else { long max_size = _max_leaf_size(self); if (max_size < 0) return -1; toobig = childlength > max_size; } if (toobig) { if (BTree_grow(self, min, noval) < 0) goto Error; changed = 1; /* BTree_grow mutated self */ } goto Done; /* and status still == 1 */ } /* A bucket got smaller. This is much harder, and despite that we * don't try to rebalance the tree. */ if (min && childlength) { /* We removed a key. but the node child is non-empty. If the deleted key is the node key, then update the node key using the smallest key of the node child. This doesn't apply to the 0th node, whos key is unused. 
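           For example (an informal illustration): if d->key is 10, the
           delete removed 10 from the child, and the child's smallest
           remaining key is 12, then d->key is rewritten to 12 below so
           later searches keep routing into the correct child.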
*/ int _cmp = 1; TEST_KEY_SET_OR(_cmp, key, d->key) goto Error; if (_cmp == 0) /* Need to replace key with first key from child */ { Bucket *bucket; if (SameType_Check(self, d->child)) { UNLESS(PER_USE(d->child)) goto Error; bucket = BTREE(d->child)->firstbucket; PER_UNUSE(d->child); } else bucket = BUCKET(d->child); UNLESS(PER_USE(bucket)) goto Error; DECREF_KEY(d->key); COPY_KEY(d->key, bucket->keys[0]); INCREF_KEY(d->key); PER_UNUSE(bucket); if (PER_CHANGED(self) < 0) goto Error; } } if (status == 2) { /* The child must be a BTree because bucket.set never returns 2 */ /* Two problems to solve: May have to adjust our own firstbucket, * and the bucket that went away needs to get unlinked. */ if (min) { /* This wasn't our firstbucket, so no need to adjust ours (note * that it can't be the firstbucket of any node above us either). * Tell "the tree to the left" to do the unlinking. */ if (BTree_deleteNextBucket(BTREE(d[-1].child)) < 0) goto Error; status = 1; /* we solved the child's firstbucket problem */ } else { /* This was our firstbucket. Update to new firstbucket value. */ Bucket *nextbucket; UNLESS(PER_USE(d->child)) goto Error; nextbucket = BTREE(d->child)->firstbucket; PER_UNUSE(d->child); Py_XINCREF(nextbucket); Py_DECREF(self->firstbucket); self->firstbucket = nextbucket; changed = 1; /* The caller has to do the unlinking -- we can't. Also, since * it was our firstbucket, it may also be theirs. */ assert(status == 2); } } /* If the child isn't empty, we're done! We did all that was possible for * us to do with the firstbucket problems the child gave us, and since the * child isn't empty don't create any new firstbucket problems of our own. */ if (childlength) goto Done; /* The child became empty: we need to remove it from self->data. * But first, if we're a bottom-level node, we've got more bucket-fiddling * to set up. */ if (! SameType_Check(self, d->child)) { /* We're about to delete a bucket, so need to adjust bucket pointers. */ if (min) { /* It's not our first bucket, so we can tell the previous * bucket to adjust its reference to it. It can't be anyone * else's first bucket either, so the caller needn't do anything. */ if (Bucket_deleteNextBucket(BUCKET(d[-1].child)) < 0) goto Error; /* status should be 1, and already is: if it were 2, the * block above would have set it to 1 in its min != 0 branch. */ assert(status == 1); } else { Bucket *nextbucket; /* It's our first bucket. We can't unlink it directly. */ /* 'changed' will be set true by the deletion code following. */ UNLESS(PER_USE(d->child)) goto Error; nextbucket = BUCKET(d->child)->next; PER_UNUSE(d->child); Py_XINCREF(nextbucket); Py_DECREF(self->firstbucket); self->firstbucket = nextbucket; status = 2; /* we're giving our caller a new firstbucket problem */ } } /* Remove the child from self->data. */ Py_DECREF(d->child); #ifdef KEY_TYPE_IS_PYOBJECT if (min) { DECREF_KEY(d->key); } else if (self->len > 1) { /* We're deleting the first child of a BTree with more than one * child. The key at d+1 is about to be shifted into slot 0, * and hence never to be referenced again (the key in slot 0 is * trash). */ DECREF_KEY((d+1)->key); } /* Else min==0 and len==1: we're emptying the BTree entirely, and * there is no key in need of decrefing. 
*/ #endif --self->len; if (min < self->len) memmove(d, d+1, (self->len - min) * sizeof(BTreeItem)); changed = 1; Done: #ifdef PERSISTENT if (changed) { if (PER_CHANGED(self) < 0) goto Error; } #endif PER_UNUSE(self); return status; Error: assert(PyErr_Occurred()); if (self_was_empty) { /* BTree_grow may have left the BTree in an invalid state. Make * sure the tree is a legitimate empty tree. */ _BTree_clear(self); } PER_UNUSE(self); return -1; } /* ** BTree_setitem ** ** wrapper for _BTree_set ** ** Arguments: self The BTree ** key The key to insert ** v The value to insert ** ** Returns -1 on failure ** 0 on success */ static int BTree_setitem(BTree *self, PyObject *key, PyObject *v) { if (_BTree_set(self, key, v, 0, 0) < 0) return -1; return 0; } #ifdef PERSISTENT static PyObject * BTree__p_deactivate(BTree *self, PyObject *args, PyObject *keywords) { int ghostify = 1; PyObject *force = NULL; if (args && PyTuple_GET_SIZE(args) > 0) { PyErr_SetString(PyExc_TypeError, "_p_deactivate takes not positional arguments"); return NULL; } if (keywords) { int size = PyDict_Size(keywords); force = PyDict_GetItemString(keywords, "force"); if (force) size--; if (size) { PyErr_SetString(PyExc_TypeError, "_p_deactivate only accepts keyword arg force"); return NULL; } } /* Always clear our node size cache, whether we're in a jar or not. It is only read from the type anyway, and we'll do so on the next write after we get activated. */ self->max_internal_size = 0; self->max_leaf_size = 0; if (self->jar && self->oid) { ghostify = self->state == cPersistent_UPTODATE_STATE; if (!ghostify && force) { if (PyObject_IsTrue(force)) ghostify = 1; if (PyErr_Occurred()) return NULL; } if (ghostify) { if (_BTree_clear(self) < 0) return NULL; PER_GHOSTIFY(self); } } Py_INCREF(Py_None); return Py_None; } #endif static PyObject * BTree_clear(BTree *self) { UNLESS (PER_USE(self)) return NULL; if (self->len) { if (_BTree_clear(self) < 0) goto err; if (PER_CHANGED(self) < 0) goto err; } PER_UNUSE(self); Py_INCREF(Py_None); return Py_None; err: PER_UNUSE(self); return NULL; } /* * Return: * * For an empty BTree (self->len == 0), None. * * For a BTree with one child (self->len == 1), and that child is a bucket, * and that bucket has a NULL oid, a one-tuple containing a one-tuple * containing the bucket's state: * * ( * ( * child[0].__getstate__(), * ), * ) * * Else a two-tuple. The first element is a tuple interleaving the BTree's * keys and direct children, of size 2*self->len - 1 (key[0] is unused and * is not saved). The second element is the firstbucket: * * ( * (child[0], key[1], child[1], key[2], child[2], ..., * key[len-1], child[len-1]), * self->firstbucket * ) * * In the above, key[i] means self->data[i].key, and similarly for child[i]. */ static PyObject * BTree_getstate(BTree *self) { PyObject *r = NULL; PyObject *o; int i, l; UNLESS (PER_USE(self)) return NULL; if (self->len) { r = PyTuple_New(self->len * 2 - 1); if (r == NULL) goto err; if (self->len == 1 && Py_TYPE(self->data->child) != Py_TYPE(self) #ifdef PERSISTENT && BUCKET(self->data->child)->oid == NULL #endif ) { /* We have just one bucket. Save its data directly. 
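               The resulting state is then a 1-tuple wrapping a 1-tuple,
               matching the ((child[0].__getstate__(),),) layout described
               in the __getstate__ comment above; it is the same shape that
               get_bucket_state() unwraps during conflict resolution.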
*/ o = bucket_getstate((Bucket *)self->data->child); if (o == NULL) goto err; PyTuple_SET_ITEM(r, 0, o); ASSIGN(r, Py_BuildValue("(O)", r)); } else { for (i=0, l=0; i < self->len; i++) { if (i) { COPY_KEY_TO_OBJECT(o, self->data[i].key); PyTuple_SET_ITEM(r, l, o); l++; } o = (PyObject *)self->data[i].child; Py_INCREF(o); PyTuple_SET_ITEM(r,l,o); l++; } ASSIGN(r, Py_BuildValue("OO", r, self->firstbucket)); } } else { r = Py_None; Py_INCREF(r); } PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); return NULL; } static int _BTree_setstate(BTree *self, PyObject *state, int noval) { PyObject *items, *firstbucket = NULL; BTreeItem *d; int len, l, i, copied=1; PyTypeObject *leaftype = (noval ? &SetType : &BucketType); if (_BTree_clear(self) < 0) return -1; /* The state of a BTree can be one of the following: None -- an empty BTree A one-tuple -- a single bucket btree A two-tuple -- a BTree with more than one bucket See comments for BTree_getstate() for the details. */ if (state == Py_None) return 0; if (!PyArg_ParseTuple(state, "O|O:__setstate__", &items, &firstbucket)) return -1; if (!PyTuple_Check(items)) { PyErr_SetString(PyExc_TypeError, "tuple required for first state element"); return -1; } len = PyTuple_Size(items); ASSERT(len >= 0, "_BTree_setstate: items tuple has negative size", -1); len = (len + 1) / 2; assert(len > 0); /* If the BTree is empty, it's state is None. */ assert(self->size == 0); /* We called _BTree_clear(). */ self->data = BTree_Malloc(sizeof(BTreeItem) * len); if (self->data == NULL) return -1; self->size = len; for (i = 0, d = self->data, l = 0; i < len; i++, d++) { PyObject *v; if (i) { /* skip the first key slot */ COPY_KEY_FROM_ARG(d->key, PyTuple_GET_ITEM(items, l), copied); l++; if (!copied) return -1; INCREF_KEY(d->key); } v = PyTuple_GET_ITEM(items, l); if (PyTuple_Check(v)) { /* Handle the special case in __getstate__() for a BTree with a single bucket. */ d->child = BTree_newBucket(self); if (!d->child) return -1; if (noval) { if (_set_setstate(BUCKET(d->child), v) < 0) return -1; } else { if (_bucket_setstate(BUCKET(d->child), v) < 0) return -1; } } else { if (!(SameType_Check(self, v) || PyObject_IsInstance(v, (PyObject *)leaftype))) { PyErr_Format(PyExc_TypeError, "tree child %s is neither %s nor %s", Py_TYPE(v)->tp_name, Py_TYPE(self)->tp_name, leaftype->tp_name); return -1; } d->child = (Sized *)v; Py_INCREF(v); } l++; } if (!firstbucket) firstbucket = (PyObject *)self->data->child; if (!PyObject_IsInstance(firstbucket, (PyObject *)leaftype)) { PyErr_SetString(PyExc_TypeError, "No firstbucket in non-empty BTree"); return -1; } self->firstbucket = BUCKET(firstbucket); Py_INCREF(firstbucket); #ifndef PERSISTENT /* firstbucket is also the child of some BTree node, but that node may * be a ghost if persistence is enabled. */ assert(Py_REFCNT(self->firstbucket) > 1); #endif self->len = len; return 0; } static PyObject * BTree_setstate(BTree *self, PyObject *arg) { int r; PER_PREVENT_DEACTIVATION(self); r = _BTree_setstate(self, arg, 0); PER_UNUSE(self); if (r < 0) return NULL; Py_INCREF(Py_None); return Py_None; } #ifdef PERSISTENT /* Recognize the special cases of a BTree that's empty or contains a single * bucket. In the former case, return a borrowed reference to Py_None. * In this single-bucket case, the bucket state is embedded directly in the * BTree state, like so: * * ( * ( * thebucket.__getstate__(), * ), * ) * * When this obtains, return a borrowed reference to thebucket.__getstate__(). * Else return NULL with an exception set. 
The exception should always be * ConflictError then, but may be TypeError if the state makes no sense at all * for a BTree (corrupted or hostile state). */ PyObject * get_bucket_state(PyObject *t) { if (t == Py_None) return Py_None; /* an empty BTree */ if (! PyTuple_Check(t)) { PyErr_SetString(PyExc_TypeError, "_p_resolveConflict: expected tuple or None for state"); return NULL; } if (PyTuple_GET_SIZE(t) == 2) { /* A non-degenerate BTree. */ return merge_error(-1, -1, -1, 11); } /* We're in the one-bucket case. */ if (PyTuple_GET_SIZE(t) != 1) { PyErr_SetString(PyExc_TypeError, "_p_resolveConflict: expected 1- or 2-tuple for state"); return NULL; } t = PyTuple_GET_ITEM(t, 0); if (! PyTuple_Check(t) || PyTuple_GET_SIZE(t) != 1) { PyErr_SetString(PyExc_TypeError, "_p_resolveConflict: expected 1-tuple containing " "bucket state"); return NULL; } t = PyTuple_GET_ITEM(t, 0); if (! PyTuple_Check(t)) { PyErr_SetString(PyExc_TypeError, "_p_resolveConflict: expected tuple for bucket state"); return NULL; } return t; } /* Tricky. The only kind of BTree conflict we can actually potentially * resolve is the special case of a BTree containing a single bucket, * in which case this becomes a fancy way of calling the bucket conflict * resolution code. */ static PyObject * BTree__p_resolveConflict(BTree *self, PyObject *args) { PyObject *s[3]; PyObject *x, *y, *z; if (!PyArg_ParseTuple(args, "OOO", &x, &y, &z)) return NULL; s[0] = get_bucket_state(x); if (s[0] == NULL) return NULL; s[1] = get_bucket_state(y); if (s[1] == NULL) return NULL; s[2] = get_bucket_state(z); if (s[2] == NULL) return NULL; if (PyObject_IsInstance((PyObject *)self, (PyObject *)&BTreeType)) x = _bucket__p_resolveConflict(OBJECT(&BucketType), s); else x = _bucket__p_resolveConflict(OBJECT(&SetType), s); if (x == NULL) return NULL; return Py_BuildValue("((N))", x); } #endif /* BTree_findRangeEnd -- Find one end, expressed as a bucket and position, for a range search. If low, return bucket and index of the smallest item >= key, otherwise return bucket and index of the largest item <= key. If exclude_equal, exact matches aren't acceptable; if one is found, move right if low, or left if !low (this is for range searches exclusive of an endpoint). Return: -1 Error; offset and bucket unchanged 0 Not found; offset and bucket unchanged 1 Correct bucket and offset stored; the caller owns a new reference to the bucket. Internal: We do binary searches in BTree nodes downward, at each step following C(i) where K(i) <= key < K(i+1). As always, K(i) <= C(i) < K(i+1) too. (See Maintainer.txt for the meaning of that notation.) That eventually leads to a bucket where we do Bucket_findRangeEnd. That usually works, but there are two cases where it can fail to find the correct answer: 1. On a low search, we find a bucket with keys >= K(i), but that doesn't imply there are keys in the bucket >= key. For example, suppose a bucket has keys in 1..100, its successor's keys are in 200..300, and we're doing a low search on 150. We'll end up in the first bucket, but there are no keys >= 150 in it. K(i+1) > key, though, and all the keys in C(i+1) >= K(i+1) > key, so the first key in the next bucket (if any) is the correct result. This is easy to find by following the bucket 'next' pointer. 2. 
On a high search, again that the keys in the bucket are >= K(i) doesn't imply that any key in the bucket is <= key, but it's harder for this to fail (and an earlier version of this routine didn't catch it): if K(i) itself is in the bucket, it works (then K(i) <= key is *a* key in the bucket that's in the desired range). But when keys get deleted from buckets, they aren't also deleted from BTree nodes, so there's no guarantee that K(i) is in the bucket. For example, delete the smallest key S from some bucket, and S remains in the interior BTree nodes. Do a high search for S, and the BTree nodes direct the search to the bucket S used to be in, but all keys remaining in that bucket are > S. The largest key in the *preceding* bucket (if any) is < K(i), though, and K(i) <= key, so the largest key in the preceding bucket is < key and so is the proper result. This is harder to get at efficiently, as buckets are linked only in the increasing direction. While we're searching downward, deepest_smaller is set to the node deepest in the tree where we *could* have gone to the left of C(i). The rightmost bucket in deepest_smaller's subtree is the bucket preceding the bucket we find at first. This is clumsy to get at, but efficient. */ static int BTree_findRangeEnd(BTree *self, PyObject *keyarg, int low, int exclude_equal, Bucket **bucket, int *offset) { Sized *deepest_smaller = NULL; /* last possibility to move left */ int deepest_smaller_is_btree = 0; /* Boolean; if false, it's a bucket */ Bucket *pbucket; int self_got_rebound = 0; /* Boolean; when true, deactivate self */ int result = -1; /* Until proven innocent */ int i; KEY_TYPE key; int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); UNLESS (copied) return -1; /* We don't need to: PER_USE_OR_RETURN(self, -1); because the caller does. */ UNLESS (self->data && self->len) return 0; /* Search downward until hitting a bucket, stored in pbucket. */ for (;;) { Sized *pchild; int pchild_is_btree; BTREE_SEARCH(i, self, key, goto Done); pchild = self->data[i].child; pchild_is_btree = SameType_Check(self, pchild); if (i) { deepest_smaller = self->data[i-1].child; deepest_smaller_is_btree = pchild_is_btree; } if (pchild_is_btree) { if (self_got_rebound) { PER_UNUSE(self); } self = BTREE(pchild); self_got_rebound = 1; PER_USE_OR_RETURN(self, -1); } else { pbucket = BUCKET(pchild); break; } } /* Search the bucket for a suitable key. */ i = Bucket_findRangeEnd(pbucket, keyarg, low, exclude_equal, offset); if (i < 0) goto Done; if (i > 0) { Py_INCREF(pbucket); *bucket = pbucket; result = 1; goto Done; } /* This may be one of the two difficult cases detailed in the comments. */ if (low) { Bucket *next; UNLESS(PER_USE(pbucket)) goto Done; next = pbucket->next; if (next) { result = 1; Py_INCREF(next); *bucket = next; *offset = 0; } else result = 0; PER_UNUSE(pbucket); } /* High-end search: if it's possible to go left, do so. */ else if (deepest_smaller) { if (deepest_smaller_is_btree) { UNLESS(PER_USE(deepest_smaller)) goto Done; /* We own the reference this returns. 
*/ pbucket = BTree_lastBucket(BTREE(deepest_smaller)); PER_UNUSE(deepest_smaller); if (pbucket == NULL) goto Done; /* error */ } else { pbucket = BUCKET(deepest_smaller); Py_INCREF(pbucket); } UNLESS(PER_USE(pbucket)) goto Done; result = 1; *bucket = pbucket; /* transfer ownership to caller */ *offset = pbucket->len - 1; PER_UNUSE(pbucket); } else result = 0; /* simply not found */ Done: if (self_got_rebound) { PER_UNUSE(self); } return result; } static PyObject * BTree_maxminKey(BTree *self, PyObject *args, int min) { PyObject *key=0; Bucket *bucket = NULL; int offset, rc; int empty_tree = 1; UNLESS (PyArg_ParseTuple(args, "|O", &key)) return NULL; UNLESS (PER_USE(self)) return NULL; UNLESS (self->data && self->len) goto empty; /* Find the range */ if (key && key != Py_None) { if ((rc = BTree_findRangeEnd(self, key, min, 0, &bucket, &offset)) <= 0) { if (rc < 0) goto err; empty_tree = 0; goto empty; } PER_UNUSE(self); UNLESS (PER_USE(bucket)) { Py_DECREF(bucket); return NULL; } } else if (min) { bucket = self->firstbucket; PER_UNUSE(self); PER_USE_OR_RETURN(bucket, NULL); Py_INCREF(bucket); offset = 0; } else { bucket = BTree_lastBucket(self); PER_UNUSE(self); UNLESS (PER_USE(bucket)) { Py_DECREF(bucket); return NULL; } assert(bucket->len); offset = bucket->len - 1; } COPY_KEY_TO_OBJECT(key, bucket->keys[offset]); PER_UNUSE(bucket); Py_DECREF(bucket); return key; empty: PyErr_SetString(PyExc_ValueError, empty_tree ? "empty tree" : "no key satisfies the conditions"); err: PER_UNUSE(self); if (bucket) { PER_UNUSE(bucket); Py_DECREF(bucket); } return NULL; } static PyObject * BTree_minKey(BTree *self, PyObject *args) { return BTree_maxminKey(self, args, 1); } static PyObject * BTree_maxKey(BTree *self, PyObject *args) { return BTree_maxminKey(self, args, 0); } /* ** BTree_rangeSearch ** ** Generates a BTreeItems object based on the two indexes passed in, ** being the range between them. ** */ static PyObject * BTree_rangeSearch(BTree *self, PyObject *args, PyObject *kw, char type) { PyObject *min = Py_None; PyObject *max = Py_None; int excludemin = 0; int excludemax = 0; int rc; Bucket *lowbucket = NULL; Bucket *highbucket = NULL; int lowoffset; int highoffset; PyObject *result; if (args) { if (! 
PyArg_ParseTupleAndKeywords(args, kw, "|OOii", search_keywords, &min, &max, &excludemin, &excludemax)) return NULL; } UNLESS (PER_USE(self)) return NULL; UNLESS (self->data && self->len) goto empty; /* Find the low range */ if (min != Py_None) { if ((rc = BTree_findRangeEnd(self, min, 1, excludemin, &lowbucket, &lowoffset)) <= 0) { if (rc < 0) goto err; goto empty; } } else { lowbucket = self->firstbucket; lowoffset = 0; if (excludemin) { int bucketlen; UNLESS (PER_USE(lowbucket)) goto err; bucketlen = lowbucket->len; PER_UNUSE(lowbucket); if (bucketlen > 1) lowoffset = 1; else if (self->len < 2) goto empty; else { /* move to first item in next bucket */ Bucket *next; UNLESS (PER_USE(lowbucket)) goto err; next = lowbucket->next; PER_UNUSE(lowbucket); assert(next != NULL); lowbucket = next; /* and lowoffset is still 0 */ assert(lowoffset == 0); } } Py_INCREF(lowbucket); } /* Find the high range */ if (max != Py_None) { if ((rc = BTree_findRangeEnd(self, max, 0, excludemax, &highbucket, &highoffset)) <= 0) { Py_DECREF(lowbucket); if (rc < 0) goto err; goto empty; } } else { int bucketlen; highbucket = BTree_lastBucket(self); assert(highbucket != NULL); /* we know self isn't empty */ UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets; bucketlen = highbucket->len; PER_UNUSE(highbucket); highoffset = bucketlen - 1; if (excludemax) { if (highoffset > 0) --highoffset; else if (self->len < 2) goto empty_and_decref_buckets; else /* move to last item of preceding bucket */ { int status; assert(highbucket != self->firstbucket); Py_DECREF(highbucket); status = PreviousBucket(&highbucket, self->firstbucket); if (status < 0) { Py_DECREF(lowbucket); goto err; } assert(status > 0); Py_INCREF(highbucket); UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets; highoffset = highbucket->len - 1; PER_UNUSE(highbucket); } } assert(highoffset >= 0); } /* It's still possible that the range is empty, even if min < max. For * example, if min=3 and max=4, and 3 and 4 aren't in the BTree, but 2 and * 5 are, then the low position points to the 5 now and the high position * points to the 2 now. They're not necessarily even in the same bucket, * so there's no trick we can play with pointer compares to get out * cheap in general. */ if (lowbucket == highbucket && lowoffset > highoffset) goto empty_and_decref_buckets; /* definitely empty */ /* The buckets differ, or they're the same and the offsets show a non- * empty range. */ if (min != Py_None && max != Py_None && /* both args user-supplied */ lowbucket != highbucket) /* and different buckets */ { KEY_TYPE first; KEY_TYPE last; int cmp; /* Have to check the hard way: see how the endpoints compare. 
*/ UNLESS (PER_USE(lowbucket)) goto err_and_decref_buckets; COPY_KEY(first, lowbucket->keys[lowoffset]); PER_UNUSE(lowbucket); UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets; COPY_KEY(last, highbucket->keys[highoffset]); PER_UNUSE(highbucket); TEST_KEY_SET_OR(cmp, first, last) goto err_and_decref_buckets; if (cmp > 0) goto empty_and_decref_buckets; } PER_UNUSE(self); result = newBTreeItems(type, lowbucket, lowoffset, highbucket, highoffset); Py_DECREF(lowbucket); Py_DECREF(highbucket); return result; err_and_decref_buckets: Py_DECREF(lowbucket); Py_DECREF(highbucket); err: PER_UNUSE(self); return NULL; empty_and_decref_buckets: Py_DECREF(lowbucket); Py_DECREF(highbucket); empty: PER_UNUSE(self); return newBTreeItems(type, 0, 0, 0, 0); } /* ** BTree_keys */ static PyObject * BTree_keys(BTree *self, PyObject *args, PyObject *kw) { return BTree_rangeSearch(self, args, kw, 'k'); } /* ** BTree_values */ static PyObject * BTree_values(BTree *self, PyObject *args, PyObject *kw) { return BTree_rangeSearch(self, args, kw, 'v'); } /* ** BTree_items */ static PyObject * BTree_items(BTree *self, PyObject *args, PyObject *kw) { return BTree_rangeSearch(self, args, kw, 'i'); } static PyObject * BTree_byValue(BTree *self, PyObject *omin) { PyObject *r=0, *o=0, *item=0; VALUE_TYPE min; VALUE_TYPE v; int copied=1; SetIteration it = {0, 0, 1}; UNLESS (PER_USE(self)) return NULL; COPY_VALUE_FROM_ARG(min, omin, copied); UNLESS(copied) return NULL; UNLESS (r=PyList_New(0)) goto err; it.set=BTree_rangeSearch(self, NULL, NULL, 'i'); UNLESS(it.set) goto err; if (nextBTreeItems(&it) < 0) goto err; while (it.position >= 0) { if (TEST_VALUE(it.value, min) >= 0) { UNLESS (item = PyTuple_New(2)) goto err; COPY_KEY_TO_OBJECT(o, it.key); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 1, o); COPY_VALUE(v, it.value); NORMALIZE_VALUE(v, min); COPY_VALUE_TO_OBJECT(o, v); DECREF_VALUE(v); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 0, o); if (PyList_Append(r, item) < 0) goto err; Py_DECREF(item); item = 0; } if (nextBTreeItems(&it) < 0) goto err; } item=PyObject_GetAttr(r,sort_str); UNLESS (item) goto err; ASSIGN(item, PyObject_CallObject(item, NULL)); UNLESS (item) goto err; ASSIGN(item, PyObject_GetAttr(r, reverse_str)); UNLESS (item) goto err; ASSIGN(item, PyObject_CallObject(item, NULL)); UNLESS (item) goto err; Py_DECREF(item); finiSetIteration(&it); PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); finiSetIteration(&it); Py_XDECREF(item); return NULL; } /* ** BTree_getm */ static PyObject * BTree_getm(BTree *self, PyObject *args) { PyObject *key, *d=Py_None, *r; UNLESS (PyArg_ParseTuple(args, "O|O", &key, &d)) return NULL; if ((r=_BTree_get(self, key, 0, _BGET_REPLACE_TYPE_ERROR))) return r; UNLESS (BTree_ShouldSuppressKeyError()) return NULL; PyErr_Clear(); Py_INCREF(d); return d; } static PyObject * BTree_setdefault(BTree *self, PyObject *args) { PyObject *key; PyObject *failobj; /* default */ PyObject *value; /* return value */ if (! PyArg_UnpackTuple(args, "setdefault", 2, 2, &key, &failobj)) return NULL; value = _BTree_get(self, key, 0, _BGET_ALLOW_TYPE_ERROR); if (value != NULL) return value; /* The key isn't in the tree. If that's not due to a KeyError exception, * pass back the unexpected exception. */ if (! BTree_ShouldSuppressKeyError()) return NULL; PyErr_Clear(); /* Associate `key` with `failobj` in the tree, and return `failobj`. 
*/ value = failobj; if (_BTree_set(self, key, failobj, 0, 0) < 0) value = NULL; Py_XINCREF(value); return value; } /* forward declaration */ static Py_ssize_t BTree_length_or_nonzero(BTree *self, int nonzero); static PyObject * BTree_pop(BTree *self, PyObject *args) { PyObject *key; PyObject *failobj = NULL; /* default */ PyObject *value; /* return value */ if (! PyArg_UnpackTuple(args, "pop", 1, 2, &key, &failobj)) return NULL; value = _BTree_get(self, key, 0, _BGET_ALLOW_TYPE_ERROR); if (value != NULL) { /* Delete key and associated value. */ if (_BTree_set(self, key, NULL, 0, 0) < 0) { Py_DECREF(value); return NULL;; } return value; } /* The key isn't in the tree. If that's not due to a KeyError exception, * pass back the unexpected exception. */ if (! BTree_ShouldSuppressKeyError()) return NULL; if (failobj != NULL) { /* Clear the KeyError and return the explicit default. */ PyErr_Clear(); Py_INCREF(failobj); return failobj; } /* No default given. The only difference in this case is the error * message, which depends on whether the tree is empty. */ if (BTree_length_or_nonzero(self, 1) == 0) /* tree is empty */ PyErr_SetString(PyExc_KeyError, "pop(): BTree is empty"); return NULL; } static PyObject* BTree_popitem(BTree* self, PyObject* args) { PyObject* key = NULL; PyObject* pop_args = NULL; PyObject* result_val = NULL; PyObject* result = NULL; if (PyTuple_Size(args) != 0) { PyErr_SetString(PyExc_TypeError, "popitem(): Takes no arguments."); return NULL; } key = BTree_minKey(self, args); /* reuse existing empty tuple. */ if (!key) { PyErr_Clear(); PyErr_SetString(PyExc_KeyError, "popitem(): empty BTree."); return NULL; } pop_args = PyTuple_Pack(1, key); if (pop_args) { result_val = BTree_pop(self, pop_args); Py_DECREF(pop_args); if (result_val) { result = PyTuple_Pack(2, key, result_val); Py_DECREF(result_val); } } Py_DECREF(key); return result; } /* Search BTree self for key. This is the sq_contains slot of the * PySequenceMethods. * * Return: * -1 error * 0 not found * 1 found */ static int BTree_contains(BTree *self, PyObject *key) { PyObject *asobj = _BTree_get(self, key, 1, _BGET_REPLACE_TYPE_ERROR); int result = -1; if (asobj != NULL) { result = INT_AS_LONG(asobj) ? 1 : 0; Py_DECREF(asobj); } else if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); result = 0; } return result; } static PyObject * BTree_has_key(BTree *self, PyObject *key) { int result = -1; result = BTree_contains(self, key); if (result == -1) { return NULL; } if (result) Py_RETURN_TRUE; Py_RETURN_FALSE; } static PyObject * BTree_addUnique(BTree *self, PyObject *args) { int grew; PyObject *key, *v; UNLESS (PyArg_ParseTuple(args, "OO", &key, &v)) return NULL; if ((grew=_BTree_set(self, key, v, 1, 0)) < 0) return NULL; return INT_FROM_LONG(grew); } /**************************************************************************/ /* Iterator support. */ /* A helper to build all the iterators for BTrees and TreeSets. * If args is NULL, the iterator spans the entire structure. Else it's an * argument tuple, with optional low and high arguments. * kind is 'k', 'v' or 'i'. * Returns a BTreeIter object, or NULL if error. */ static PyObject * buildBTreeIter(BTree *self, PyObject *args, PyObject *kw, char kind) { BTreeIter *result = NULL; BTreeItems *items = (BTreeItems *)BTree_rangeSearch(self, args, kw, kind); if (items) { result = BTreeIter_new(items); Py_DECREF(items); } return (PyObject *)result; } /* The implementation of iter(BTree_or_TreeSet); the BTree tp_iter slot. 
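* Illustrative sketch (assuming BTrees.OOBTree): plain iteration yields keys
* in sorted order, while iterkeys()/itervalues()/iteritems() also accept the
* optional range arguments handled by BTree_rangeSearch above.
*
*   >>> from BTrees.OOBTree import OOBTree
*   >>> t = OOBTree({1: 'a', 2: 'b', 3: 'c'})
*   >>> [k for k in t]
*   [1, 2, 3]
*   >>> list(t.iteritems(2, 3))
*   [(2, 'b'), (3, 'c')]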
*/ static PyObject * BTree_getiter(BTree *self) { return buildBTreeIter(self, NULL, NULL, 'k'); } /* The implementation of BTree.iterkeys(). */ static PyObject * BTree_iterkeys(BTree *self, PyObject *args, PyObject *kw) { return buildBTreeIter(self, args, kw, 'k'); } /* The implementation of BTree.itervalues(). */ static PyObject * BTree_itervalues(BTree *self, PyObject *args, PyObject *kw) { return buildBTreeIter(self, args, kw, 'v'); } /* The implementation of BTree.iteritems(). */ static PyObject * BTree_iteritems(BTree *self, PyObject *args, PyObject *kw) { return buildBTreeIter(self, args, kw, 'i'); } /* End of iterator support. */ /* Caution: Even though the _firstbucket attribute is read-only, a program could do arbitrary damage to the btree internals. For example, it could call clear() on a bucket inside a BTree. We need to decide if the convenience for inspecting BTrees is worth the risk. */ static struct PyMemberDef BTree_members[] = { {"_firstbucket", T_OBJECT, offsetof(BTree, firstbucket), READONLY}, {NULL} }; static struct PyMethodDef BTree_methods[] = { {"__getstate__", (PyCFunction) BTree_getstate, METH_NOARGS, "__getstate__() -> state\n\n" "Return the picklable state of the BTree."}, {"__setstate__", (PyCFunction) BTree_setstate, METH_O, "__setstate__(state)\n\n" "Set the state of the BTree."}, {"has_key", (PyCFunction) BTree_has_key, METH_O, "has_key(key)\n\n" "Return true if the BTree contains the given key."}, {"keys", (PyCFunction) BTree_keys, METH_VARARGS | METH_KEYWORDS, "keys([min, max]) -> list of keys\n\n" "Returns the keys of the BTree. If min and max are supplied, only\n" "keys greater than min and less than max are returned."}, {"values", (PyCFunction) BTree_values, METH_VARARGS | METH_KEYWORDS, "values([min, max]) -> list of values\n\n" "Returns the values of the BTree. If min and max are supplied, only\n" "values corresponding to keys greater than min and less than max are\n" "returned."}, {"items", (PyCFunction) BTree_items, METH_VARARGS | METH_KEYWORDS, "items([min, max]) -> -- list of key, value pairs\n\n" "Returns the items of the BTree. If min and max are supplied, only\n" "items with keys greater than min and less than max are returned."}, {"byValue", (PyCFunction) BTree_byValue, METH_O, "byValue(min) -> list of value, key pairs\n\n" "Returns list of value, key pairs where the value is >= min. The\n" "list is sorted by value. Note that items() returns keys in the\n" "opposite order."}, {"get", (PyCFunction) BTree_getm, METH_VARARGS, "get(key[, default=None]) -> Value for key or default\n\n" "Return the value or the default if the key is not found."}, {"setdefault", (PyCFunction) BTree_setdefault, METH_VARARGS, "D.setdefault(k, d) -> D.get(k, d), also set D[k]=d if k not in D.\n\n" "Return the value like get() except that if key is missing, d is both\n" "returned and inserted into the BTree as the value of k."}, {"pop", (PyCFunction) BTree_pop, METH_VARARGS, "D.pop(k[, d]) -> v, remove key and return the corresponding value.\n\n" "If key is not found, d is returned if given, otherwise KeyError\n" "is raised."}, {"popitem", (PyCFunction)BTree_popitem, METH_VARARGS, "D.popitem() -> (k, v), remove and return some (key, value) pair\n" "as a 2-tuple; but raise KeyError if D is empty."}, {"maxKey", (PyCFunction) BTree_maxKey, METH_VARARGS, "maxKey([max]) -> key\n\n" "Return the largest key in the BTree. 
If max is specified, return\n" "the largest key <= max."}, {"minKey", (PyCFunction) BTree_minKey, METH_VARARGS, "minKey([mi]) -> key\n\n" "Return the smallest key in the BTree. If min is specified, return\n" "the smallest key >= min."}, {"clear", (PyCFunction) BTree_clear, METH_NOARGS, "clear()\n\nRemove all of the items from the BTree."}, {"insert", (PyCFunction)BTree_addUnique, METH_VARARGS, "insert(key, value) -> 0 or 1\n\n" "Add an item if the key is not already used. Return 1 if the item was\n" "added, or 0 otherwise."}, {"update", (PyCFunction) Mapping_update, METH_O, "update(collection)\n\n Add the items from the given collection."}, {"iterkeys", (PyCFunction) BTree_iterkeys, METH_VARARGS | METH_KEYWORDS, "B.iterkeys([min[,max]]) -> an iterator over the keys of B"}, {"itervalues", (PyCFunction) BTree_itervalues, METH_VARARGS | METH_KEYWORDS, "B.itervalues([min[,max]]) -> an iterator over the values of B"}, {"iteritems", (PyCFunction) BTree_iteritems, METH_VARARGS | METH_KEYWORDS, "B.iteritems([min[,max]]) -> an iterator over the (key, value) " "items of B"}, {"_check", (PyCFunction) BTree_check, METH_NOARGS, "Perform sanity check on BTree, and raise exception if flawed."}, #ifdef PERSISTENT {"_p_resolveConflict", (PyCFunction) BTree__p_resolveConflict, METH_VARARGS, "_p_resolveConflict() -- Reinitialize from a newly created copy"}, {"_p_deactivate", (PyCFunction) BTree__p_deactivate, METH_VARARGS | METH_KEYWORDS, "_p_deactivate()\n\nReinitialize from a newly created copy."}, #endif {NULL, NULL} }; static int BTree_init(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *v = NULL; BTREE(self)->max_leaf_size = 0; BTREE(self)->max_internal_size = 0; if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "BTree", &v)) return -1; if (v) return update_from_seq(self, v); else return 0; } static void BTree_dealloc(BTree *self) { PyObject_GC_UnTrack((PyObject *)self); if (self->state != cPersistent_GHOST_STATE) { _BTree_clear(self); } cPersistenceCAPI->pertype->tp_dealloc((PyObject *)self); } static int BTree_traverse(BTree *self, visitproc visit, void *arg) { int err = 0; int i, len; #define VISIT(SLOT) \ if (SLOT) { \ err = visit((PyObject *)(SLOT), arg); \ if (err) \ goto Done; \ } if (Py_TYPE(self) == &BTreeType) assert(Py_TYPE(self)->tp_dictoffset == 0); /* Call our base type's traverse function. Because BTrees are * subclasses of Peristent, there must be one. */ err = cPersistenceCAPI->pertype->tp_traverse((PyObject *)self, visit, arg); if (err) goto Done; /* If this is registered with the persistence system, cleaning up cycles * is the database's problem. It would be horrid to unghostify BTree * nodes here just to chase pointers every time gc runs. */ if (self->state == cPersistent_GHOST_STATE) goto Done; len = self->len; #ifdef KEY_TYPE_IS_PYOBJECT /* Keys are Python objects so need to be traversed. Note that the * key 0 slot is unused and should not be traversed. */ for (i = 1; i < len; i++) VISIT(self->data[i].key); #endif /* Children are always pointers, and child 0 is legit. */ for (i = 0; i < len; i++) VISIT(self->data[i].child); VISIT(self->firstbucket); Done: return err; #undef VISIT } static int BTree_tp_clear(BTree *self) { if (self->state != cPersistent_GHOST_STATE) _BTree_clear(self); return 0; } /* * Return the number of elements in a BTree. nonzero is a Boolean, and * when true requests just a non-empty/empty result. Testing for emptiness * is efficient (constant-time). Getting the true length takes time * proportional to the number of leaves (buckets). 
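* Seen from Python, the distinction looks like this (a small illustrative
* sketch, assuming BTrees.OOBTree): truth-testing only checks whether a
* first bucket exists, while len() has to walk the whole bucket chain.
*
*   >>> from BTrees.OOBTree import OOBTree
*   >>> t = OOBTree({1: 'a', 2: 'b'})
*   >>> bool(t)   # constant time
*   True
*   >>> len(t)    # proportional to the number of buckets
*   2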
* * Return: * When nonzero true: * -1 error * 0 empty * 1 not empty * When nonzero false (possibly expensive!): * -1 error * >= 0 number of elements. */ static Py_ssize_t BTree_length_or_nonzero(BTree *self, int nonzero) { int result; Bucket *b; Bucket *next; PER_USE_OR_RETURN(self, -1); b = self->firstbucket; PER_UNUSE(self); if (nonzero) return b != NULL; result = 0; while (b) { PER_USE_OR_RETURN(b, -1); result += b->len; next = b->next; PER_UNUSE(b); b = next; } return result; } static Py_ssize_t BTree_length(BTree *self) { return BTree_length_or_nonzero(self, 0); } static PyMappingMethods BTree_as_mapping = { (lenfunc)BTree_length, /* mp_length */ (binaryfunc)BTree_get, /* mp_subscript */ (objobjargproc)BTree_setitem, /* mp_ass_subscript */ }; static PySequenceMethods BTree_as_sequence = { (lenfunc)0, /* sq_length */ (binaryfunc)0, /* sq_concat */ (ssizeargfunc)0, /* sq_repeat */ (ssizeargfunc)0, /* sq_item */ (ssizessizeargfunc)0, /* sq_slice */ (ssizeobjargproc)0, /* sq_ass_item */ (ssizessizeobjargproc)0, /* sq_ass_slice */ (objobjproc)BTree_contains, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; static Py_ssize_t BTree_nonzero(BTree *self) { return BTree_length_or_nonzero(self, 1); } static PyNumberMethods BTree_as_number_for_nonzero = { 0, /* nb_add */ bucket_sub, /* nb_subtract */ 0, /* nb_multiply */ 0, /* nb_remainder */ 0, /* nb_divmod */ 0, /* nb_power */ 0, /* nb_negative */ 0, /* nb_positive */ 0, /* nb_absolute */ (inquiry)BTree_nonzero, /* nb_nonzero */ (unaryfunc)0, /* nb_invert */ (binaryfunc)0, /* nb_lshift */ (binaryfunc)0, /* nb_rshift */ bucket_and, /* nb_and */ (binaryfunc)0, /* nb_xor */ bucket_or, /* nb_or */ }; static PyObject* BTreeType_setattro_allowed_names; /* initialized in module */ static int BTreeType_setattro(PyTypeObject* type, PyObject* name, PyObject* value) { /* type.tp_setattro prohibits setting any attributes on a built-in type, so we need to use our own (metaclass) type to handle it. The set of allowable values needs to be carefully controlled (e.g., setting methods would be bad). Alternately, we could use heap-allocated types when they are supported an all the versions we care about, because those do allow setting attributes. 
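  For example (a hedged sketch: the actual allow list is whatever the module
  initialization puts into BTreeType_setattro_allowed_names; the per-class
  sizing knobs are assumed here to be on it):

    >>> from BTrees.OOBTree import OOBTree
    >>> OOBTree.max_leaf_size = 500   # assumed to be an allowed name
    >>> OOBTree.max_leaf_size
    500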
*/ int allowed; allowed = PySequence_Contains(BTreeType_setattro_allowed_names, name); if (allowed < 0) { return -1; } if (allowed) { PyDict_SetItem(type->tp_dict, name, value); PyType_Modified(type); if (PyErr_Occurred()) { return -1; } return 0; } return PyType_Type.tp_setattro((PyObject*)type, name, value); } static PyTypeObject BTreeTypeType = { PyVarObject_HEAD_INIT(NULL, 0) MODULE_NAME MOD_NAME_PREFIX "BTreeType", 0, /* tp_basicsize */ 0, /* tp_itemsize */ 0, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ (setattrofunc)BTreeType_setattro, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ 0, /* tp_members */ }; static PyTypeObject BTreeType = { PyVarObject_HEAD_INIT(&BTreeTypeType, 0) MODULE_NAME MOD_NAME_PREFIX "BTree", /* tp_name */ sizeof(BTree), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)BTree_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ &BTree_as_number_for_nonzero, /* tp_as_number */ &BTree_as_sequence, /* tp_as_sequence */ &BTree_as_mapping, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ 0, /* tp_doc */ (traverseproc)BTree_traverse, /* tp_traverse */ (inquiry)BTree_tp_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)BTree_getiter, /* tp_iter */ 0, /* tp_iternext */ BTree_methods, /* tp_methods */ BTree_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ BTree_init, /* tp_init */ 0, /* tp_alloc */ 0, /*PyType_GenericNew,*/ /* tp_new */ }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/BucketTemplate.c0000644000076500000240000015322314355020716016466 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #include "SetOpTemplate.h" #define BUCKETTEMPLATE_C "$Id$\n" /* Use BUCKET_SEARCH to find the index at which a key belongs. * INDEX An int lvalue to hold the index i such that KEY belongs at * SELF->keys[i]. Note that this will equal SELF->len if KEY * is larger than the bucket's largest key. Else it's the * smallest i such that SELF->keys[i] >= KEY. * ABSENT An int lvalue to hold a Boolean result, true (!= 0) if the * key is absent, false (== 0) if the key is at INDEX. * SELF A pointer to a Bucket node. * KEY The key you're looking for, of type KEY_TYPE. 
* ONERROR What to do if key comparison raises an exception; for example, * perhaps 'return NULL'. * * See Maintainer.txt for discussion: this is optimized in subtle ways. * It's recommended that you call this at the start of a routine, waiting * to check for self->len == 0 after (if an empty bucket is special in * context; INDEX becomes 0 and ABSENT becomes true if this macro is run * with an empty SELF, and that may be all the invoker needs to know). */ #define BUCKET_SEARCH(INDEX, ABSENT, SELF, KEY, ONERROR) { \ int _lo = 0; \ int _hi = (SELF)->len; \ int _i; \ int _cmp = 1; \ for (_i = _hi >> 1; _lo < _hi; _i = (_lo + _hi) >> 1) { \ TEST_KEY_SET_OR(_cmp, (SELF)->keys[_i], (KEY)) \ ONERROR; \ if (_cmp < 0) _lo = _i + 1; \ else if (_cmp == 0) break; \ else _hi = _i; \ } \ (INDEX) = _i; \ (ABSENT) = _cmp; \ } /* ** _bucket_get ** ** Search a bucket for a given key. ** ** Arguments ** self The bucket ** keyarg The key to look for ** has_key Boolean; if true, return a true/false result; else return ** the value associated with the key. When true, ignore the TypeError from ** a key conversion issue, instead ** transforming it into a KeyError. ** ** Return ** If has_key: ** Returns the Python int 0 if the key is absent, else returns ** has_key itself as a Python int. A BTree caller generally passes ** the depth of the bucket for has_key, so a true result returns ** the bucket depth then. ** Note that has_key should be true when searching set buckets. ** If not has_key: ** If the key is present, returns the associated value, and the ** caller owns the reference. Else returns NULL and sets KeyError. ** Whether or not has_key: ** If a comparison sets an exception, returns NULL. */ static PyObject * _bucket_get(Bucket *self, PyObject *keyarg, int has_key) { int i, cmp; KEY_TYPE key; PyObject *r = NULL; int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); UNLESS (copied) { if (has_key && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); PyErr_SetObject(PyExc_KeyError, keyarg); } return NULL; } UNLESS (PER_USE(self)) return NULL; BUCKET_SEARCH(i, cmp, self, key, goto Done); if (has_key) r = INT_FROM_LONG(cmp ? 0 : has_key); else { if (cmp == 0) { COPY_VALUE_TO_OBJECT(r, self->values[i]); } else PyErr_SetObject(PyExc_KeyError, keyarg); } Done: PER_UNUSE(self); return r; } static PyObject * bucket_getitem(Bucket *self, PyObject *key) { PyObject* result; result = _bucket_get(self, key, 0); if (result == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); PyErr_SetObject(PyExc_KeyError, key); } return result; } /* ** Bucket_grow ** ** Resize a bucket. ** ** Arguments: self The bucket. ** newsize The new maximum capacity. If < 0, double the ** current size unless the bucket is currently empty, ** in which case use MIN_BUCKET_ALLOC. 
** noval Boolean; if true, allocate only key space and not ** value space ** ** Returns: -1 on error, and MemoryError exception is set ** 0 on success */ static int Bucket_grow(Bucket *self, int newsize, int noval) { KEY_TYPE *keys; VALUE_TYPE *values; if (self->size) { if (newsize < 0) newsize = self->size * 2; if (newsize < 0) /* int overflow */ goto Overflow; UNLESS (keys = BTree_Realloc(self->keys, sizeof(KEY_TYPE) * newsize)) return -1; UNLESS (noval) { values = BTree_Realloc(self->values, sizeof(VALUE_TYPE) * newsize); if (values == NULL) { free(keys); return -1; } self->values = values; } self->keys = keys; } else { if (newsize < 0) newsize = MIN_BUCKET_ALLOC; UNLESS (self->keys = BTree_Malloc(sizeof(KEY_TYPE) * newsize)) return -1; UNLESS (noval) { self->values = BTree_Malloc(sizeof(VALUE_TYPE) * newsize); if (self->values == NULL) { free(self->keys); self->keys = NULL; return -1; } } } self->size = newsize; return 0; Overflow: PyErr_NoMemory(); return -1; } /* So far, bucket_append is called only by multiunion_m(), so is called * only when MULTI_INT_UNION is defined. Flavors of BTree/Bucket that * don't support MULTI_INT_UNION don't call bucket_append (yet), and * gcc complains if bucket_append is compiled in those cases. So only * compile bucket_append if it's going to be used. */ #ifdef MULTI_INT_UNION /* * Append a slice of the "from" bucket to self. * * self Append (at least keys) to this bucket. self must be activated * upon entry, and remains activated at exit. If copyValues * is true, self must be empty or already have a non-NULL values * pointer. self's access and modification times aren't updated. * from The bucket from which to take keys, and possibly values. from * must be activated upon entry, and remains activated at exit. * If copyValues is true, from must have a non-NULL values * pointer. self and from must not be the same. from's access * time isn't updated. * i, n The slice from[i : i+n] is appended to self. Must have * i >= 0, n > 0 and i+n <= from->len. * copyValues Boolean. If true, copy values from the slice as well as keys. * In this case, from must have a non-NULL values pointer, and * self must too (unless self is empty, in which case a values * vector will be allocated for it). * overallocate Boolean. If self doesn't have enough room upon entry to hold * all the appended stuff, then if overallocate is false exactly * enough room will be allocated to hold the new stuff, else if * overallocate is true an excess will be allocated. overallocate * may be a good idea if you expect to append more stuff to self * later; else overallocate should be false. * * CAUTION: If self is empty upon entry (self->size == 0), and copyValues is * false, then no space for values will get allocated. This can be a trap if * the caller intends to copy values itself. * * Return * -1 Error. * 0 OK. */ static int bucket_append(Bucket *self, Bucket *from, int i, int n, int copyValues, int overallocate) { int newlen; assert(self && from && self != from); assert(i >= 0); assert(n > 0); assert(i+n <= from->len); /* Make room. */ newlen = self->len + n; if (newlen > self->size) { int newsize = newlen; if (overallocate) /* boost by 25% -- pretty arbitrary */ newsize += newsize >> 2; if (Bucket_grow(self, newsize, ! copyValues) < 0) return -1; } assert(newlen <= self->size); /* Copy stuff. 
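* (A worked example of the overallocation just above, for illustration only:
* if self->len is 6 and n is 6, newlen is 12; with overallocate true the
* request grows to 12 + (12 >> 2) = 15 slots before Bucket_grow is called.)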
*/ memcpy(self->keys + self->len, from->keys + i, n * sizeof(KEY_TYPE)); if (copyValues) { assert(self->values); assert(from->values); memcpy(self->values + self->len, from->values + i, n * sizeof(VALUE_TYPE)); } self->len = newlen; /* Bump refcounts. */ #ifdef KEY_TYPE_IS_PYOBJECT { int j; PyObject **p = from->keys + i; for (j = 0; j < n; ++j, ++p) { Py_INCREF(*p); } } #endif #ifdef VALUE_TYPE_IS_PYOBJECT if (copyValues) { int j; PyObject **p = from->values + i; for (j = 0; j < n; ++j, ++p) { Py_INCREF(*p); } } #endif return 0; } #endif /* MULTI_INT_UNION */ /* ** _bucket_set: Assign a value to a key in a bucket, delete a key+value ** pair, or just insert a key. ** ** Arguments ** self The bucket ** keyarg The key to look for ** v The value to associate with key; NULL means delete the key. ** If NULL, it's an error (KeyError) if the key isn't present. ** Note that if this is a set bucket, and you want to insert ** a new set element, v must be non-NULL although its exact ** value will be ignored. Passing Py_None is good for this. ** unique Boolean; when true, don't replace the value if the key is ** already present. ** noval Boolean; when true, operate on keys only (ignore values) ** changed ignored on input ** ** Return ** -1 on error ** 0 on success and the # of bucket entries didn't change ** 1 on success and the # of bucket entries did change ** *changed If non-NULL, set to 1 on any mutation of the bucket. */ static int _bucket_set(Bucket *self, PyObject *keyarg, PyObject *v, int unique, int noval, int *changed) { int i, cmp; KEY_TYPE key; /* Subtle: there may or may not be a value. If there is, we need to * check its type early, so that in case of error we can get out before * mutating the bucket. But because value isn't used on all paths, if * we don't initialize value then gcc gives a nuisance complaint that * value may be used initialized (it can't be, but gcc doesn't know * that). So we initialize it. However, VALUE_TYPE can be various types, * including int, PyObject*, and char[6], so it's a puzzle to spell * initialization. It so happens that {0} is a valid initializer for all * these types. */ VALUE_TYPE value = {0}; /* squash nuisance warning */ int result = -1; /* until proven innocent */ int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); UNLESS(copied) return -1; #ifdef KEY_CHECK_ON_SET if (v && !KEY_CHECK_ON_SET(keyarg)) return -1; #endif /* Copy the value early (if needed), so that in case of error a * pile of bucket mutations don't need to be undone. */ if (v && !noval) { COPY_VALUE_FROM_ARG(value, v, copied); UNLESS(copied) return -1; } UNLESS (PER_USE(self)) return -1; BUCKET_SEARCH(i, cmp, self, key, goto Done); if (cmp == 0) { /* The key exists, at index i. */ if (v) { /* The key exists at index i, and there's a new value. * If unique, we're not supposed to replace it. If noval, or this * is a set bucket (self->values is NULL), there's nothing to do. */ if (unique || noval || self->values == NULL) { result = 0; goto Done; } /* The key exists at index i, and we need to replace the value. */ #ifdef VALUE_SAME /* short-circuit if no change */ if (VALUE_SAME(self->values[i], value)) { result = 0; goto Done; } #endif if (changed) *changed = 1; DECREF_VALUE(self->values[i]); COPY_VALUE(self->values[i], value); INCREF_VALUE(self->values[i]); if (PER_CHANGED(self) >= 0) result = 0; goto Done; } /* The key exists at index i, and should be deleted. 
*/ DECREF_KEY(self->keys[i]); self->len--; if (i < self->len) memmove(self->keys + i, self->keys + i+1, sizeof(KEY_TYPE)*(self->len - i)); if (self->values) { DECREF_VALUE(self->values[i]); if (i < self->len) memmove(self->values + i, self->values + i+1, sizeof(VALUE_TYPE)*(self->len - i)); } if (! self->len) { self->size = 0; free(self->keys); self->keys = NULL; if (self->values) { free(self->values); self->values = NULL; } } if (changed) *changed = 1; if (PER_CHANGED(self) >= 0) result = 1; goto Done; } /* The key doesn't exist, and belongs at index i. */ if (!v) { /* Can't delete a non-existent key. */ PyErr_SetObject(PyExc_KeyError, keyarg); goto Done; } /* The key doesn't exist and should be inserted at index i. */ if (self->len == self->size && Bucket_grow(self, -1, noval) < 0) goto Done; if (self->len > i) { memmove(self->keys + i + 1, self->keys + i, sizeof(KEY_TYPE) * (self->len - i)); if (self->values) { memmove(self->values + i + 1, self->values + i, sizeof(VALUE_TYPE) * (self->len - i)); } } COPY_KEY(self->keys[i], key); INCREF_KEY(self->keys[i]); if (! noval) { COPY_VALUE(self->values[i], value); INCREF_VALUE(self->values[i]); } self->len++; if (changed) *changed = 1; if (PER_CHANGED(self) >= 0) result = 1; Done: PER_UNUSE(self); return result; } /* ** bucket_setitem ** ** wrapper for _bucket_set (eliminates +1 return code) ** ** Arguments: self The bucket ** key The key to insert under ** v The value to insert ** ** Returns 0 on success ** -1 on failure */ static int bucket_setitem(Bucket *self, PyObject *key, PyObject *v) { if (_bucket_set(self, key, v, 0, 0, 0) < 0) return -1; return 0; } /** ** Accepts a sequence of 2-tuples, or any object with an items() ** method that returns an iterable object producing 2-tuples. */ static int update_from_seq(PyObject *map, PyObject *seq) { PyObject *iter, *o, *k, *v; int err = -1; /* One path creates a new seq object. The other path has an INCREF of the seq argument. So seq must always be DECREFed on the way out. */ /* Use items() if it's not a sequence. Alas, PySequence_Check() * returns true for a PeristentMapping or PersistentDict, and we * want to use items() in those cases too. 
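* Both flavours are accepted at the Python level (an illustrative sketch,
* assuming BTrees.OOBTree):
*
*   >>> from BTrees.OOBTree import OOBTree
*   >>> t = OOBTree()
*   >>> t.update([(1, 'a'), (2, 'b')])   # sequence of 2-tuples
*   >>> t.update({3: 'c'})               # anything with an items() method
*   >>> list(t.items())
*   [(1, 'a'), (2, 'b'), (3, 'c')]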
*/ if (!PySequence_Check(seq) || /* or it "looks like a dict" */ PyObject_HasAttrString(seq, "items")) { PyObject *items; items = PyObject_GetAttrString(seq, "items"); if (items == NULL) return -1; seq = PyObject_CallObject(items, NULL); Py_DECREF(items); if (seq == NULL) return -1; } else Py_INCREF(seq); iter = PyObject_GetIter(seq); if (iter == NULL) goto err; while (1) { o = PyIter_Next(iter); if (o == NULL) { if (PyErr_Occurred()) goto err; else break; } if (!PyTuple_Check(o) || PyTuple_GET_SIZE(o) != 2) { Py_DECREF(o); PyErr_SetString(PyExc_TypeError, "Sequence must contain 2-item tuples"); goto err; } k = PyTuple_GET_ITEM(o, 0); v = PyTuple_GET_ITEM(o, 1); if (PyObject_SetItem(map, k, v) < 0) { Py_DECREF(o); goto err; } Py_DECREF(o); } err = 0; err: Py_DECREF(iter); Py_DECREF(seq); return err; } static PyObject * Mapping_update(PyObject *self, PyObject *seq) { if (update_from_seq(self, seq) < 0) return NULL; Py_INCREF(Py_None); return Py_None; } /* ** bucket_split ** ** Splits one bucket into two ** ** Arguments: self The bucket ** index the index of the key to split at (O.O.B use midpoint) ** next the new bucket to split into ** ** Returns: 0 on success ** -1 on failure */ static int bucket_split(Bucket *self, int index, Bucket *next) { int next_size; ASSERT(self->len > 1, "split of empty bucket", -1); if (index < 0 || index >= self->len) index = self->len / 2; next_size = self->len - index; next->keys = BTree_Malloc(sizeof(KEY_TYPE) * next_size); if (!next->keys) return -1; memcpy(next->keys, self->keys + index, sizeof(KEY_TYPE) * next_size); if (self->values) { next->values = BTree_Malloc(sizeof(VALUE_TYPE) * next_size); if (!next->values) { free(next->keys); next->keys = NULL; return -1; } memcpy(next->values, self->values + index, sizeof(VALUE_TYPE) * next_size); } next->size = next_size; next->len = next_size; self->len = index; next->next = self->next; Py_INCREF(next); self->next = next; if (PER_CHANGED(self) < 0) return -1; return 0; } /* Set self->next to self->next->next, i.e. unlink self's successor from * the chain. * * Return: * -1 error * 0 OK */ static int Bucket_deleteNextBucket(Bucket *self) { int result = -1; /* until proven innocent */ Bucket *successor; PER_USE_OR_RETURN(self, -1); successor = self->next; if (successor) { Bucket *next; /* Before: self -> successor -> next * After: self --------------> next */ UNLESS (PER_USE(successor)) goto Done; next = successor->next; PER_UNUSE(successor); Py_XINCREF(next); /* it may be NULL, of course */ self->next = next; Py_DECREF(successor); if (PER_CHANGED(self) < 0) goto Done; } result = 0; Done: PER_UNUSE(self); return result; } /* Bucket_findRangeEnd -- Find the index of a range endpoint (possibly) contained in a bucket. Arguments: self The bucket keyarg The key to match against low Boolean; true for low end of range, false for high exclude_equal Boolean; if true, don't accept an exact match, and if there is one then move right if low and left if !low. offset The output offset If low true, *offset <- index of the smallest item >= key, if low false the index of the largest item <= key. In either case, if there is no such index, *offset is left alone and 0 is returned. Return: 0 No suitable index exists; *offset has not been changed 1 The correct index was stored into *offset -1 Error Example: Suppose the keys are [2, 4], and exclude_equal is false. Searching for 2 sets *offset to 0 and returns 1 regardless of low. Searching for 4 sets *offset to 1 and returns 1 regardless of low. 
Searching for 1: If low true, sets *offset to 0, returns 1. If low false, returns 0. Searching for 3: If low true, sets *offset to 1, returns 1. If low false, sets *offset to 0, returns 1. Searching for 5: If low true, returns 0. If low false, sets *offset to 1, returns 1. The 1, 3 and 5 examples are the same when exclude_equal is true. */ static int Bucket_findRangeEnd(Bucket *self, PyObject *keyarg, int low, int exclude_equal, int *offset) { int i, cmp; int result = -1; /* until proven innocent */ KEY_TYPE key; int copied = 1; COPY_KEY_FROM_ARG(key, keyarg, copied); UNLESS (copied) return -1; UNLESS (PER_USE(self)) return -1; BUCKET_SEARCH(i, cmp, self, key, goto Done); if (cmp == 0) { /* exact match at index i */ if (exclude_equal) { /* but we don't want an exact match */ if (low) ++i; else --i; } } /* Else keys[i-1] < key < keys[i], picturing infinities at OOB indices, * and i has the smallest item > key, which is correct for low. */ else if (! low) /* i-1 has the largest item < key (unless i-1 is 0OB) */ --i; result = 0 <= i && i < self->len; if (result) *offset = i; Done: PER_UNUSE(self); return result; } static PyObject * Bucket_maxminKey(Bucket *self, PyObject *args, int min) { PyObject *key=0; int rc, offset = 0; int empty_bucket = 1; if (args && ! PyArg_ParseTuple(args, "|O", &key)) return NULL; PER_USE_OR_RETURN(self, NULL); UNLESS (self->len) goto empty; /* Find the low range */ if (key && key != Py_None) { if ((rc = Bucket_findRangeEnd(self, key, min, 0, &offset)) <= 0) { if (rc < 0) return NULL; empty_bucket = 0; goto empty; } } else if (min) offset = 0; else offset = self->len -1; COPY_KEY_TO_OBJECT(key, self->keys[offset]); PER_UNUSE(self); return key; empty: PyErr_SetString(PyExc_ValueError, empty_bucket ? "empty bucket" : "no key satisfies the conditions"); PER_UNUSE(self); return NULL; } static PyObject * Bucket_minKey(Bucket *self, PyObject *args) { return Bucket_maxminKey(self, args, 1); } static PyObject * Bucket_maxKey(Bucket *self, PyObject *args) { return Bucket_maxminKey(self, args, 0); } static int Bucket_rangeSearch(Bucket *self, PyObject *args, PyObject *kw, int *low, int *high) { PyObject *min = Py_None; PyObject *max = Py_None; int excludemin = 0; int excludemax = 0; int rc; if (args) { if (! PyArg_ParseTupleAndKeywords(args, kw, "|OOii", search_keywords, &min, &max, &excludemin, &excludemax)) return -1; } UNLESS (self->len) goto empty; /* Find the low range */ if (min != Py_None) { rc = Bucket_findRangeEnd(self, min, 1, excludemin, low); if (rc < 0) return -1; if (rc == 0) goto empty; } else { *low = 0; if (excludemin) { if (self->len < 2) goto empty; ++*low; } } /* Find the high range */ if (max != Py_None) { rc = Bucket_findRangeEnd(self, max, 0, excludemax, high); if (rc < 0) return -1; if (rc == 0) goto empty; } else { *high = self->len - 1; if (excludemax) { if (self->len < 2) goto empty; --*high; } } /* If min < max to begin with, it's quite possible that low > high now. 
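* For example, with keys [2, 4] and min = max = 3: the low end resolves to
* index 1 (the 4) and the high end to index 0 (the 2), so low > high and the
* range is empty. Observed from Python (a sketch assuming
* BTrees.OOBTree.OOBucket):
*
*   >>> from BTrees.OOBTree import OOBucket
*   >>> b = OOBucket({2: 'a', 4: 'b'})
*   >>> b.keys(3, 3)
*   []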
*/ if (*low <= *high) return 0; empty: *low = 0; *high = -1; return 0; } /* ** bucket_keys ** ** Generate a list of all keys in the bucket ** ** Arguments: self The Bucket ** args (unused) ** ** Returns: list of bucket keys */ static PyObject * bucket_keys(Bucket *self, PyObject *args, PyObject *kw) { PyObject *r = NULL, *key; int i, low, high; PER_USE_OR_RETURN(self, NULL); if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0) goto err; r = PyList_New(high-low+1); if (r == NULL) goto err; for (i=low; i <= high; i++) { COPY_KEY_TO_OBJECT(key, self->keys[i]); if (PyList_SetItem(r, i-low , key) < 0) goto err; } PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); return NULL; } /* ** bucket_values ** ** Generate a list of all values in the bucket ** ** Arguments: self The Bucket ** args (unused) ** ** Returns list of values */ static PyObject * bucket_values(Bucket *self, PyObject *args, PyObject *kw) { PyObject *r=0, *v; int i, low, high; PER_USE_OR_RETURN(self, NULL); if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0) goto err; UNLESS (r=PyList_New(high-low+1)) goto err; for (i=low; i <= high; i++) { COPY_VALUE_TO_OBJECT(v, self->values[i]); UNLESS (v) goto err; if (PyList_SetItem(r, i-low, v) < 0) goto err; } PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); return NULL; } /* ** bucket_items ** ** Returns a list of all items in a bucket ** ** Arguments: self The Bucket ** args (unused) ** ** Returns: list of all items in the bucket */ static PyObject * bucket_items(Bucket *self, PyObject *args, PyObject *kw) { PyObject *r=0, *o=0, *item=0; int i, low, high; PER_USE_OR_RETURN(self, NULL); if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0) goto err; UNLESS (r=PyList_New(high-low+1)) goto err; for (i=low; i <= high; i++) { UNLESS (item = PyTuple_New(2)) goto err; COPY_KEY_TO_OBJECT(o, self->keys[i]); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 0, o); COPY_VALUE_TO_OBJECT(o, self->values[i]); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 1, o); if (PyList_SetItem(r, i-low, item) < 0) goto err; item = 0; } PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); Py_XDECREF(item); return NULL; } static PyObject * bucket_byValue(Bucket *self, PyObject *omin) { PyObject *r=0, *o=0, *item=0; VALUE_TYPE min; VALUE_TYPE v; int i, l, copied=1; PER_USE_OR_RETURN(self, NULL); COPY_VALUE_FROM_ARG(min, omin, copied); UNLESS(copied) return NULL; for (i=0, l=0; i < self->len; i++) if (TEST_VALUE(self->values[i], min) >= 0) l++; UNLESS (r=PyList_New(l)) goto err; for (i=0, l=0; i < self->len; i++) { if (TEST_VALUE(self->values[i], min) < 0) continue; UNLESS (item = PyTuple_New(2)) goto err; COPY_KEY_TO_OBJECT(o, self->keys[i]); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 1, o); COPY_VALUE(v, self->values[i]); NORMALIZE_VALUE(v, min); COPY_VALUE_TO_OBJECT(o, v); DECREF_VALUE(v); UNLESS (o) goto err; PyTuple_SET_ITEM(item, 0, o); if (PyList_SetItem(r, l, item) < 0) goto err; l++; item = 0; } item=PyObject_GetAttr(r,sort_str); UNLESS (item) goto err; ASSIGN(item, PyObject_CallObject(item, NULL)); UNLESS (item) goto err; ASSIGN(item, PyObject_GetAttr(r, reverse_str)); UNLESS (item) goto err; ASSIGN(item, PyObject_CallObject(item, NULL)); UNLESS (item) goto err; Py_DECREF(item); PER_UNUSE(self); return r; err: PER_UNUSE(self); Py_XDECREF(r); Py_XDECREF(item); return NULL; } static int _bucket_clear(Bucket *self) { const int len = self->len; /* Don't declare i at this level. 
If neither keys nor values are * PyObject*, i won't be referenced, and you'll get a nuisance compiler * wng for declaring it here. */ self->len = self->size = 0; if (self->next) { Py_DECREF(self->next); self->next = NULL; } /* Silence compiler warning about unused variable len for the case when neither key nor value is an object, i.e. II. */ (void)len; if (self->keys) { #ifdef KEY_TYPE_IS_PYOBJECT int i; for (i = 0; i < len; ++i) DECREF_KEY(self->keys[i]); #endif free(self->keys); self->keys = NULL; } if (self->values) { #ifdef VALUE_TYPE_IS_PYOBJECT int i; for (i = 0; i < len; ++i) DECREF_VALUE(self->values[i]); #endif free(self->values); self->values = NULL; } return 0; } #ifdef PERSISTENT static PyObject * bucket__p_deactivate(Bucket *self, PyObject *args, PyObject *keywords) { int ghostify = 1; PyObject *force = NULL; if (args && PyTuple_GET_SIZE(args) > 0) { PyErr_SetString(PyExc_TypeError, "_p_deactivate takes no positional arguments"); return NULL; } if (keywords) { int size = PyDict_Size(keywords); force = PyDict_GetItemString(keywords, "force"); if (force) size--; if (size) { PyErr_SetString(PyExc_TypeError, "_p_deactivate only accepts keyword arg force"); return NULL; } } if (self->jar && self->oid) { ghostify = self->state == cPersistent_UPTODATE_STATE; if (!ghostify && force) { if (PyObject_IsTrue(force)) ghostify = 1; if (PyErr_Occurred()) return NULL; } if (ghostify) { if (_bucket_clear(self) < 0) return NULL; PER_GHOSTIFY(self); } } Py_INCREF(Py_None); return Py_None; } #endif static PyObject * bucket_clear(Bucket *self, PyObject *args) { PER_USE_OR_RETURN(self, NULL); if (self->len) { if (_bucket_clear(self) < 0) return NULL; if (PER_CHANGED(self) < 0) goto err; } PER_UNUSE(self); Py_INCREF(Py_None); return Py_None; err: PER_UNUSE(self); return NULL; } /* * Return: * * For a set bucket (self->values is NULL), a one-tuple or two-tuple. The * first element is a tuple of keys, of length self->len. The second element * is the next bucket, present if and only if next is non-NULL: * * ( * (keys[0], keys[1], ..., keys[len-1]), * next iff non-NULL> * ) * * For a mapping bucket (self->values is not NULL), a one-tuple or two-tuple. * The first element is a tuple interleaving keys and values, of length * 2 * self->len. 
The second element is the next bucket, present iff next is * non-NULL: * * ( * (keys[0], values[0], keys[1], values[1], ..., * keys[len-1], values[len-1]), * next iff non-NULL> * ) */ static PyObject * bucket_getstate(Bucket *self) { PyObject *o = NULL, *items = NULL, *state; int i, len, l; PER_USE_OR_RETURN(self, NULL); len = self->len; if (self->values) /* Bucket */ { items = PyTuple_New(len * 2); if (items == NULL) goto err; for (i = 0, l = 0; i < len; i++) { COPY_KEY_TO_OBJECT(o, self->keys[i]); if (o == NULL) goto err; PyTuple_SET_ITEM(items, l, o); l++; COPY_VALUE_TO_OBJECT(o, self->values[i]); if (o == NULL) goto err; PyTuple_SET_ITEM(items, l, o); l++; } } else /* Set */ { items = PyTuple_New(len); if (items == NULL) goto err; for (i = 0; i < len; i++) { COPY_KEY_TO_OBJECT(o, self->keys[i]); if (o == NULL) goto err; PyTuple_SET_ITEM(items, i, o); } } if (self->next) state = Py_BuildValue("OO", items, self->next); else state = Py_BuildValue("(O)", items); Py_DECREF(items); PER_UNUSE(self); return state; err: PER_UNUSE(self); Py_XDECREF(items); return NULL; } static int _bucket_setstate(Bucket *self, PyObject *state) { PyObject *k, *v, *items; Bucket *next = NULL; int i, l, len, copied=1; KEY_TYPE *keys; VALUE_TYPE *values; if (!PyArg_ParseTuple(state, "O|O:__setstate__", &items, &next)) return -1; if (!PyTuple_Check(items)) { PyErr_SetString(PyExc_TypeError, "tuple required for first state element"); return -1; } len = PyTuple_Size(items); ASSERT(len >= 0, "_bucket_setstate: items tuple has negative size", -1); len /= 2; for (i = self->len; --i >= 0; ) { DECREF_KEY(self->keys[i]); DECREF_VALUE(self->values[i]); } self->len = 0; if (self->next) { Py_DECREF(self->next); self->next = NULL; } if (len > self->size) { keys = BTree_Realloc(self->keys, sizeof(KEY_TYPE)*len); if (keys == NULL) return -1; values = BTree_Realloc(self->values, sizeof(VALUE_TYPE)*len); if (values == NULL) return -1; self->keys = keys; self->values = values; self->size = len; } for (i=0, l=0; i < len; i++) { k = PyTuple_GET_ITEM(items, l); l++; v = PyTuple_GET_ITEM(items, l); l++; COPY_KEY_FROM_ARG(self->keys[i], k, copied); if (!copied) return -1; COPY_VALUE_FROM_ARG(self->values[i], v, copied); if (!copied) return -1; INCREF_KEY(self->keys[i]); INCREF_VALUE(self->values[i]); } self->len = len; if (next) { self->next = next; Py_INCREF(next); } return 0; } static PyObject * bucket_setstate(Bucket *self, PyObject *state) { int r; PER_PREVENT_DEACTIVATION(self); r = _bucket_setstate(self, state); PER_UNUSE(self); if (r < 0) return NULL; Py_INCREF(Py_None); return Py_None; } static PyObject * bucket_sub(PyObject *self, PyObject *other) { PyObject *args = Py_BuildValue("OO", self, other); return difference_m(NULL, args); } static PyObject * bucket_or(PyObject *self, PyObject *other) { PyObject *args = Py_BuildValue("OO", self, other); return union_m(NULL, args); } static PyObject * bucket_and(PyObject *self, PyObject *other) { PyObject *args = Py_BuildValue("OO", self, other); return intersection_m(NULL, args); } static PyObject * bucket_setdefault(Bucket *self, PyObject *args) { PyObject *key; PyObject *failobj; /* default */ PyObject *value; /* return value */ int dummy_changed; /* in order to call _bucket_set */ if (! PyArg_UnpackTuple(args, "setdefault", 2, 2, &key, &failobj)) return NULL; value = _bucket_get(self, key, 0); if (value != NULL) return value; /* The key isn't in the bucket. If that's not due to a KeyError exception, * pass back the unexpected exception. */ if (! 
BTree_ShouldSuppressKeyError()) return NULL; PyErr_Clear(); /* Associate `key` with `failobj` in the bucket, and return `failobj`. */ value = failobj; if (_bucket_set(self, key, failobj, 0, 0, &dummy_changed) < 0) value = NULL; Py_XINCREF(value); return value; } /* forward declaration */ static int Bucket_length(Bucket *self); static PyObject * bucket_pop(Bucket *self, PyObject *args) { PyObject *key; PyObject *failobj = NULL; /* default */ PyObject *value; /* return value */ int dummy_changed; /* in order to call _bucket_set */ if (! PyArg_UnpackTuple(args, "pop", 1, 2, &key, &failobj)) return NULL; value = _bucket_get(self, key, 0); if (value != NULL) { /* Delete key and associated value. */ if (_bucket_set(self, key, NULL, 0, 0, &dummy_changed) < 0) { Py_DECREF(value); return NULL; } return value; } /* The key isn't in the bucket. If that's not due to a KeyError exception, * pass back the unexpected exception. */ if (! BTree_ShouldSuppressKeyError()) return NULL; if (failobj != NULL) { /* Clear the KeyError and return the explicit default. */ PyErr_Clear(); Py_INCREF(failobj); return failobj; } /* No default given. The only difference in this case is the error * message, which depends on whether the bucket is empty. */ if (Bucket_length(self) == 0) PyErr_SetString(PyExc_KeyError, "pop(): Bucket is empty"); return NULL; } static PyObject* bucket_popitem(Bucket* self, PyObject* args) { PyObject* key = NULL; PyObject* pop_args = NULL; PyObject* result_val = NULL; PyObject* result = NULL; if (PyTuple_Size(args) != 0) { PyErr_SetString(PyExc_TypeError, "popitem(): Takes no arguments."); return NULL; } key = Bucket_minKey(self, args); /* reuse existing empty tuple. */ if (!key) { PyErr_Clear(); PyErr_SetString(PyExc_KeyError, "popitem(): empty bucket."); return NULL; } pop_args = PyTuple_Pack(1, key); if (pop_args) { result_val = bucket_pop(self, pop_args); Py_DECREF(pop_args); if (result_val) { result = PyTuple_Pack(2, key, result_val); Py_DECREF(result_val); } } Py_DECREF(key); return result; } /* Search bucket self for key. This is the sq_contains slot of the * PySequenceMethods. * * Return: * -1 error * 0 not found * 1 found */ static int bucket_contains(Bucket *self, PyObject *key) { PyObject *asobj = _bucket_get(self, key, 1); int result = -1; if (asobj != NULL) { result = INT_AS_LONG(asobj) ? 1 : 0; Py_DECREF(asobj); } else if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); result = 0; } return result; } static PyObject * bucket_has_key(Bucket *self, PyObject *key) { int result = -1; result = bucket_contains(self, key); if (result == -1) { return NULL; } if (result) Py_RETURN_TRUE; Py_RETURN_FALSE; } /* ** bucket_getm ** */ static PyObject * bucket_getm(Bucket *self, PyObject *args) { PyObject *key, *d=Py_None, *r; if (!PyArg_ParseTuple(args, "O|O:get", &key, &d)) return NULL; r = _bucket_get(self, key, 0); if (r) return r; if (PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); PyErr_SetObject(PyExc_KeyError, key); } if (!BTree_ShouldSuppressKeyError()) return NULL; PyErr_Clear(); Py_INCREF(d); return d; } /**************************************************************************/ /* Iterator support. */ /* A helper to build all the iterators for Buckets and Sets. * If args is NULL, the iterator spans the entire structure. Else it's an * argument tuple, with optional low and high arguments. * kind is 'k', 'v' or 'i'. * Returns a BTreeIter object, or NULL if error. 
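* Sketch of the resulting Python-level API (assuming BTrees.OOBTree.OOBucket;
* the range and exclusion keywords are forwarded to Bucket_rangeSearch):
*
*   >>> from BTrees.OOBTree import OOBucket
*   >>> b = OOBucket({1: 'a', 2: 'b', 3: 'c'})
*   >>> list(b.iterkeys(1, 3, excludemin=True))
*   [2, 3]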
*/ static PyObject * buildBucketIter(Bucket *self, PyObject *args, PyObject *kw, char kind) { BTreeItems *items; int lowoffset, highoffset; BTreeIter *result = NULL; PER_USE_OR_RETURN(self, NULL); if (Bucket_rangeSearch(self, args, kw, &lowoffset, &highoffset) < 0) goto Done; items = (BTreeItems *)newBTreeItems(kind, self, lowoffset, self, highoffset); if (items == NULL) goto Done; result = BTreeIter_new(items); /* win or lose, we're done */ Py_DECREF(items); Done: PER_UNUSE(self); return (PyObject *)result; } /* The implementation of iter(Bucket_or_Set); the Bucket tp_iter slot. */ static PyObject * Bucket_getiter(Bucket *self) { return buildBucketIter(self, NULL, NULL, 'k'); } /* The implementation of Bucket.iterkeys(). */ static PyObject * Bucket_iterkeys(Bucket *self, PyObject *args, PyObject *kw) { return buildBucketIter(self, args, kw, 'k'); } /* The implementation of Bucket.itervalues(). */ static PyObject * Bucket_itervalues(Bucket *self, PyObject *args, PyObject *kw) { return buildBucketIter(self, args, kw, 'v'); } /* The implementation of Bucket.iteritems(). */ static PyObject * Bucket_iteritems(Bucket *self, PyObject *args, PyObject *kw) { return buildBucketIter(self, args, kw, 'i'); } /* End of iterator support. */ #ifdef PERSISTENT static PyObject *merge_error(int p1, int p2, int p3, int reason); static PyObject *bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3); static PyObject * _bucket__p_resolveConflict(PyObject *ob_type, PyObject *s[3]) { PyObject *result = NULL; /* guilty until proved innocent */ Bucket *b[3] = {NULL, NULL, NULL}; PyObject *meth = NULL; PyObject *a = NULL; int i; for (i = 0; i < 3; i++) { PyObject *r; b[i] = (Bucket*)PyObject_CallObject((PyObject *)ob_type, NULL); if (b[i] == NULL) goto Done; if (s[i] == Py_None) /* None is equivalent to empty, for BTrees */ continue; meth = PyObject_GetAttr((PyObject *)b[i], __setstate___str); if (meth == NULL) goto Done; a = PyTuple_New(1); if (a == NULL) goto Done; PyTuple_SET_ITEM(a, 0, s[i]); Py_INCREF(s[i]); r = PyObject_CallObject(meth, a); /* b[i].__setstate__(s[i]) */ if (r == NULL) goto Done; Py_DECREF(r); Py_DECREF(a); Py_DECREF(meth); a = meth = NULL; } if (b[0]->next != b[1]->next || b[0]->next != b[2]->next) merge_error(-1, -1, -1, 0); else result = bucket_merge(b[0], b[1], b[2]); Done: Py_XDECREF(meth); Py_XDECREF(a); Py_XDECREF(b[0]); Py_XDECREF(b[1]); Py_XDECREF(b[2]); return result; } static PyObject * bucket__p_resolveConflict(Bucket *self, PyObject *args) { PyObject *s[3]; if (!PyArg_ParseTuple(args, "OOO", &s[0], &s[1], &s[2])) return NULL; return _bucket__p_resolveConflict((PyObject *)Py_TYPE(self), s); } #endif /* Caution: Even though the _next attribute is read-only, a program could do arbitrary damage to the btree internals. For example, it could call clear() on a bucket inside a BTree. We need to decide if the convenience for inspecting BTrees is worth the risk. 
*/ static struct PyMemberDef Bucket_members[] = { {"_next", T_OBJECT, offsetof(Bucket, next)}, {NULL} }; static struct PyMethodDef Bucket_methods[] = { {"__getstate__", (PyCFunction) bucket_getstate, METH_NOARGS, "__getstate__() -- Return the picklable state of the object"}, {"__setstate__", (PyCFunction) bucket_setstate, METH_O, "__setstate__() -- Set the state of the object"}, {"keys", (PyCFunction) bucket_keys, METH_VARARGS | METH_KEYWORDS, "keys([min, max]) -- Return the keys"}, {"has_key", (PyCFunction) bucket_has_key, METH_O, "has_key(key) -- Test whether the bucket contains the given key"}, {"clear", (PyCFunction) bucket_clear, METH_VARARGS, "clear() -- Remove all of the items from the bucket"}, {"update", (PyCFunction) Mapping_update, METH_O, "update(collection) -- Add the items from the given collection"}, {"maxKey", (PyCFunction) Bucket_maxKey, METH_VARARGS, "maxKey([key]) -- Find the maximum key\n\n" "If an argument is given, find the maximum <= the argument"}, {"minKey", (PyCFunction) Bucket_minKey, METH_VARARGS, "minKey([key]) -- Find the minimum key\n\n" "If an argument is given, find the minimum >= the argument"}, {"values", (PyCFunction) bucket_values, METH_VARARGS | METH_KEYWORDS, "values([min, max]) -- Return the values"}, {"items", (PyCFunction) bucket_items, METH_VARARGS | METH_KEYWORDS, "items([min, max])) -- Return the items"}, {"byValue", (PyCFunction) bucket_byValue, METH_O, "byValue(min) -- " "Return value-keys with values >= min and reverse sorted by values"}, {"get", (PyCFunction) bucket_getm, METH_VARARGS, "get(key[,default]) -- Look up a value\n\n" "Return the default (or None) if the key is not found."}, {"setdefault", (PyCFunction) bucket_setdefault, METH_VARARGS, "D.setdefault(k, d) -> D.get(k, d), also set D[k]=d if k not in D.\n\n" "Return the value like get() except that if key is missing, d is both\n" "returned and inserted into the bucket as the value of k."}, {"pop", (PyCFunction) bucket_pop, METH_VARARGS, "D.pop(k[, d]) -> v, remove key and return the corresponding value.\n\n" "If key is not found, d is returned if given, otherwise KeyError\n" "is raised."}, {"popitem", (PyCFunction)bucket_popitem, METH_VARARGS, "D.popitem() -> (k, v), remove and return some (key, value) pair\n" "as a 2-tuple; but raise KeyError if D is empty."}, {"iterkeys", (PyCFunction) Bucket_iterkeys, METH_VARARGS | METH_KEYWORDS, "B.iterkeys([min[,max]]) -> an iterator over the keys of B"}, {"itervalues", (PyCFunction) Bucket_itervalues, METH_VARARGS | METH_KEYWORDS, "B.itervalues([min[,max]]) -> an iterator over the values of B"}, {"iteritems", (PyCFunction) Bucket_iteritems, METH_VARARGS | METH_KEYWORDS, "B.iteritems([min[,max]]) -> an iterator over the (key, value) " "items of B"}, #ifdef EXTRA_BUCKET_METHODS EXTRA_BUCKET_METHODS #endif #ifdef PERSISTENT {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict, METH_VARARGS, "_p_resolveConflict() -- Reinitialize from a newly created copy"}, {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_VARARGS | METH_KEYWORDS, "_p_deactivate() -- Reinitialize from a newly created copy"}, #endif {NULL, NULL} }; static int Bucket_init(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *v = NULL; if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "Bucket", &v)) return -1; if (v) return update_from_seq(self, v); else return 0; } static void bucket_dealloc(Bucket *self) { PyObject_GC_UnTrack((PyObject *)self); if (self->state != cPersistent_GHOST_STATE) { _bucket_clear(self); } 
cPersistenceCAPI->pertype->tp_dealloc((PyObject *)self); } static int bucket_traverse(Bucket *self, visitproc visit, void *arg) { int err = 0; int i, len; #define VISIT(SLOT) \ if (SLOT) { \ err = visit((PyObject *)(SLOT), arg); \ if (err) \ goto Done; \ } /* Call our base type's traverse function. Because buckets are * subclasses of Peristent, there must be one. */ err = cPersistenceCAPI->pertype->tp_traverse((PyObject *)self, visit, arg); if (err) goto Done; /* If this is registered with the persistence system, cleaning up cycles * is the database's problem. It would be horrid to unghostify buckets * here just to chase pointers every time gc runs. */ if (self->state == cPersistent_GHOST_STATE) goto Done; len = self->len; /* if neither keys nor values are PyObject*, "i" is otherwise unreferenced and we get a nuisance compiler wng */ (void)i; (void)len; #ifdef KEY_TYPE_IS_PYOBJECT /* Keys are Python objects so need to be traversed. */ for (i = 0; i < len; i++) VISIT(self->keys[i]); #endif #ifdef VALUE_TYPE_IS_PYOBJECT if (self->values != NULL) { /* self->values exists (this is a mapping bucket, not a set bucket), * and are Python objects, so need to be traversed. */ for (i = 0; i < len; i++) VISIT(self->values[i]); } #endif VISIT(self->next); Done: return err; #undef VISIT } static int bucket_tp_clear(Bucket *self) { if (self->state != cPersistent_GHOST_STATE) _bucket_clear(self); return 0; } /* Code to access Bucket objects as mappings */ static int Bucket_length( Bucket *self) { int r; UNLESS (PER_USE(self)) return -1; r = self->len; PER_UNUSE(self); return r; } static PyMappingMethods Bucket_as_mapping = { (lenfunc)Bucket_length, /*mp_length*/ (binaryfunc)bucket_getitem, /*mp_subscript*/ (objobjargproc)bucket_setitem, /*mp_ass_subscript*/ }; static PySequenceMethods Bucket_as_sequence = { (lenfunc)0, /* sq_length */ (binaryfunc)0, /* sq_concat */ (ssizeargfunc)0, /* sq_repeat */ (ssizeargfunc)0, /* sq_item */ (ssizessizeargfunc)0, /* sq_slice */ (ssizeobjargproc)0, /* sq_ass_item */ (ssizessizeobjargproc)0, /* sq_ass_slice */ (objobjproc)bucket_contains, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; static PyNumberMethods Bucket_as_number = { (binaryfunc)0, /* nb_add */ bucket_sub, /* nb_subtract */ (binaryfunc)0, /* nb_multiply */ (binaryfunc)0, /* nb_remainder */ (binaryfunc)0, /* nb_divmod */ (ternaryfunc)0, /* nb_power */ (unaryfunc)0, /* nb_negative */ (unaryfunc)0, /* nb_positive */ (unaryfunc)0, /* nb_absolute */ (inquiry)0, /* nb_bool */ (unaryfunc)0, /* nb_invert */ (binaryfunc)0, /* nb_lshift */ (binaryfunc)0, /* nb_rshift */ bucket_and, /* nb_and */ (binaryfunc)0, /* nb_xor */ bucket_or, /* nb_or */ }; static PyObject * bucket_repr(Bucket *self) { PyObject *i, *r; i = bucket_items(self, NULL, NULL); if (!i) { return NULL; } r = PyUnicode_FromFormat("%s(%R)", Py_TYPE(self)->tp_name, i); Py_DECREF(i); return r; } static PyTypeObject BucketType = { PyVarObject_HEAD_INIT(NULL, 0) MODULE_NAME MOD_NAME_PREFIX "Bucket", /* tp_name */ sizeof(Bucket), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)bucket_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc)bucket_repr, /* tp_repr */ &Bucket_as_number, /* tp_as_number */ &Bucket_as_sequence, /* tp_as_sequence */ &Bucket_as_mapping, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ 0, 
/* tp_doc */ (traverseproc)bucket_traverse, /* tp_traverse */ (inquiry)bucket_tp_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)Bucket_getiter, /* tp_iter */ 0, /* tp_iternext */ Bucket_methods, /* tp_methods */ Bucket_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ Bucket_init, /* tp_init */ 0, /* tp_alloc */ 0, /*PyType_GenericNew,*/ /* tp_new */ }; static int nextBucket(SetIteration *i) { if (i->position >= 0) { UNLESS(PER_USE(BUCKET(i->set))) return -1; if (i->position) { DECREF_KEY(i->key); DECREF_VALUE(i->value); } if (i->position < BUCKET(i->set)->len) { COPY_KEY(i->key, BUCKET(i->set)->keys[i->position]); INCREF_KEY(i->key); COPY_VALUE(i->value, BUCKET(i->set)->values[i->position]); INCREF_VALUE(i->value); i->position ++; } else { i->position = -1; PER_ACCESSED(BUCKET(i->set)); } PER_ALLOW_DEACTIVATION(BUCKET(i->set)); } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/Interfaces.py0000644000076500000240000005675114626022106016052 0ustar00jensstaff############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## from zope.interface import Attribute from zope.interface import Interface from zope.interface.common.collections import IMapping from zope.interface.common.collections import ISized from zope.interface.common.sequence import IMinimalSequence class ICollection(Interface): """ A collection of zero or more objects. In a boolean context, objects implementing this interface are `True` if the collection is non-empty, and `False` if the collection is empty. """ def clear(): """Remove all of the items from the collection.""" # Backwards compatibility alias. To be removed in 5.0. # Docs deprecated only in docs/api.rst. IReadSequence = IMinimalSequence class IKeyed(ICollection): def has_key(key): """Check whether the object has an item with the given key. Return a true value if the key is present, else a false value. """ def keys(min=None, max=None, excludemin=False, excludemax=False): """ Return an :mod:`IMinimalSequence ` containing the keys in the collection. The type of the ``IMinimalSequence`` is not specified. It could be a `list` or a `tuple` or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a *min* is specified, then output is constrained to keys greater than or equal to the given min, and, if *excludemin* is specified and true, is further constrained to keys strictly greater than *min*. A *min* value of `None` is ignored. If *min* is `None` or not specified, and *excludemin* is true, the smallest key is excluded. If a *max* is specified, then output is constrained to keys less than or equal to the given *max*, and, if *excludemax* is specified and true, is further constrained to keys strictly less than *max*. 
A *max* value of `None` is ignored. If *max* is `None` or not specified, and *excludemax* is true, the largest key is excluded. """ def maxKey(key=None): """Return the maximum key. If a key argument if provided and not None, return the largest key that is less than or equal to the argument. Raise an exception if no such key exists. """ def minKey(key=None): """Return the minimum key. If a key argument if provided and not None, return the smallest key that is greater than or equal to the argument. Raise an exception if no such key exists. """ class ISetMutable(IKeyed): def insert(key): """Add the key (value) to the set. If the key was already in the set, return 0, otherwise return 1. """ def remove(key): """Remove the key from the set. Raises :class:`KeyError` if key is not in the set. """ def update(seq): """Add the items from the given sequence to the set.""" def __and__(other): """ Shortcut for :meth:`~BTrees.Interfaces.IMerge.intersection` .. versionadded:: 4.8.0 """ def __iand__(other): """ As for :meth:`set.intersection_update`: Update this object, keeping only elements found in both it and other. .. versionadded:: 4.8.0 """ def __or__(other): """ Shortcut for :meth:`~BTrees.Interfaces.IMerge.union` .. versionadded:: 4.8.0 """ def __ior__(other): """ As for :meth:`set.update`: Update this object, adding elements from *other*. .. versionadded:: 4.8.0 """ def __sub__(other): """ Shortcut for :meth:`~BTrees.Interfaces.IMerge.difference` .. versionadded:: 4.8.0 """ def __isub__(other): """ As for :meth:`set.difference_update`: Update this object, removing elements found in *other*. .. versionadded:: 4.8.0 """ def isdisjoint(other): """ As for :meth:`set.isdisjoint`: Return True if the set has no elements in common with other. .. versionadded:: 4.8.0 """ def discard(key): """ As for :meth:`set.discard`: Remove the *key* from the set, but only if it is present. .. versionadded:: 4.8.0 """ def pop(): """ As for :meth:`set.pop`: Remove and return an arbitrary element; raise :exc:`KeyError` if the object is empty. .. versionadded:: 4.8.0 """ def __ixor__(other): """ As for :meth:`set.symmetric_difference_update`: Update this object, keeping only elements found in either set but not in both. .. versionadded:: 4.8.0 """ class IKeySequence(IKeyed, ISized): def __getitem__(index): """Return the key in the given index position. This allows iteration with for loops and use in functions, like map and list, that read sequences. """ class ISet(ISetMutable, IKeySequence): """ A set of unique items stored in a single persistent object. """ class ITreeSet(ISetMutable): """ A set of unique items stored in a tree of persistent objects. """ class IMinimalDictionary(IKeyed, IMapping): """ Mapping operations. .. versionchanged:: 4.8.0 Now extends :class:`zope.interface.common.collections.IMapping`. """ def get(key, default): """Get the value associated with the given key. Return the default if :meth:`~BTrees.Interfaces.IKeyed.has_key` is false with the given key. """ def __getitem__(key): """Get the value associated with the given key. Raise :class:`KeyError` if :meth:`~BTrees.Interfaces.IKeyed.has_key` is false with the given key. """ def __setitem__(key, value): """Set the value associated with the given key.""" def __delitem__(key): """Delete the value associated with the given key. Raise class:`KeyError` if :meth:`~BTrees.Interfaces.IKeyed.has_key` is false with the given key. 
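A short sketch of the range-search semantics described for keys(), minKey() and maxKey(), using the OO flavor for illustration (expected results in comments):

    from BTrees.OOBTree import OOBTree

    t = OOBTree({'a': 1, 'b': 2, 'c': 3, 'd': 4})
    list(t.keys(min='b', max='d'))                  # ['b', 'c', 'd']
    list(t.keys(min='b', max='d',
                excludemin=True, excludemax=True))  # ['c']
    t.minKey()                                      # 'a'
    t.maxKey('c')                                   # largest key <= 'c', i.e. 'c'
    t.minKey('bb')                                  # smallest key >= 'bb', i.e. 'c'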
""" def values(min=None, max=None, excludemin=False, excludemax=False): """ Return a minimal sequence containing the values in the collection. Return value is an :mod:`IMinimalSequence `. The type of the ``IMinimalSequence`` is not specified. It could be a `list` or a `tuple` or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a *min* is specified, then output is constrained to values whose keys are greater than or equal to the given *min*, and, if *excludemin* is specified and true, is further constrained to values whose keys are strictly greater than *min*. A *min* value of `None` is ignored. If *min* is `None` or not specified, and *excludemin* is true, the value corresponding to the smallest key is excluded. If a *max* is specified, then output is constrained to values whose keys are less than or equal to the given *max*, and, if *excludemax* is specified and true, is further constrained to values whose keys are strictly less than *max*. A *max* value of `None` is ignored. If *max* is `None` or not specified, and *excludemax* is true, the value corresponding to the largest key is excluded. """ def items(min=None, max=None, excludemin=False, excludemax=False): """ Return an ``IMinimalSequence`` containing the items in the collection. An item is a 2-tuple, a ``(key, value)`` pair. The type of the ``IMinimalSequence`` is not specified. It could be a `list` or a `tuple` or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a *min* is specified, then output is constrained to items whose keys are greater than or equal to the given *min*, and, if *excludemin* is specified and true, is further constrained to items whose keys are strictly greater than *min*. A *min* value of `None` is ignored. If *min* is `None` or not specified, and *excludemin* is true, the item with the smallest key is excluded. If a *max* is specified, then output is constrained to items whose keys are less than or equal to the given *max*, and, if *excludemax is specified and true, is further constrained to items whose keys are strictly less than *max*. A *max* value of `None` is ignored. If *max* is `None` or not specified, and *excludemax* is true, the item with the largest key is excluded. """ class IDictionaryIsh(IMinimalDictionary): def update(collection): """Add the items from the given collection object to the collection. The input collection must be a sequence of (key, value) 2-tuples, or an object with an 'items' method that returns a sequence of (key, value) pairs. """ def byValue(minValue): """Return a sequence of (value, key) pairs, sorted by value. Values < minValue are omitted and other values are "normalized" by the minimum value. This normalization may be a noop, but, for integer values, the normalization is division. """ def setdefault(key, d): """D.setdefault(k, d) -> D.get(k, d), also set D[k]=d if k not in D. Return the value like :meth:`~BTrees.Interfaces.IMinimalDictionary.get` except that if key is missing, d is both returned and inserted into the dictionary as the value of k. Note that, unlike as for Python's :meth:`dict.setdefault`, d is not optional. Python defaults d to None, but that doesn't make sense for mappings that can't have None as a value (for example, an :class:`~BTrees.IIBTree.IIBTree` can have only integers as values). """ def pop(key, d): """D.pop(k[, d]) -> v, remove key and return the corresponding value. 
If key is not found, d is returned if given, otherwise :class:`KeyError` is raised. """ def popitem(): """ D.popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty. .. versionadded:: 4.8.0 """ class IBTree(IDictionaryIsh): def insert(key, value): """Insert a key and value into the collection. If the key was already in the collection, then there is no change and 0 is returned. If the key was not already in the collection, then the item is added and 1 is returned. This method is here to allow one to generate random keys and to insert and test whether the key was there in one operation. A standard idiom for generating new keys will be:: key = generate_key() while not t.insert(key, value): key=generate_key() """ def __and__(other): """Shortcut for :meth:`~BTrees.Interfaces.IMerge.intersection`""" def __or__(other): """Shortcut for :meth:`~BTrees.Interfaces.IMerge.union`""" def __sub__(other): """Shortcut for :meth:`~BTrees.Interfaces.IMerge.difference`""" class IMerge(Interface): """Object with methods for merging sets, buckets, and trees. These methods are supplied in modules that define collection classes with particular key and value types. The operations apply only to collections from the same module. For example, the :meth:`BTrees.IIBTree.IIBTree.union` can only be used with :class:`~BTrees.IIBTree.IIBTree`, :class:`~BTrees.IIBTree.IIBucket`, :class:`~BTrees.IIBTree.IISet`, and :class:`~BTrees.IIBTree.IITreeSet`. The number protocols methods ``__and__``, ``__or__`` and ``__sub__`` are provided by all the data structures. They are shortcuts for :meth:`~BTrees.Interfaces.IMerge.intersection`, :meth:`~BTrees.Interfaces.IMerge.union` and :meth:`~BTrees.Interfaces.IMerge.difference`. The implementing module has a value type. The :class:`~BTrees.IOBTree.IOBTree` and :class:`~BTrees.OOBTree.OOBTree` modules have object value type. The :class:`~BTrees.IIBTree.IIBTree` and :class:`~BTrees.OIBTree.OIBTree` modules have integer value types. Other modules may be defined in the future that have other value types. The individual types are classified into set (Set and TreeSet) and mapping (Bucket and BTree) types. """ def difference(c1, c2): """Return the keys or items in c1 for which there is no key in c2. If c1 is None, then None is returned. If c2 is None, then c1 is returned. If neither c1 nor c2 is None, the output is a Set if c1 is a Set or TreeSet, and is a Bucket if c1 is a Bucket or BTree. While *c1* must be one of those types, *c2* can be any Python iterable returning the correct types of objects. .. versionchanged:: 4.8.0 Add support for *c2* to be an arbitrary iterable. """ def union(c1, c2): """Compute the Union of c1 and c2. If c1 is None, then c2 is returned, otherwise, if c2 is None, then c1 is returned. The output is a Set containing keys from the input collections. *c1* and *c2* can be any Python iterables returning the correct type of objects. .. versionchanged:: 4.8.0 Add support for arbitrary iterables. """ def intersection(c1, c2): """Compute the intersection of c1 and c2. If c1 is None, then c2 is returned, otherwise, if c2 is None, then c1 is returned. The output is a Set containing matching keys from the input collections. *c1* and *c2* can be any Python iterables returning the correct type of objects. .. versionchanged:: 4.8.0 Add support for arbitrary iterables. """ class IBTreeModule(Interface): """These are available in all modules (IOBTree, OIBTree, OOBTree, IIBTree, IFBTree, LFBTree, LOBTree, OLBTree, and LLBTree). 
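The insert() return value and the __and__/__or__/__sub__ shortcuts described above can be exercised as in this minimal sketch, using the OOBTree module for illustration (expected results in comments):

    from BTrees.OOBTree import OOBTree, difference, intersection, union

    t = OOBTree()
    t.insert('k1', 'v1')      # 1: the key was new
    t.insert('k1', 'other')   # 0: already present, the stored value is unchanged

    a = OOBTree({'x': 1, 'y': 2})
    b = OOBTree({'y': 20, 'z': 30})
    list(union(a, b))         # an OOSet of keys: ['x', 'y', 'z']
    list(intersection(a, b))  # ['y']
    list(difference(a, b))    # an OOBucket keeping a's values for keys not in b: ['x']
    list(a | b), list(a & b), list(a - b)   # the operator shortcuts do the same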
""" BTree = Attribute( """The IBTree for this module. Also available as [prefix]BTree, as in IOBTree.""") Bucket = Attribute( """The leaf-node data buckets used by the BTree. (IBucket is not currently defined in this file, but is essentially IDictionaryIsh, with the exception of __nonzero__, as of this writing.) Also available as [prefix]Bucket, as in IOBucket.""") TreeSet = Attribute( """The ITreeSet for this module. Also available as [prefix]TreeSet, as in IOTreeSet.""") Set = Attribute( """The ISet for this module: the leaf-node data buckets used by the TreeSet. Also available as [prefix]BTree, as in IOSet.""") class IIMerge(IMerge): """Merge collections with integer value type. A primary intent is to support operations with no or integer values, which are used as "scores" to rate indiviual keys. That is, in this context, a BTree or Bucket is viewed as a set with scored keys, using integer scores. """ def weightedUnion(c1, c2, weight1=1, weight2=1): """Compute the weighted union of c1 and c2. If c1 and c2 are None, the output is (0, None). If c1 is None and c2 is not None, the output is (weight2, c2). If c1 is not None and c2 is None, the output is (weight1, c1). Else, and hereafter, c1 is not None and c2 is not None. If c1 and c2 are both sets, the output is 1 and the (unweighted) union of the sets. Else the output is 1 and a Bucket whose keys are the union of c1 and c2's keys, and whose values are:: v1*weight1 + v2*weight2 where: v1 is 0 if the key is not in c1 1 if the key is in c1 and c1 is a set c1[key] if the key is in c1 and c1 is a mapping v2 is 0 if the key is not in c2 1 if the key is in c2 and c2 is a set c2[key] if the key is in c2 and c2 is a mapping Note that c1 and c2 must be collections. """ def weightedIntersection(c1, c2, weight1=1, weight2=1): """Compute the weighted intersection of c1 and c2. If c1 and c2 are None, the output is (0, None). If c1 is None and c2 is not None, the output is (weight2, c2). If c1 is not None and c2 is None, the output is (weight1, c1). Else, and hereafter, c1 is not None and c2 is not None. If c1 and c2 are both sets, the output is the sum of the weights and the (unweighted) intersection of the sets. Else the output is 1 and a Bucket whose keys are the intersection of c1 and c2's keys, and whose values are:: v1*weight1 + v2*weight2 where: v1 is 1 if c1 is a set c1[key] if c1 is a mapping v2 is 1 if c2 is a set c2[key] if c2 is a mapping Note that c1 and c2 must be collections. """ class IMergeIntegerKey(IMerge): """:class:`~BTrees.Interfaces.IMerge`-able objects with integer keys. Concretely, this means the types in :class:`~BTree.IOBTree.IOBTree` and :class:`~BTrees.IIBTree.IIBTree`. """ def multiunion(seq): """Return union of (zero or more) integer sets, as an integer set. seq is a sequence of objects each convertible to an integer set. These objects are convertible to an integer set: + An integer, which is added to the union. + A Set or TreeSet from the same module (for example, an :class:`BTrees.IIBTree.TreeSet` for :meth:`BTrees.IIBTree.multiunion`). The elements of the set are added to the union. + A Bucket or BTree from the same module (for example, an :class:`BTrees.IOBTree.IOBTree` for :meth:`BTrees.IOBTree.multiunion`). The keys of the mapping are added to the union. + Any iterable Python object that iterates across integers. This will be slower than the above types. The union is returned as a Set from the same module (for example, :meth:`BTrees.IIBTree.multiunion` returns an :class:`BTrees.IIBTree.IISet`). 
The point to this method is that it can run much faster than doing a sequence of two-input :meth:`~BTrees.Interfaces.IMerge.union` calls. Under the covers, all the integers in all the inputs are sorted via a single linear-time radix sort, then duplicates are removed in a second linear-time pass. .. versionchanged:: 4.8.0 Add support for arbitrary iterables of integers. """ class IBTreeFamily(Interface): """the 64-bit or 32-bit family""" IF = Attribute('The IIntegerFloatBTreeModule for this family') II = Attribute('The IIntegerIntegerBTreeModule for this family') IO = Attribute('The IIntegerObjectBTreeModule for this family') IU = Attribute('The IIntegerUnsignedBTreeModule for this family') UF = Attribute('The IUnsignedFloatBTreeModule for this family') UI = Attribute('The IUnsignedIntegerBTreeModule for this family') UO = Attribute('The IUnsignedObjectBTreeModule for this family') UU = Attribute('The IUnsignedUnsignedBTreeModule for this family') OI = Attribute('The IObjectIntegerBTreeModule for this family') OO = Attribute('The IObjectObjectBTreeModule for this family') OU = Attribute('The IObjectUnsignedBTreeModule for this family') maxint = Attribute('The maximum signed integer storable in this family') maxuint = Attribute('The maximum unsigned integer storable in this family') minint = Attribute('The minimum signed integer storable in this family') class _IMergeBTreeModule(IBTreeModule, IMerge): family = Attribute('The IBTreeFamily of this module') class IIntegerObjectBTreeModule(_IMergeBTreeModule): """Keys, or set values, are signed ints; values are objects. Describes IOBTree and LOBTree. """ class IUnsignedObjectBTreeModule(_IMergeBTreeModule): """Keys, or set values, are unsigned ints; values are objects. Describes UOBTree and QOBTree. """ class IObjectIntegerBTreeModule(_IMergeBTreeModule): """Keys, or set values, are objects; values are signed ints. Object keys (and set values) must sort reliably (for instance, *not* on object id)! Homogenous key types recommended. Describes OIBTree and OLBTree. """ class IObjectUnsignedBTreeModule(_IMergeBTreeModule): """Keys, or set values, are objects; values are signed ints. Object keys (and set values) must sort reliably (for instance, *not* on object id)! Homogenous key types recommended. Describes OUBTree and OQBTree. """ class IIntegerIntegerBTreeModule(_IMergeBTreeModule, IMergeIntegerKey): """Keys, or set values, are signed integers; values are signed integers. Describes IIBTree and LLBTree """ class IUnsignedUnsignedBTreeModule(_IMergeBTreeModule, IMergeIntegerKey): """Keys, or set values, are unsigned ints; values are unsigned ints. Describes UUBTree and QQBTree """ class IUnsignedIntegerBTreeModule(_IMergeBTreeModule, IMergeIntegerKey): """Keys, or set values, are unsigned ints; values are signed ints. Describes UIBTree and QLBTree """ class IIntegerUnsignedBTreeModule(_IMergeBTreeModule, IMergeIntegerKey): """Keys, or set values, are signed ints; values are unsigned ints. Describes IUBTree and LQBTree """ class IObjectObjectBTreeModule(IBTreeModule, IMerge): """Keys, or set values, are objects; values are also objects. Object keys (and set values) must sort reliably (for instance, *not* on object id)! Homogenous key types recommended. Note that there's no ``family`` attribute; all families include the OO flavor of BTrees. Describes OOBTree """ class IIntegerFloatBTreeModule(_IMergeBTreeModule): """Keys, or set values, are signed ints; values are floats. 
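The family objects make the module interfaces above concrete; a small sketch (the exact classes behind each attribute are an implementation detail):

    import BTrees

    BTrees.family32.IO.BTree   # the 32-bit integer-keyed, object-valued BTree
    BTrees.family64.IO.BTree   # the 64-bit flavor of the same module
    BTrees.family32.maxint     # 2**31 - 1
    BTrees.family64.maxint     # 2**63 - 1
    BTrees.family64.minint     # -2**63
    # Both families point at the same object-keyed/object-valued module:
    BTrees.family32.OO is BTrees.family64.OO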
Describes IFBTree and LFBTree """ class IUnsignedFloatBTreeModule(_IMergeBTreeModule): """Keys, or set values, are unsigned ints; values are floats. Describes UFBTree and QFBTree """ try: from ZODB.POSException import BTreesConflictError except ImportError: class BTreesConflictError(ValueError): @property def reason(self): return self.args[-1] ############################################################### # IMPORTANT NOTE # # Getting the length of a BTree, TreeSet, or output of keys, # values, or items of same is expensive. If you need to get the # length, you need to maintain this separately. # # Eventually, I need to express this through the interfaces. # ################################################################ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1715872922.0 BTrees-6.0/src/BTrees/Length.py0000644000076500000240000000362214621422232015174 0ustar00jensstaff############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import persistent class Length(persistent.Persistent): """BTree lengths are often too expensive to compute. Objects that use BTrees need to keep track of lengths themselves. This class provides an object for doing this. As a bonus, the object support application-level conflict resolution. It is tempting to to assign length objects to __len__ attributes to provide instance-specific __len__ methods. However, this no longer works as expected, because new-style classes cache class-defined slot methods (like __len__) in C type slots. Thus, instance-defined slot fillers are ignored. """ # class-level default required to keep copy.deepcopy happy -- see # https://bugs.launchpad.net/zodb/+bug/516653 value = 0 def __init__(self, v=0): self.value = v def __getstate__(self): return self.value def __setstate__(self, v): self.value = v def set(self, v): "Set the length value to v." self.value = v def _p_resolveConflict(self, old, s1, s2): return s1 + s2 - old def change(self, delta): "Add delta to the length value." self.value += delta def __call__(self, *args): "Return the current length value." return self.value ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/MergeTemplate.c0000644000076500000240000002744614330745562016325 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
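The Length class shown above is typically used alongside a tree; a minimal sketch of that pattern (the helper name add() is just for illustration):

    from BTrees.Length import Length
    from BTrees.OOBTree import OOBTree

    index = OOBTree()
    size = Length()

    def add(key, value):
        # insert() returns 1 only when the key is really new, so the
        # counter stays consistent even for repeated keys.
        if index.insert(key, value):
            size.change(1)

    add('a', 1)
    add('b', 2)
    add('a', 99)   # duplicate key: not inserted, not counted
    size()         # -> 2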
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #define MERGETEMPLATE_C "$Id$\n" /**************************************************************************** Set operations ****************************************************************************/ static int merge_output(Bucket *r, SetIteration *i, int mapping) { if (r->len >= r->size && Bucket_grow(r, -1, !mapping) < 0) return -1; COPY_KEY(r->keys[r->len], i->key); INCREF_KEY(r->keys[r->len]); if (mapping) { COPY_VALUE(r->values[r->len], i->value); INCREF_VALUE(r->values[r->len]); } r->len++; return 0; } /* The "reason" argument is a little integer giving "a reason" for the * error. In the Zope3 codebase, these are mapped to explanatory strings * via zodb/btrees/interfaces.py. */ static PyObject * merge_error(int p1, int p2, int p3, int reason) { PyObject *r; UNLESS (r=Py_BuildValue("iiii", p1, p2, p3, reason)) r=Py_None; if (ConflictError == NULL) { ConflictError = PyExc_ValueError; Py_INCREF(ConflictError); } PyErr_SetObject(ConflictError, r); if (r != Py_None) { Py_DECREF(r); } return NULL; } /* It's hard to explain "the rules" for bucket_merge, in large part because * any automatic conflict-resolution scheme is going to be incorrect for * some endcases of *some* app. The scheme here is pretty conservative, * and should be OK for most apps. It's easier to explain what the code * allows than what it forbids: * * Leaving things alone: it's OK if both s2 and s3 leave a piece of s1 * alone (don't delete the key, and don't change the value). * * Key deletion: a transaction (s2 or s3) can delete a key (from s1), but * only if the other transaction (of s2 and s3) doesn't delete the same key. * However, it's not OK for s2 and s3 to, between them, end up deleting all * the keys. This is a higher-level constraint, due to that the caller of * bucket_merge() doesn't have enough info to unlink the resulting empty * bucket from its BTree correctly. It's also not OK if s2 or s3 are empty, * because the transaction that emptied the bucket unlinked the bucket from * the tree, and nothing we do here can get it linked back in again. * * Key insertion: s2 or s3 can add a new key, provided the other transaction * doesn't insert the same key. It's not OK even if they insert the same * pair. * * Mapping value modification: s2 or s3 can modify the value associated * with a key in s1, provided the other transaction doesn't make a * modification of the same key to a different value. It's OK if s2 and s3 * both give the same new value to the key while it's hard to be precise about * why, this doesn't seem consistent with that it's *not* OK for both to add * a new key mapping to the same value). */ static PyObject * bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3) { Bucket *r=0; PyObject *s; SetIteration i1 = {0,0,0}, i2 = {0,0,0}, i3 = {0,0,0}; int cmp12, cmp13, cmp23, mapping, set; /* If either "after" bucket is empty, punt. 
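The merge rules described in the comment above are reachable from Python through _p_resolveConflict(); a hedged sketch (the state tuples are an internal format, shown here only round-tripped through __getstate__/__setstate__):

    from BTrees.OOBTree import OOBucket

    old = OOBucket({'a': 1, 'b': 2})
    committed = OOBucket({'a': 1, 'b': 2, 'c': 3})   # one transaction inserts 'c'
    new = OOBucket({'a': 10, 'b': 2})                # the other one changes 'a'

    merged = OOBucket()
    merged.__setstate__(
        old._p_resolveConflict(old.__getstate__(),
                               committed.__getstate__(),
                               new.__getstate__()))
    dict(merged.items())   # {'a': 10, 'b': 2, 'c': 3}
    # Conflicting changes to the same key, or merges that would empty the
    # bucket, raise a conflict error instead (the numeric "reason" codes above).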
*/ if (s2->len == 0 || s3->len == 0) { merge_error(-1, -1, -1, 12); goto err; } if (initSetIteration(&i1, OBJECT(s1), 1) < 0) goto err; if (initSetIteration(&i2, OBJECT(s2), 1) < 0) goto err; if (initSetIteration(&i3, OBJECT(s3), 1) < 0) goto err; mapping = i1.usesValue | i2.usesValue | i3.usesValue; set = !mapping; if (mapping) r = (Bucket *)PyObject_CallObject((PyObject *)&BucketType, NULL); else r = (Bucket *)PyObject_CallObject((PyObject *)&SetType, NULL); if (r == NULL) goto err; if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; if (i3.next(&i3) < 0) goto err; /* Consult zodb/btrees/interfaces.py for the meaning of the last * argument passed to merge_error(). */ /* TODO: This isn't passing on errors raised by value comparisons. */ while (i1.position >= 0 && i2.position >= 0 && i3.position >= 0) { TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err; TEST_KEY_SET_OR(cmp13, i1.key, i3.key) goto err; if (cmp12==0) { if (cmp13==0) { if (set || (TEST_VALUE(i1.value, i2.value) == 0)) { /* change in i3 value or all same */ if (merge_output(r, &i3, mapping) < 0) goto err; } else if (set || (TEST_VALUE(i1.value, i3.value) == 0)) { /* change in i2 value */ if (merge_output(r, &i2, mapping) < 0) goto err; } else { /* conflicting value changes in i2 and i3 */ merge_error(i1.position, i2.position, i3.position, 1); goto err; } if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else if (cmp13 > 0) { /* insert i3 */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else if (set || (TEST_VALUE(i1.value, i2.value) == 0)) { /* deleted in i3 */ if (i3.position == 1) { /* Deleted the first item. This will modify the parent node, so we don't know if merging will be safe */ merge_error(i1.position, i2.position, i3.position, 13); goto err; } if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; } else { /* conflicting del in i3 and change in i2 */ merge_error(i1.position, i2.position, i3.position, 2); goto err; } } else if (cmp13 == 0) { if (cmp12 > 0) { /* insert i2 */ if (merge_output(r, &i2, mapping) < 0) goto err; if (i2.next(&i2) < 0) goto err; } else if (set || (TEST_VALUE(i1.value, i3.value) == 0)) { /* deleted in i2 */ if (i2.position == 1) { /* Deleted the first item. 
This will modify the parent node, so we don't know if merging will be safe */ merge_error(i1.position, i2.position, i3.position, 13); goto err; } if (i1.next(&i1) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else { /* conflicting del in i2 and change in i3 */ merge_error(i1.position, i2.position, i3.position, 3); goto err; } } else { /* Both keys changed */ TEST_KEY_SET_OR(cmp23, i2.key, i3.key) goto err; if (cmp23==0) { /* dueling inserts or deletes */ merge_error(i1.position, i2.position, i3.position, 4); goto err; } if (cmp12 > 0) { /* insert i2 */ if (cmp23 > 0) { /* insert i3 first */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else { /* insert i2 first */ if (merge_output(r, &i2, mapping) < 0) goto err; if (i2.next(&i2) < 0) goto err; } } else if (cmp13 > 0) { /* Insert i3 */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else { /* 1<2 and 1<3: both deleted 1.key */ merge_error(i1.position, i2.position, i3.position, 5); goto err; } } } while (i2.position >= 0 && i3.position >= 0) { /* New inserts */ TEST_KEY_SET_OR(cmp23, i2.key, i3.key) goto err; if (cmp23==0) { /* dueling inserts */ merge_error(i1.position, i2.position, i3.position, 6); goto err; } if (cmp23 > 0) { /* insert i3 */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else { /* insert i2 */ if (merge_output(r, &i2, mapping) < 0) goto err; if (i2.next(&i2) < 0) goto err; } } while (i1.position >= 0 && i2.position >= 0) { /* remainder of i1 deleted in i3 */ TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err; if (cmp12 > 0) { /* insert i2 */ if (merge_output(r, &i2, mapping) < 0) goto err; if (i2.next(&i2) < 0) goto err; } else if (cmp12==0 && (set || (TEST_VALUE(i1.value, i2.value) == 0))) { /* delete i3 */ if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; } else { /* Dueling deletes or delete and change */ merge_error(i1.position, i2.position, i3.position, 7); goto err; } } while (i1.position >= 0 && i3.position >= 0) { /* remainder of i1 deleted in i2 */ TEST_KEY_SET_OR(cmp13, i1.key, i3.key) goto err; if (cmp13 > 0) { /* insert i3 */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else if (cmp13==0 && (set || (TEST_VALUE(i1.value, i3.value) == 0))) { /* delete i2 */ if (i1.next(&i1) < 0) goto err; if (i3.next(&i3) < 0) goto err; } else { /* Dueling deletes or delete and change */ merge_error(i1.position, i2.position, i3.position, 8); goto err; } } if (i1.position >= 0) { /* Dueling deletes */ merge_error(i1.position, i2.position, i3.position, 9); goto err; } while (i2.position >= 0) { /* Inserting i2 at end */ if (merge_output(r, &i2, mapping) < 0) goto err; if (i2.next(&i2) < 0) goto err; } while (i3.position >= 0) { /* Inserting i3 at end */ if (merge_output(r, &i3, mapping) < 0) goto err; if (i3.next(&i3) < 0) goto err; } /* If the output bucket is empty, conflict resolution doesn't have * enough info to unlink it from its containing BTree correctly. 
*/ if (r->len == 0) { merge_error(-1, -1, -1, 10); goto err; } finiSetIteration(&i1); finiSetIteration(&i2); finiSetIteration(&i3); if (s1->next) { Py_INCREF(s1->next); r->next = s1->next; } s = bucket_getstate(r); Py_DECREF(r); return s; err: finiSetIteration(&i1); finiSetIteration(&i2); finiSetIteration(&i3); Py_XDECREF(r); return NULL; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/SetOpTemplate.c0000644000076500000240000004156014330745562016311 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ /**************************************************************************** Set operations ****************************************************************************/ #include "SetOpTemplate.h" #define SETOPTEMPLATE_C "$Id$\n" #ifdef KEY_CHECK static int nextKeyAsSet(SetIteration *i) { if (i->position >= 0) { if (i->position) { DECREF_KEY(i->key); i->position = -1; } else i->position = 1; } return 0; } #endif static int nextGenericKeyIter(SetIteration* i) { PyObject* next = NULL; int copied = 1; if (i->position < 0) { /* Already finished. Do nothing. */ return 0; } if (i->position) { /* If we've been called before, release the key cache. */ DECREF_KEY(i->key); } i->position += 1; next = PyIter_Next(i->set); if (next == NULL) { /* Either an error, or the end of iteration. */ if (!PyErr_Occurred()) { /* End of iteration. */ i->position = -1; return 0; } /* Propagate the error. */ return -1; } COPY_KEY_FROM_ARG(i->key, next, copied); Py_DECREF(next); UNLESS(copied) return -1; INCREF_KEY(i->key); return 0; } /* initSetIteration * * Start the set iteration protocol. See the comments at struct SetIteration. * * Arguments * i The address of a SetIteration control struct. * s The address of the set, bucket, BTree, ..., to be iterated. * useValues Boolean; if true, and s has values (is a mapping), copy * them into i->value each time i->next() is called; else * ignore s's values even if s is a mapping. * * Return * 0 on success; -1 and an exception set if error. * i.usesValue is set to 1 (true) if s has values and useValues was * true; else usesValue is set to 0 (false). * i.set gets a new reference to s, or to some other object used to * iterate over s. * i.position is set to 0. * i.next is set to an appropriate iteration function. * i.key and i.value are left alone. * * Internal * i.position < 0 means iteration terminated. * i.position = 0 means iteration hasn't yet begun (next() hasn't * been called yet). * In all other cases, i.key, and possibly i.value, own references. * These must be cleaned up, either by next() routines, or by * finiSetIteration. * next() routines must ensure the above. They should return without * doing anything when i.position < 0. * It's the responsibility of {init, fini}setIteration to clean up * the reference in i.set, and to ensure that no stale references * live in i.key or i.value if iteration terminates abnormally. 
* A SetIteration struct has been cleaned up iff i.set is NULL. */ static int initSetIteration(SetIteration *i, PyObject *s, int useValues) { i->set = NULL; i->position = -1; /* set to 0 only on normal return */ i->usesValue = 0; /* assume it's a set or that values aren't iterated */ if (PyObject_IsInstance(s, (PyObject *)&BucketType)) { i->set = s; Py_INCREF(s); if (useValues) { i->usesValue = 1; i->next = nextBucket; } else i->next = nextSet; } else if (PyObject_IsInstance(s, (PyObject *)&SetType)) { i->set = s; Py_INCREF(s); i->next = nextSet; } else if (PyObject_IsInstance(s, (PyObject *)&BTreeType)) { i->set = BTree_rangeSearch(BTREE(s), NULL, NULL, 'i'); UNLESS(i->set) return -1; if (useValues) { i->usesValue = 1; i->next = nextBTreeItems; } else i->next = nextTreeSetItems; } else if (PyObject_IsInstance(s, (PyObject *)&TreeSetType)) { i->set = BTree_rangeSearch(BTREE(s), NULL, NULL, 'k'); UNLESS(i->set) return -1; i->next = nextTreeSetItems; } #ifdef KEY_CHECK else if (KEY_CHECK(s)) { int copied = 1; COPY_KEY_FROM_ARG(i->key, s, copied); UNLESS (copied) return -1; INCREF_KEY(i->key); i->set = s; Py_INCREF(s); i->next = nextKeyAsSet; } #endif else if (!useValues) { /* If we don't need keys and values, we can just use an iterator. */ /* Unfortunately, it can't be just any iterator, it must be sorted for the set algorithms to work. So we must materialize a list and sort it. If this raises a TypeError, let that propagate. */ /* Error detection on types is moved to the next() call. */ /* This is slower, but very convenient. */ PyObject* list = PySequence_List(s); UNLESS(list) return -1; if (PyList_Sort(list) == -1) { Py_DECREF(list); return -1; } /* The reference to the iterater will keep the list alive */ i->set = PyObject_GetIter(list); Py_DECREF(list); UNLESS(i->set) return -1; i->next = nextGenericKeyIter; } else { PyErr_SetString(PyExc_TypeError, "set operation: invalid argument, cannot iterate"); return -1; } i->position = 0; return 0; } #ifndef MERGE_WEIGHT #define MERGE_WEIGHT(O, w) (O) #endif static int copyRemaining(Bucket *r, SetIteration *i, int merge, /* See comment # 42 */ #ifdef MERGE VALUE_TYPE w) #else int w) #endif { while (i->position >= 0) { if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) return -1; COPY_KEY(r->keys[r->len], i->key); INCREF_KEY(r->keys[r->len]); if (merge) { COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i->value, w)); INCREF_VALUE(r->values[r->len]); } r->len++; if (i->next(i) < 0) return -1; } return 0; } /* This is the workhorse for all set merge operations: the weighted and * unweighted flavors of union and intersection, and set difference. The * algorithm is conceptually simple but the code is complicated due to all * the options. * * s1, s2 * The input collections to be merged. * * usevalues1, usevalues2 * Booleans. In the output, should values from s1 (or s2) be used? This * only makes sense when an operation intends to support mapping outputs; * these should both be false for operations that want pure set outputs. * * w1, w2 * If usevalues1(2) are true, these are the weights to apply to the * input values. * * c1 * Boolean. Should keys that appear in c1 but not c2 appear in the output? * c12 * Boolean. Should keys that appear in both inputs appear in the output? * c2 * Boolean. Should keys that appear in c2 but not c1 appear in the output? * * Returns NULL if error, else a Set or Bucket, depending on whether a set or * mapping was requested. 
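As the comments above note, when values are not needed a set-operation operand may be any iterable; it is materialized and sorted before the merge runs. A short sketch (expected results in comments):

    from BTrees.OOBTree import OOSet, difference, intersection, union

    s = OOSet(['b', 'c', 'd'])
    list(union(s, ['a', 'e']))          # ['a', 'b', 'c', 'd', 'e']
    list(intersection(s, ('c', 'x')))   # ['c']
    list(difference(s, iter(['b'])))    # ['c', 'd']
    list(union(None, s))                # None operands are passed through unchanged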
*/ static PyObject * set_operation(PyObject *s1, PyObject *s2, int usevalues1, int usevalues2, /* Comment # 42 The following ifdef works around a template/type problem Weights are passed as integers. In particular, the weight passed by difference is one. This works fine in the int value and float value cases but makes no sense in the object value case. In the object value case, we don't do merging, so we don't use the weights, so it doesn't matter what they are. */ #ifdef MERGE VALUE_TYPE w1, VALUE_TYPE w2, #else int w1, int w2, #endif int c1, int c12, int c2) { Bucket *r=0; SetIteration i1 = {0,0,0}, i2 = {0,0,0}; int cmp, merge; if (initSetIteration(&i1, s1, usevalues1) < 0) goto err; if (initSetIteration(&i2, s2, usevalues2) < 0) goto err; merge = i1.usesValue | i2.usesValue; if (merge) { #ifndef MERGE if (c12 && i1.usesValue && i2.usesValue) goto invalid_set_operation; #endif if (! i1.usesValue&& i2.usesValue) { SetIteration t; int i; /* See comment # 42 above */ #ifdef MERGE VALUE_TYPE v; #else int v; #endif t=i1; i1=i2; i2=t; i=c1; c1=c2; c2=i; v=w1; w1=w2; w2=v; } #ifdef MERGE_DEFAULT i1.value=MERGE_DEFAULT; i2.value=MERGE_DEFAULT; #else if (i1.usesValue) { if (! i2.usesValue && c2) goto invalid_set_operation; } else { if (c1 || c12) goto invalid_set_operation; } #endif UNLESS(r=BUCKET(PyObject_CallObject(OBJECT(&BucketType), NULL))) goto err; } else { UNLESS(r=BUCKET(PyObject_CallObject(OBJECT(&SetType), NULL))) goto err; } if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; while (i1.position >= 0 && i2.position >= 0) { TEST_KEY_SET_OR(cmp, i1.key, i2.key) goto err; if(cmp < 0) { if(c1) { if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) goto err; COPY_KEY(r->keys[r->len], i1.key); INCREF_KEY(r->keys[r->len]); if (merge) { COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i1.value, w1)); INCREF_VALUE(r->values[r->len]); } r->len++; } if (i1.next(&i1) < 0) goto err; } else if(cmp==0) { if(c12) { if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) goto err; COPY_KEY(r->keys[r->len], i1.key); INCREF_KEY(r->keys[r->len]); if (merge) { #ifdef MERGE r->values[r->len] = MERGE(i1.value, w1, i2.value, w2); #else COPY_VALUE(r->values[r->len], i1.value); INCREF_VALUE(r->values[r->len]); #endif } r->len++; } if (i1.next(&i1) < 0) goto err; if (i2.next(&i2) < 0) goto err; } else { if(c2) { if(r->len >= r->size && Bucket_grow(r, -1, ! 
merge) < 0) goto err; COPY_KEY(r->keys[r->len], i2.key); INCREF_KEY(r->keys[r->len]); if (merge) { COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i2.value, w2)); INCREF_VALUE(r->values[r->len]); } r->len++; } if (i2.next(&i2) < 0) goto err; } } if(c1 && copyRemaining(r, &i1, merge, w1) < 0) goto err; if(c2 && copyRemaining(r, &i2, merge, w2) < 0) goto err; finiSetIteration(&i1); finiSetIteration(&i2); return OBJECT(r); #ifndef MERGE_DEFAULT invalid_set_operation: PyErr_SetString(PyExc_TypeError, "invalid set operation"); #endif err: finiSetIteration(&i1); finiSetIteration(&i2); Py_XDECREF(r); return NULL; } static PyObject * difference_m(PyObject *ignored, PyObject *args) { PyObject *o1, *o2; UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL; if (o1 == Py_None || o2 == Py_None) { /* difference(None, X) -> None; difference(X, None) -> X */ Py_INCREF(o1); return o1; } return set_operation(o1, o2, 1, 0, /* preserve values from o1, ignore o2's */ 1, 0, /* o1's values multiplied by 1 */ 1, 0, 0); /* take only keys unique to o1 */ } static PyObject * union_m(PyObject *ignored, PyObject *args) { PyObject *o1, *o2; UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL; if (o1 == Py_None) { Py_INCREF(o2); return o2; } else if (o2 == Py_None) { Py_INCREF(o1); return o1; } return set_operation(o1, o2, 0, 0, /* ignore values in both */ 1, 1, /* the weights are irrelevant */ 1, 1, 1); /* take all keys */ } static PyObject * intersection_m(PyObject *ignored, PyObject *args) { PyObject *o1, *o2; UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL; if (o1 == Py_None) { Py_INCREF(o2); return o2; } else if (o2 == Py_None) { Py_INCREF(o1); return o1; } return set_operation(o1, o2, 0, 0, /* ignore values in both */ 1, 1, /* the weights are irrelevant */ 0, 1, 0); /* take only keys common to both */ } #ifdef MERGE static PyObject * wunion_m(PyObject *ignored, PyObject *args) { PyObject *o1, *o2; VALUE_TYPE w1 = 1, w2 = 1; UNLESS(PyArg_ParseTuple(args, "OO|" VALUE_PARSE VALUE_PARSE, &o1, &o2, &w1, &w2) ) return NULL; if (o1 == Py_None) return Py_BuildValue(VALUE_PARSE "O", (o2 == Py_None ? 0 : w2), o2); else if (o2 == Py_None) return Py_BuildValue(VALUE_PARSE "O", w1, o1); o1 = set_operation(o1, o2, 1, 1, w1, w2, 1, 1, 1); if (o1) ASSIGN(o1, Py_BuildValue(VALUE_PARSE "O", (VALUE_TYPE)1, o1)); return o1; } static PyObject * wintersection_m(PyObject *ignored, PyObject *args) { PyObject *o1, *o2; VALUE_TYPE w1 = 1, w2 = 1; UNLESS(PyArg_ParseTuple(args, "OO|" VALUE_PARSE VALUE_PARSE, &o1, &o2, &w1, &w2) ) return NULL; if (o1 == Py_None) return Py_BuildValue(VALUE_PARSE "O", (o2 == Py_None ? 0 : w2), o2); else if (o2 == Py_None) return Py_BuildValue(VALUE_PARSE "O", w1, o1); o1 = set_operation(o1, o2, 1, 1, w1, w2, 0, 1, 0); if (o1) ASSIGN(o1, Py_BuildValue(VALUE_PARSE "O", ((o1->ob_type == (PyTypeObject*)(&SetType)) ? w2+w1 : 1), o1)); return o1; } #endif #ifdef MULTI_INT_UNION #include "sorters.c" /* Input is a sequence of integer sets (or convertible to sets by the set iteration protocol). Output is the union of the sets. The point is to run much faster than doing pairs of unions. */ static PyObject * multiunion_m(PyObject *ignored, PyObject *args) { PyObject *seq; /* input sequence */ int n; /* length of input sequence */ PyObject *set = NULL; /* an element of the input sequence */ Bucket *result; /* result set */ SetIteration setiter = {0}; int i; UNLESS(PyArg_ParseTuple(args, "O", &seq)) return NULL; n = PyObject_Length(seq); if (n < 0) return NULL; /* Construct an empty result set. 
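At the Python level, the multiunion implemented here accepts sets, tree sets, mappings (only their keys), bare integers and plain iterables of integers; a sketch (expected result in the comment):

    from BTrees.IIBTree import IIBTree, IISet, IITreeSet, multiunion

    result = multiunion([
        IISet([1, 4, 7]),
        IITreeSet([4, 9]),
        IIBTree({2: 0, 7: 0}),   # only the keys contribute
        3,                       # a bare integer is added directly
        range(5, 7),             # any iterable of integers works (slower path)
    ])
    list(result)                 # an IISet: [1, 2, 3, 4, 5, 6, 7, 9]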
*/ result = BUCKET(PyObject_CallObject(OBJECT(&SetType), NULL)); if (result == NULL) return NULL; /* For each set in the input sequence, append its elements to the result set. At this point, we ignore the possibility of duplicates. */ for (i = 0; i < n; ++i) { set = PySequence_GetItem(seq, i); if (set == NULL) goto Error; /* If set is a bucket, do a straight resize + memcpy. */ if (set->ob_type == (PyTypeObject*)&SetType || set->ob_type == (PyTypeObject*)&BucketType) { Bucket *b = BUCKET(set); int status = 0; UNLESS (PER_USE(b)) goto Error; if (b->len) status = bucket_append(result, b, 0, b->len, 0, i < n-1); PER_UNUSE(b); if (status < 0) goto Error; } else { /* No cheap way: iterate over set's elements one at a time. */ if (initSetIteration(&setiter, set, 0) < 0) goto Error; if (setiter.next(&setiter) < 0) goto Error; while (setiter.position >= 0) { if (result->len >= result->size && Bucket_grow(result, -1, 1) < 0) goto Error; COPY_KEY(result->keys[result->len], setiter.key); ++result->len; /* We know the key is an int, so no need to incref it. */ if (setiter.next(&setiter) < 0) goto Error; } finiSetIteration(&setiter); } Py_DECREF(set); set = NULL; } /* Combine, sort, remove duplicates, and reset the result's len. If the set shrinks (which happens if and only if there are duplicates), no point to realloc'ing the set smaller, as we expect the result set to be short-lived. */ if (result->len > 0) { size_t newlen; /* number of elements in final result set */ newlen = sort_int_nodups(result->keys, (size_t)result->len); result->len = (int)newlen; } return (PyObject *)result; Error: Py_DECREF(result); Py_XDECREF(set); finiSetIteration(&setiter); return NULL; } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/SetOpTemplate.h0000644000076500000240000000043114330745562016306 0ustar00jensstaff# ifndef SETOPTEMPLATE_H # define SETOPTEMPLATE_H #include "Python.h" static PyObject * union_m(PyObject *ignored, PyObject *args); static PyObject * intersection_m(PyObject *ignored, PyObject *args); static PyObject * difference_m(PyObject *ignored, PyObject *args); # endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/SetTemplate.c0000644000076500000240000004677414355020716016020 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #include "_compat.h" #define SETTEMPLATE_C "$Id$\n" static PyObject * Set_insert(Bucket *self, PyObject *args) { PyObject *key; int i; UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL; if ( (i=_bucket_set(self, key, Py_None, 1, 1, 0)) < 0) return NULL; return INT_FROM_LONG(i); } /* _Set_update and _TreeSet_update are identical except for the function they call to add the element to the set. 
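The insert/update machinery implemented here backs the Python-level set API; a minimal sketch using the OO flavor for illustration (expected results in comments):

    from BTrees.OOBTree import OOSet, OOTreeSet

    s = OOSet()
    s.insert('a')           # 1: newly added
    s.add('a')              # 0: already present ('add' is an alias for 'insert')
    s.update(['b', 'c'])    # returns the number of new elements: 2

    ts = OOTreeSet('abc')   # the constructor accepts any iterable
    ts.update('bcd')        # only 'd' is new, so 1
    list(ts)                # ['a', 'b', 'c', 'd']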
*/ static int _Set_update(Bucket *self, PyObject *seq) { int n=0, ind=0; PyObject *iter, *v; iter = PyObject_GetIter(seq); if (iter == NULL) return -1; while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) goto err; else break; } ind = _bucket_set(self, v, Py_None, 1, 1, 0); Py_DECREF(v); if (ind < 0) goto err; else n += ind; } err: Py_DECREF(iter); if (ind < 0) return -1; return n; } static PyObject * Set_update(Bucket *self, PyObject *args) { PyObject *seq = NULL; int n = 0; if (!PyArg_ParseTuple(args, "|O:update", &seq)) return NULL; if (seq) { n = _Set_update(self, seq); if (n < 0) return NULL; } return INT_FROM_LONG(n); } static PyObject * Set_remove(Bucket *self, PyObject *args) { PyObject *key; UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL; if (_bucket_set(self, key, NULL, 0, 1, 0) < 0) return NULL; Py_INCREF(Py_None); return Py_None; } static PyObject* Set_discard(Bucket* self, PyObject* args) { PyObject *key; UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL; if (_bucket_set(self, key, NULL, 0, 1, 0) < 0) { if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); } else if (PyErr_ExceptionMatches(PyExc_TypeError)) { /* Failed to compare, so it can't be in the tree. */ PyErr_Clear(); } else { return NULL; } } Py_INCREF(Py_None); return Py_None; } static PyObject* Set_pop(Bucket* self, PyObject* args) { PyObject* result = NULL; PyObject* key = NULL; PyObject* remove_args = NULL; PyObject* remove_result = NULL; if (PyTuple_Size(args) != 0) { PyErr_SetString(PyExc_TypeError, "pop(): Takes no arguments."); return NULL; } key = Bucket_minKey(self, args); /* reuse existing empty tuple */ if (!key) { PyErr_Clear(); PyErr_SetString(PyExc_KeyError, "pop(): empty bucket."); return NULL; } remove_args = PyTuple_Pack(1, key); if (remove_args) { remove_result = Set_remove(self, remove_args); Py_DECREF(remove_args); if (remove_result) { Py_INCREF(key); result = key; Py_DECREF(remove_result); } } return result; } static PyObject* Set_isdisjoint(Bucket* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; int contained = 0; if (other == (PyObject*)self) { if (self->len == 0) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } iter = PyObject_GetIter(other); if (iter == NULL) { return NULL; } while (1) { if (result != NULL) { break; } v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } contained = bucket_contains(self, v); if (contained == -1) { goto err; } if (contained == 1) { result = Py_False; } Py_DECREF(v); } if (result == NULL) { result = Py_True; } Py_INCREF(result); err: Py_DECREF(iter); return result; } static int _set_setstate(Bucket *self, PyObject *args) { PyObject *k, *items; Bucket *next=0; int i, l, copied=1; KEY_TYPE *keys; UNLESS (PyArg_ParseTuple(args, "O|O", &items, &next)) return -1; if (!PyTuple_Check(items)) { PyErr_SetString(PyExc_TypeError, "tuple required for first state element"); return -1; } if ((l=PyTuple_Size(items)) < 0) return -1; for (i=self->len; --i >= 0; ) { DECREF_KEY(self->keys[i]); } self->len=0; if (self->next) { Py_DECREF(self->next); self->next=0; } if (l > self->size) { UNLESS (keys=BTree_Realloc(self->keys, sizeof(KEY_TYPE)*l)) return -1; self->keys=keys; self->size=l; } for (i=0; ikeys[i], k, copied); UNLESS (copied) return -1; INCREF_KEY(self->keys[i]); } self->len=l; if (next) { self->next=next; Py_INCREF(next); } return 0; } static PyObject * set_setstate(Bucket *self, PyObject *args) { int r; UNLESS (PyArg_ParseTuple(args, "O", &args)) return 
NULL; PER_PREVENT_DEACTIVATION(self); r=_set_setstate(self, args); PER_UNUSE(self); if (r < 0) return NULL; Py_INCREF(Py_None); return Py_None; } static struct PyMethodDef Set_methods[] = { {"__getstate__", (PyCFunction) bucket_getstate, METH_VARARGS, "__getstate__()\nReturn the picklable state of the object"}, {"__setstate__", (PyCFunction) set_setstate, METH_VARARGS, "__setstate__()\nSet the state of the object"}, {"keys", (PyCFunction) bucket_keys, METH_VARARGS | METH_KEYWORDS, "keys()\nReturn the keys"}, {"has_key", (PyCFunction) bucket_has_key, METH_O, "has_key(key)\nTest whether the bucket contains the given key"}, {"clear", (PyCFunction) bucket_clear, METH_VARARGS, "clear()\nRemove all of the items from the bucket"}, {"maxKey", (PyCFunction) Bucket_maxKey, METH_VARARGS, "maxKey([key])\nFind the maximum key\n\n" "If an argument is given, find the maximum <= the argument"}, {"minKey", (PyCFunction) Bucket_minKey, METH_VARARGS, "minKey([key])\nFind the minimum key\n\n" "If an argument is given, find the minimum >= the argument"}, #ifdef PERSISTENT {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict, METH_VARARGS, "_p_resolveConflict()\nReinitialize from a newly created copy"}, {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_VARARGS | METH_KEYWORDS, "_p_deactivate()\nReinitialize from a newly created copy"}, #endif {"add", (PyCFunction)Set_insert, METH_VARARGS, "add(id)\nAdd a key to the set"}, {"insert", (PyCFunction)Set_insert, METH_VARARGS, "insert(id)\nAdd a key to the set"}, {"update", (PyCFunction)Set_update, METH_VARARGS, "update(seq)\nAdd the items from the given sequence to the set"}, {"remove", (PyCFunction)Set_remove, METH_VARARGS, "remove(id)\nRemove an id from the set"}, {"discard", (PyCFunction)Set_discard, METH_VARARGS, "Remove an element from a set if it is a member.\n\n" "If the element is not a member, do nothing."}, {"isdisjoint", (PyCFunction)Set_isdisjoint, METH_O, "Return True if two sets have a null intersection."}, {"pop", (PyCFunction)Set_pop, METH_VARARGS, "Remove and return an arbitrary item."}, {NULL, NULL} /* sentinel */ }; static int Set_init(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *v = NULL; if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "Set", &v)) return -1; if (v) return _Set_update((Bucket *)self, v); else return 0; } static PyObject * set_repr(Bucket *self) { static PyObject *format; PyObject *r, *t; if (!format) format = TEXT_FROM_STRING(MOD_NAME_PREFIX "Set(%s)"); UNLESS (t = PyTuple_New(1)) return NULL; UNLESS (r = bucket_keys(self, NULL, NULL)) goto err; PyTuple_SET_ITEM(t, 0, r); r = t; ASSIGN(r, TEXT_FORMAT(format, r)); return r; err: Py_DECREF(t); return NULL; } static Py_ssize_t set_length(Bucket *self) { int r; PER_USE_OR_RETURN(self, -1); r = self->len; PER_UNUSE(self); return r; } static PyObject * set_item(Bucket *self, Py_ssize_t index) { PyObject *r=0; PER_USE_OR_RETURN(self, NULL); if (index >= 0 && index < self->len) { COPY_KEY_TO_OBJECT(r, self->keys[index]); } else IndexError(index); PER_UNUSE(self); return r; } /* * In-place operators. * The implementation is identical with TreeSet, with the only * differences being the calls to insert/remove items and clear * the object. * * This implementation is naive and matches the Python versions, accepting * nearly any iterable. 
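As the comment above says, the in-place operators accept nearly any iterable; a short sketch of the resulting Python behaviour (expected results in comments):

    from BTrees.OOBTree import OOSet

    s = OOSet(['a', 'b', 'c'])
    s |= ['c', 'd']            # union update
    s -= ('a',)                # difference update
    s ^= ['b', 'x']            # symmetric-difference update
    list(s)                    # ['c', 'd', 'x']
    s.discard('missing')       # absent keys are ignored
    s.pop()                    # removes and returns an element ('c' here)
    s.isdisjoint(['q', 'r'])   # True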
*/ static PyObject* set_isub(Bucket* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; if (other == (PyObject*)self) { v = bucket_clear(self, NULL); if (v == NULL) { goto err; } Py_DECREF(v); } else { iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } if (_bucket_set(self, v, NULL, 0, 1, 0) < 0) { if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); } else { Py_DECREF(v); goto err; } } Py_DECREF(v); } } Py_INCREF(self); result = (PyObject*)self; err: Py_XDECREF(iter); return result; } static PyObject* set_ior(Bucket* self, PyObject* other) { PyObject* update_args = NULL; PyObject* result = NULL; update_args = PyTuple_Pack(1, other); if (!update_args) { return NULL; } result = Set_update(self, update_args); Py_DECREF(update_args); if (!result) { return NULL; } Py_DECREF(result); Py_INCREF(self); return (PyObject*)self; } static PyObject* set_ixor(Bucket* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; int contained = 0; if (other == (PyObject*)self) { v = bucket_clear(self, NULL); if (v == NULL) { goto err; } Py_DECREF(v); } else { iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } /* contained is also used as an error flag for the removal/addition */ contained = bucket_contains(self, v); if (contained != -1) { /* If not present (contained == 0), add it, otherwise remove it. */ contained = _bucket_set(self, v, contained == 0 ? Py_None : NULL, contained == 0 ? 1 : 0, 1, 0); } Py_DECREF(v); if (contained < 0) { goto err; } } } Py_INCREF(self); result = (PyObject*)self; err: Py_XDECREF(iter); return result; } static PyObject* Generic_set_xor(PyObject* self, PyObject* other) { PyObject* set_self = NULL; PyObject* set_other = NULL; PyObject* set_xor = NULL; PyObject* result = NULL; set_self = PySet_New(self); set_other = PySet_New(other); if (set_self == NULL || set_other == NULL) { goto err; } set_xor = PyNumber_Xor(set_self, set_other); if (set_xor == NULL) { goto err; } result = PyObject_CallFunctionObjArgs((PyObject*)Py_TYPE(self), set_xor, NULL); err: Py_XDECREF(set_self); Py_XDECREF(set_other); Py_XDECREF(set_xor); return result; } static PyObject* set_iand(Bucket* self, PyObject* other) { PyObject* iter = NULL; PyObject* v = NULL; PyObject* result = NULL; PyObject* tmp_list = NULL; int contained = 0; tmp_list = PyList_New(0); if (tmp_list == NULL) { return NULL; } iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } contained = bucket_contains(self, v); if (contained == 1) { /* Yay, we had it and get to keep it. */ if (PyList_Append(tmp_list, v) < 0) { Py_DECREF(v); goto err; } } /* Done with the sequence value now Either it was already in the set, which is fine, or there was an error. */ Py_DECREF(v); if (contained == -1) { goto err; } } /* Replace our contents with the list of keys we built. 
*/ v = bucket_clear(self, NULL); if (v == NULL) { goto err; } Py_DECREF(v); if (_Set_update(self, tmp_list) < 0) { goto err; } Py_INCREF(self); result = (PyObject*)self; err: Py_DECREF(iter); Py_DECREF(tmp_list); return result; } static PySequenceMethods set_as_sequence = { (lenfunc)set_length, /* sq_length */ (binaryfunc)0, /* sq_concat */ (ssizeargfunc)0, /* sq_repeat */ (ssizeargfunc)set_item, /* sq_item */ (ssizessizeargfunc)0, /* sq_slice */ (ssizeobjargproc)0, /* sq_ass_item */ (ssizessizeobjargproc)0, /* sq_ass_slice */ (objobjproc)bucket_contains, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; static PyNumberMethods set_as_number = { (binaryfunc)0, /* nb_add */ bucket_sub, /* nb_subtract */ (binaryfunc)0, /* nb_multiply */ (binaryfunc)0, /* nb_remainder */ (binaryfunc)0, /* nb_divmod */ (ternaryfunc)0, /* nb_power */ (unaryfunc)0, /* nb_negative */ (unaryfunc)0, /* nb_positive */ (unaryfunc)0, /* nb_absolute */ (inquiry)0, /* nb_bool */ (unaryfunc)0, /* nb_invert */ (binaryfunc)0, /* nb_lshift */ (binaryfunc)0, /* nb_rshift */ bucket_and, /* nb_and */ (binaryfunc)Generic_set_xor, /* nb_xor */ bucket_or, /* nb_or */ 0, /*nb_int*/ 0, /*nb_reserved*/ 0, /*nb_float*/ 0, /*nb_inplace_add*/ (binaryfunc)set_isub, /*nb_inplace_subtract*/ 0, /*nb_inplace_multiply*/ 0, /*nb_inplace_remainder*/ 0, /*nb_inplace_power*/ 0, /*nb_inplace_lshift*/ 0, /*nb_inplace_rshift*/ (binaryfunc)set_iand, /*nb_inplace_and*/ (binaryfunc)set_ixor, /*nb_inplace_xor*/ (binaryfunc)set_ior, /*nb_inplace_or*/ }; static PyTypeObject SetType = { PyVarObject_HEAD_INIT(NULL, 0) /* PyPersist_Type */ MODULE_NAME MOD_NAME_PREFIX "Set", /* tp_name */ sizeof(Bucket), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)bucket_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc)set_repr, /* tp_repr */ &set_as_number, /* tp_as_number */ &set_as_sequence, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ 0, /* tp_doc */ (traverseproc)bucket_traverse, /* tp_traverse */ (inquiry)bucket_tp_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)Bucket_getiter, /* tp_iter */ 0, /* tp_iternext */ Set_methods, /* tp_methods */ Bucket_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ Set_init, /* tp_init */ 0, /* tp_alloc */ 0, /*PyType_GenericNew,*/ /* tp_new */ }; static int nextSet(SetIteration *i) { if (i->position >= 0) { UNLESS(PER_USE(BUCKET(i->set))) return -1; if (i->position) { DECREF_KEY(i->key); } if (i->position < BUCKET(i->set)->len) { COPY_KEY(i->key, BUCKET(i->set)->keys[i->position]); INCREF_KEY(i->key); i->position ++; } else { i->position = -1; PER_ACCESSED(BUCKET(i->set)); } PER_ALLOW_DEACTIVATION(BUCKET(i->set)); } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/TreeSetTemplate.c0000644000076500000240000004257414355020716016632 0ustar00jensstaff/***************************************************************************** Copyright (c) 2001, 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ #include "_compat.h" #define TREESETTEMPLATE_C "$Id$\n" static PyObject * TreeSet_insert(BTree *self, PyObject *args) { PyObject *key; int i; if (!PyArg_ParseTuple(args, "O:insert", &key)) return NULL; i = _BTree_set(self, key, Py_None, 1, 1); if (i < 0) return NULL; return INT_FROM_LONG(i); } /* _Set_update and _TreeSet_update are identical except for the function they call to add the element to the set. */ static int _TreeSet_update(BTree *self, PyObject *seq) { int n=0, ind=0; PyObject *iter, *v; iter = PyObject_GetIter(seq); if (iter == NULL) return -1; while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) goto err; else break; } ind = _BTree_set(self, v, Py_None, 1, 1); Py_DECREF(v); if (ind < 0) goto err; else n += ind; } err: Py_DECREF(iter); if (ind < 0) return -1; return n; } static PyObject * TreeSet_update(BTree *self, PyObject *args) { PyObject *seq = NULL; int n = 0; if (!PyArg_ParseTuple(args, "|O:update", &seq)) return NULL; if (seq) { n = _TreeSet_update(self, seq); if (n < 0) return NULL; } return INT_FROM_LONG(n); } static PyObject * TreeSet_remove(BTree *self, PyObject *args) { PyObject *key; UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL; if (_BTree_set(self, key, NULL, 0, 1) < 0) return NULL; Py_INCREF(Py_None); return Py_None; } static PyObject * TreeSet_discard(BTree *self, PyObject *args) { PyObject *key; UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL; if (_BTree_set(self, key, NULL, 0, 1) < 0) { if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); } else if (PyErr_ExceptionMatches(PyExc_TypeError)) { /* Failed to compare, so it can't be in the tree. 
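
               For example, with an integer-keyed tree such as
               BTrees.IIBTree.IITreeSet, something like

                   ts = IITreeSet([1, 2, 3])
                   ts.discard('not-an-int')

               is expected to be a no-op rather than an error: the string
               cannot be converted or compared as a key, so it cannot
               possibly be in the tree.  (ts is an illustrative name.)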
*/ PyErr_Clear(); } else { return NULL; } } Py_INCREF(Py_None); return Py_None; } static PyObject* TreeSet_pop(BTree* self, PyObject* args) { PyObject* result = NULL; PyObject* key = NULL; PyObject* remove_args = NULL; PyObject* remove_result = NULL; if (PyTuple_Size(args) != 0) { PyErr_SetString(PyExc_TypeError, "pop(): Takes no arguments."); return NULL; } key = BTree_minKey(self, args); /* reuse existing empty tuple */ if (!key) { PyErr_Clear(); PyErr_SetString(PyExc_KeyError, "pop(): empty tree."); return NULL; } remove_args = PyTuple_Pack(1, key); if (remove_args) { remove_result = TreeSet_remove(self, remove_args); Py_DECREF(remove_args); if (remove_result) { Py_INCREF(key); result = key; Py_DECREF(remove_result); } } return result; } static PyObject* TreeSet_isdisjoint(BTree* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; int contained = 0; if (other == (PyObject*)self) { if (self->len == 0) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } iter = PyObject_GetIter(other); if (iter == NULL) { return NULL; } while (1) { if (result != NULL) { break; } v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } contained = BTree_contains(self, v); if (contained == -1) { goto err; } if (contained == 1) { result = Py_False; } Py_DECREF(v); } if (result == NULL) { result = Py_True; } Py_INCREF(result); err: Py_DECREF(iter); return result; } static PyObject * TreeSet_setstate(BTree *self, PyObject *args) { int r; if (!PyArg_ParseTuple(args,"O",&args)) return NULL; PER_PREVENT_DEACTIVATION(self); r=_BTree_setstate(self, args, 1); PER_UNUSE(self); if (r < 0) return NULL; Py_INCREF(Py_None); return Py_None; } static struct PyMethodDef TreeSet_methods[] = { {"__getstate__", (PyCFunction) BTree_getstate, METH_NOARGS, "__getstate__() -> state\n\n" "Return the picklable state of the TreeSet."}, {"__setstate__", (PyCFunction) TreeSet_setstate, METH_VARARGS, "__setstate__(state)\n\n" "Set the state of the TreeSet."}, {"has_key", (PyCFunction) BTree_has_key, METH_O, "has_key(key)\n\n" "Return true if the TreeSet contains the given key."}, {"keys", (PyCFunction) BTree_keys, METH_VARARGS | METH_KEYWORDS, "keys([min, max]) -> list of keys\n\n" "Returns the keys of the TreeSet. If min and max are supplied, only\n" "keys greater than min and less than max are returned."}, {"maxKey", (PyCFunction) BTree_maxKey, METH_VARARGS, "maxKey([max]) -> key\n\n" "Return the largest key in the BTree. If max is specified, return\n" "the largest key <= max."}, {"minKey", (PyCFunction) BTree_minKey, METH_VARARGS, "minKey([mi]) -> key\n\n" "Return the smallest key in the BTree. 
If min is specified, return\n" "the smallest key >= min."}, {"clear", (PyCFunction) BTree_clear, METH_NOARGS, "clear()\n\nRemove all of the items from the BTree."}, {"add", (PyCFunction)TreeSet_insert, METH_VARARGS, "add(id) -- Add an item to the set"}, {"insert", (PyCFunction)TreeSet_insert, METH_VARARGS, "insert(id) -- Add an item to the set"}, {"update", (PyCFunction)TreeSet_update, METH_VARARGS, "update(collection)\n\n Add the items from the given collection."}, {"remove", (PyCFunction)TreeSet_remove, METH_VARARGS, "remove(id) -- Remove a key from the set"}, {"discard", (PyCFunction)TreeSet_discard, METH_VARARGS, "Remove an element from a set if it is a member.\n\n" "If the element is not a member, do nothing."}, {"pop", (PyCFunction)TreeSet_pop, METH_VARARGS, "pop() -- Remove and return a key from the set"}, {"_check", (PyCFunction) BTree_check, METH_NOARGS, "Perform sanity check on TreeSet, and raise exception if flawed."}, #ifdef PERSISTENT {"_p_resolveConflict", (PyCFunction) BTree__p_resolveConflict, METH_VARARGS, "_p_resolveConflict() -- Reinitialize from a newly created copy"}, {"_p_deactivate", (PyCFunction) BTree__p_deactivate, METH_VARARGS | METH_KEYWORDS, "_p_deactivate()\n\nReinitialize from a newly created copy."}, #endif {"isdisjoint", (PyCFunction)TreeSet_isdisjoint, METH_O, "Return True if two sets have a null intersection."}, {NULL, NULL} /* sentinel */ }; static PyMappingMethods TreeSet_as_mapping = { (lenfunc)BTree_length, /*mp_length*/ }; static PySequenceMethods TreeSet_as_sequence = { (lenfunc)0, /* sq_length */ (binaryfunc)0, /* sq_concat */ (ssizeargfunc)0, /* sq_repeat */ (ssizeargfunc)0, /* sq_item */ (ssizessizeargfunc)0, /* sq_slice */ (ssizeobjargproc)0, /* sq_ass_item */ (ssizessizeobjargproc)0, /* sq_ass_slice */ (objobjproc)BTree_contains, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; static int TreeSet_init(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *v = NULL; if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "TreeSet", &v)) return -1; if (v) return _TreeSet_update((BTree *)self, v); else return 0; } /* * In-place operators. * The implementation is identical with Set, with the only * differences being the calls to insert/remove items and clear * the object. * * This implementation is naive and matches the Python versions, accepting * nearly any iterable. 
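 *
 * For instance (illustrative, using the 64-bit integer set type from
 * BTrees.LLBTree), the in-place symmetric difference toggles membership:
 *
 *     >>> from BTrees.LLBTree import LLTreeSet
 *     >>> ts = LLTreeSet([1, 2, 3])
 *     >>> ts ^= [2, 4]        # 2 was present and is removed, 4 is added
 *     >>> list(ts)
 *     [1, 3, 4]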
*/ static PyObject* TreeSet_isub(BTree* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; if (other == (PyObject*)self) { v = BTree_clear(self); if (v == NULL) { goto err; } Py_DECREF(v); } else { iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } if (_BTree_set(self, v, NULL, 0, 1) < 0) { if (BTree_ShouldSuppressKeyError()) { PyErr_Clear(); } else { Py_DECREF(v); goto err; } } Py_DECREF(v); } } Py_INCREF(self); result = (PyObject*)self; err: Py_XDECREF(iter); return result; } static PyObject* TreeSet_ior(BTree* self, PyObject* other) { PyObject* update_args = NULL; PyObject* result = NULL; update_args = PyTuple_Pack(1, other); if (!update_args) { return NULL; } result = TreeSet_update(self, update_args); Py_DECREF(update_args); if (!result) { return NULL; } Py_DECREF(result); Py_INCREF(self); return (PyObject*)self; } static PyObject* TreeSet_ixor(BTree* self, PyObject* other) { PyObject* iter = NULL; PyObject* result = NULL; PyObject* v = NULL; int contained = 0; if (other == (PyObject*)self) { v = BTree_clear(self); if (v == NULL) { goto err; } Py_DECREF(v); } else { iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } /* contained is also used as an error flag for the removal/addition */ contained = BTree_contains(self, v); if (contained != -1) { /* If not present (contained == 0), add it, otherwise remove it. */ contained = _BTree_set(self, v, contained == 0 ? Py_None : NULL, contained == 0 ? 1 : 0, 1); } Py_DECREF(v); if (contained < 0) { goto err; } } } Py_INCREF(self); result = (PyObject*)self; err: Py_XDECREF(iter); return result; } static PyObject* TreeSet_iand(BTree* self, PyObject* other) { PyObject* iter = NULL; PyObject* v = NULL; PyObject* result = NULL; PyObject* tmp_list = NULL; int contained = 0; tmp_list = PyList_New(0); if (tmp_list == NULL) { return NULL; } iter = PyObject_GetIter(other); if (iter == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } while (1) { v = PyIter_Next(iter); if (v == NULL) { if (PyErr_Occurred()) { goto err; } else { break; } } contained = BTree_contains(self, v); if (contained == 1) { /* Yay, we had it and get to keep it. */ if (PyList_Append(tmp_list, v) < 0) { Py_DECREF(v); goto err; } } /* Done with the sequence value now Either it was already in the set, which is fine, or there was an error. */ Py_DECREF(v); if (contained == -1) { goto err; } } /* Replace our contents with the list of keys we built. 
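
       Two side notes, in Python terms: if the right-hand operand is not
       iterable at all, the slots above return NotImplemented and Python's
       normal fallback and error reporting take over; and because `other`
       is consumed exactly once into tmp_list before the clear, something
       like ts &= (k for k in source), with a one-shot generator, works as
       well (ts and source being illustrative names).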
*/ v = BTree_clear(self); if (v == NULL) { goto err; } Py_DECREF(v); if (_TreeSet_update(self, tmp_list) < 0) { goto err; } Py_INCREF(self); result = (PyObject*)self; err: Py_DECREF(iter); Py_DECREF(tmp_list); return result; } static PyNumberMethods TreeSet_as_number = { 0, /* nb_add */ bucket_sub, /* nb_subtract */ 0, /* nb_multiply */ 0, /* nb_remainder */ 0, /* nb_divmod */ 0, /* nb_power */ 0, /* nb_negative */ 0, /* nb_positive */ 0, /* nb_absolute */ (inquiry)BTree_nonzero, /* nb_nonzero */ (unaryfunc)0, /* nb_invert */ (binaryfunc)0, /* nb_lshift */ (binaryfunc)0, /* nb_rshift */ bucket_and, /* nb_and */ (binaryfunc)Generic_set_xor, /* nb_xor */ bucket_or, /* nb_or */ 0, /*nb_int*/ 0, /*nb_reserved*/ 0, /*nb_float*/ 0, /*nb_inplace_add*/ (binaryfunc)TreeSet_isub, /*nb_inplace_subtract*/ 0, /*nb_inplace_multiply*/ 0, /*nb_inplace_remainder*/ 0, /*nb_inplace_power*/ 0, /*nb_inplace_lshift*/ 0, /*nb_inplace_rshift*/ (binaryfunc)TreeSet_iand, /*nb_inplace_and*/ (binaryfunc)TreeSet_ixor, /*nb_inplace_xor*/ (binaryfunc)TreeSet_ior, /*nb_inplace_or*/ }; static PyTypeObject TreeSetType = { PyVarObject_HEAD_INIT(NULL, 0) MODULE_NAME MOD_NAME_PREFIX "TreeSet", /* tp_name */ sizeof(BTree), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)BTree_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ &TreeSet_as_number, /* tp_as_number */ &TreeSet_as_sequence, /* tp_as_sequence */ &TreeSet_as_mapping, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ 0, /* tp_doc */ (traverseproc)BTree_traverse, /* tp_traverse */ (inquiry)BTree_tp_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)BTree_getiter, /* tp_iter */ 0, /* tp_iternext */ TreeSet_methods, /* tp_methods */ BTree_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ TreeSet_init, /* tp_init */ 0, /* tp_alloc */ 0, /*PyType_GenericNew,*/ /* tp_new */ }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_IFBTree.c0000644000076500000240000000176214355020716015134 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################*/ #define MASTER_ID "$Id$\n" /* IFBTree - int key, float value BTree Implements a collection using int type keys and float type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "IF" #include "_compat.h" #include "intkeymacros.h" #include "floatvaluemacros.h" #define INITMODULE PyInit__IFBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_IIBTree.c0000644000076500000240000000175414355020716015140 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* IIBTree - int key, int value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "II" #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__IIBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_IOBTree.c0000644000076500000240000000173014355020716015140 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* IOBTree - int key, object value BTree Implements a collection using int type keys and object type values */ #define PERSISTENT #define MOD_NAME_PREFIX "IO" #include "_compat.h" #include "intkeymacros.h" #include "objectvaluemacros.h" #define INITMODULE PyInit__IOBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_IUBTree.c0000644000076500000240000000202714355020716015146 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
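#
# (The small files above are typical of how every concrete module in this
#  package is produced: a few macro definitions pick the key type, the
#  value type and the module name, and BTreeModuleTemplate.c supplies the
#  actual implementation.  A rough usage sketch of one result, the
#  int-key / float-value module; the name t is only illustrative:
#
#      >>> from BTrees.IFBTree import IFBTree
#      >>> t = IFBTree()
#      >>> t[2] = 0.5
#      >>> t[2]
#      0.5
#      >>> t['x'] = 1.0     # non-integer keys are expected to raise TypeError
# )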
# ############################################################################*/ #define MASTER_ID "$Id$\n" /* IIBTree - int32_t key, uint32_t value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "IU" #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__IUBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_LFBTree.c0000644000076500000240000000207214355020716015132 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IFBTree.c 67074 2006-04-17 19:13:39Z fdrake $\n" /* IFBTree - int key, float value BTree Implements a collection using int type keys and float type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "LF" #define ZODB_64BIT_INTS #include "_compat.h" #include "intkeymacros.h" #include "floatvaluemacros.h" #define INITMODULE PyInit__LFBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_LLBTree.c0000644000076500000240000000206114355020716015136 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* IIBTree - int key, int value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "LL" #define ZODB_64BIT_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__LLBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_LOBTree.c0000644000076500000240000000203514355020716015142 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IOBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* IOBTree - int key, object value BTree Implements a collection using int type keys and object type values */ #define PERSISTENT #define MOD_NAME_PREFIX "LO" #define ZODB_64BIT_INTS #include "_compat.h" #include "intkeymacros.h" #include "objectvaluemacros.h" #define INITMODULE PyInit__LOBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_LQBTree.c0000644000076500000240000000213314355020716015143 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* IIBTree - int64_t key, uint64_t value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "LQ" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__LQBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_OIBTree.c0000644000076500000240000000173014355020716015140 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* OIBTree - object key, int value BTree Implements a collection using object type keys and int type values */ #define PERSISTENT #define MOD_NAME_PREFIX "OI" #include "_compat.h" #include "objectkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__OIBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_OLBTree.c0000644000076500000240000000203514355020716015142 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. 
# # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _OIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* OIBTree - object key, int value BTree Implements a collection using object type keys and int type values */ #define PERSISTENT #define MOD_NAME_PREFIX "OL" #define ZODB_64BIT_INTS #include "_compat.h" #include "objectkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__OLBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_OOBTree.c0000644000076500000240000000174114355020716015150 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* OOBTree - object key, object value BTree Implements a collection using object type keys and object type values */ #define PERSISTENT #define MOD_NAME_PREFIX "OO" #include "_compat.h" #include "objectkeymacros.h" #include "objectvaluemacros.h" #define INITMODULE PyInit__OOBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_OQBTree.c0000644000076500000240000000210314355020716015143 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################*/ #define MASTER_ID "$Id: _OIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* OQBTree - object key, uint64_t value BTree Implements a collection using object type keys and int type values */ #define PERSISTENT #define MOD_NAME_PREFIX "OQ" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "objectkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__OQBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_OUBTree.c0000644000076500000240000000177714355020716015167 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* OUBTree - object key, uint32_t value BTree Implements a collection using object type keys and int type values */ #define PERSISTENT #define MOD_NAME_PREFIX "OU" #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "objectkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__OUBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_QFBTree.c0000644000076500000240000000213614355020716015140 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IFBTree.c 67074 2006-04-17 19:13:39Z fdrake $\n" /* QFBTree - uint64_t key, float value BTree Implements a collection using int type keys and float type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "QF" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "floatvaluemacros.h" #define INITMODULE PyInit__QFBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_QLBTree.c0000644000076500000240000000213114355020716015141 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* QLBTree - uint64_t key, int64_t value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "QL" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__QLBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_QOBTree.c0000644000076500000240000000210114355020716015141 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id: _IOBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* QOBTree - uint64_t key, object value BTree Implements a collection using int type keys and object type values */ #define PERSISTENT #define MOD_NAME_PREFIX "QO" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "objectvaluemacros.h" #define INITMODULE PyInit__QOBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_QQBTree.c0000644000076500000240000000217314355020716015154 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################*/ #define MASTER_ID "$Id: _IIBTree.c 25186 2004-06-02 15:07:33Z jim $\n" /* QQBTree - uint64_t key, uint64_t value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "QQ" #define ZODB_64BIT_INTS #define ZODB_UNSIGNED_KEY_INTS #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__QQBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_UFBTree.c0000644000076500000240000000203214355020716015137 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* IFBTree - unsigned int key, float value BTree Implements a collection using int type keys and float type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "UF" #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "floatvaluemacros.h" #define INITMODULE PyInit__UFBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_UIBTree.c0000644000076500000240000000202414355020716015143 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* IIBTree - unsigned int key, int value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "UI" #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__UIBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_UOBTree.c0000644000076500000240000000201014355020716015144 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* UOBTree - unsigned int key, object value BTree Implements a collection using unsigned int type keys and object type values */ #define PERSISTENT #define MOD_NAME_PREFIX "UO" #define ZODB_UNSIGNED_KEY_INTS #include "_compat.h" #include "intkeymacros.h" #include "objectvaluemacros.h" #define INITMODULE PyInit__UOBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_UUBTree.c0000644000076500000240000000207614355020716015166 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* IIBTree - unsigned int key, unsigned int value BTree Implements a collection using int type keys and int type values */ /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "UU" #define ZODB_UNSIGNED_KEY_INTS #define ZODB_UNSIGNED_VALUE_INTS #include "_compat.h" #include "intkeymacros.h" #include "intvaluemacros.h" #define INITMODULE PyInit__UUBTree #include "BTreeModuleTemplate.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/__init__.py0000644000076500000240000001015214626022106015507 0ustar00jensstaff############################################################################# # # Copyright (c) 2007 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################# import sys import zope.interface import BTrees.Interfaces from ._module_builder import create_module __all__ = [ 'family32', 'family64', ] _FAMILIES = ( # Signed 32-bit keys "IO", # object value "II", # self value "IF", # float value "IU", # opposite sign value # Unsigned 32-bit keys "UO", # object value "UU", # self value "UF", # float value "UI", # opposite sign value # Signed 64-bit keys "LO", # object value "LL", # self value "LF", # float value "LQ", # opposite sign value # Unsigned 64-bit keys "QO", # object value "QQ", # self value "QF", # float value "QL", # opposite sign value # Object keys "OO", # object "OI", # 32-bit signed "OU", # 32-bit unsigned "OL", # 64-bit signed "OQ", # 64-bit unsigned # Special purpose 'fs', # 2-byte -> 6-byte ) # XXX: Do this without completely ruining static analysis. for family in _FAMILIES: mod = create_module(family) name = vars(mod)['__name__'] sys.modules[name] = mod globals()[name.split('.', 1)[1]] = mod __all__.append(name) @zope.interface.implementer(BTrees.Interfaces.IBTreeFamily) class _Family: from BTrees import OOBTree as OO _BITSIZE = 0 minint = maxint = maxuint = None def __init__(self): self.maxint = int(2 ** (self._BITSIZE - 1) - 1) self.minint = int(-self.maxint - 1) self.maxuint = int(2 ** self._BITSIZE - 1) def __str__(self): return ( "BTree family using {} bits. " "Supports signed integer values from {:,} to {:,} " "and maximum unsigned integer value {:,}." ).format(self._BITSIZE, self.minint, self.maxint, self.maxuint) def __repr__(self): return "<%s>" % ( self ) class _Family32(_Family): _BITSIZE = 32 from BTrees import IFBTree as IF from BTrees import IIBTree as II from BTrees import IOBTree as IO from BTrees import IUBTree as IU from BTrees import OIBTree as OI from BTrees import OUBTree as OU from BTrees import UFBTree as UF from BTrees import UIBTree as UI from BTrees import UOBTree as UO from BTrees import UUBTree as UU def __reduce__(self): return _family32, () class _Family64(_Family): _BITSIZE = 64 from BTrees import LFBTree as IF from BTrees import LLBTree as II from BTrees import LOBTree as IO from BTrees import LQBTree as IU from BTrees import OLBTree as OI from BTrees import OQBTree as OU from BTrees import QFBTree as UF from BTrees import QLBTree as UI from BTrees import QOBTree as UO from BTrees import QQBTree as UU def __reduce__(self): return _family64, () def _family32(): return family32 _family32.__safe_for_unpickling__ = True # noqa E305 def _family64(): return family64 _family64.__safe_for_unpickling__ = True # noqa E305 #: 32-bit BTree family. family32 = _Family32() #: 64-bit BTree family. family64 = _Family64() for _family in family32, family64: for _mod_name in ( "OI", "OU", 'IO', "II", "IF", "IU", "UO", "UU", "UF", "UI", ): getattr(_family, _mod_name).family = _family # The IMergeBTreeModule interface specifies the ``family`` attribute, # and fsBTree implements IIntegerObjectBTreeModule, which extends that # interface. But for fsBTrees, no family makes particular sense, so we # arbitrarily pick one. globals()['fsBTree'].family = family64 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/_base.py0000644000076500000240000014613614626022106015035 0ustar00jensstaff############################################################################## # # Copyright 2011 Zope Foundation and Contributors. # All Rights Reserved. 
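#
# (A rough illustration of the family objects defined in __init__.py just
#  above; the limits follow directly from _BITSIZE, and the abstract
#  module names resolve to the concrete modules listed there:
#
#      >>> from BTrees import family32, family64
#      >>> family32.maxint, family32.minint, family32.maxuint
#      (2147483647, -2147483648, 4294967295)
#      >>> family64.maxint
#      9223372036854775807
#      >>> family64.IO.BTree.__name__    # "IO" in the 64-bit family is LOBTree
#      'LOBTree'
# )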
# # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Python BTree implementation """ from persistent import Persistent from ._compat import compare from .Interfaces import BTreesConflictError _marker = object() class _Base(Persistent): __slots__ = () # This is used to allocate storage for the keys. # It's probably here so that we could, for example, use # an ``array.array`` for native types. But nothing actually does # that, everything is stored boxed. # TODO: Figure out why not. _key_type = list def __init__(self, items=None): self.clear() if items: self.update(items) try: # Detect the presence of the C extensions. # If they're NOT around, we don't need to do any of the # special pickle support to make Python versions look like # C---we just rename the classes. By not defining these methods, # we can (theoretically) avoid a bit of a slowdown. # If the C extensions are around, we do need these methods, but # these classes are unlikely to be used in production anyway. __import__('BTrees._OOBTree') except ImportError: # pragma: no cover pass else: def __reduce__(self): # Swap out the type constructor for the C version, if present. func, typ_gna, state = Persistent.__reduce__(self) # We ignore the returned type altogether in favor of # our calculated class (which allows subclasses but replaces our # exact type with the C equivalent) typ = self.__class__ gna = typ_gna[1:] return (func, (typ,) + gna, state) @property def __class__(self): type_self = type(self) return ( type_self._BTree_reduce_as if type_self._BTree_reduce_up_bound is type_self else type_self ) @property def _BTree_reduce_as(self): # Return the pickle replacement class for this object. # If the C extensions are available, this will be the # C type (setup by _fix_pickle), otherwise it will be the real # type of this object. # This implementation is replaced by _fix_pickle and exists for # testing purposes. 
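        # Roughly what this buys us, assuming the C extensions are
        # importable (names and values below are illustrative): pickling a
        # *Py object round-trips to the C class, so both implementations
        # share one pickle format.
        #
        #     >>> import pickle
        #     >>> from BTrees.OOBTree import OOBTree, OOBTreePy
        #     >>> t = OOBTreePy({'a': 1})
        #     >>> type(pickle.loads(pickle.dumps(t))) is OOBTree
        #     True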
return type(self) # pragma: no cover _BTree_reduce_up_bound = _BTree_reduce_as class _ArithmeticMixin: def __sub__(self, other): return difference(self.__class__, self, other) def __rsub__(self, other): return difference(self._set_type, type(self)(other), self) def __or__(self, other): return union(self._set_type, self, other) __ror__ = __or__ def __and__(self, other): return intersection(self._set_type, self, other) __rand__ = __and__ def __xor__(self, other): return (self - other) | (other - self) __rxor__ = __xor__ class _BucketBase(_ArithmeticMixin, _Base): __slots__ = ('_keys', '_next', '_to_key') def clear(self): self._keys = self._key_type() self._next = None def __len__(self): return len(self._keys) @property def size(self): return len(self._keys) def _deleteNextBucket(self): next = self._next if next is not None: self._next = next._next def _search(self, key): # Return non-negative index on success # return -(insertion_index + 1) on fail low = 0 keys = self._keys high = len(keys) while low < high: i = (low + high) // 2 k = keys[i] if k is key or k == key: return i if compare(k, key) < 0: low = i + 1 else: high = i return -1 - low def minKey(self, key=_marker): if key is _marker or key is None: return self._keys[0] key = self._to_key(key) index = self._search(key) if index >= 0: return key index = -index - 1 if index < len(self._keys): return self._keys[index] else: raise ValueError("no key satisfies the conditions") def maxKey(self, key=_marker): if key is _marker or key is None: return self._keys[-1] key = self._to_key(key) index = self._search(key) if index >= 0: return key else: index = -index-1 if index: return self._keys[index-1] else: raise ValueError("no key satisfies the conditions") def _range(self, min=_marker, max=_marker, excludemin=False, excludemax=False): if min is _marker or min is None: start = 0 if excludemin: start = 1 else: min = self._to_key(min) start = self._search(min) if start >= 0: if excludemin: start += 1 else: start = -start - 1 if max is _marker or max is None: end = len(self._keys) if excludemax: end -= 1 else: max = self._to_key(max) end = self._search(max) if end >= 0: if not excludemax: end += 1 else: end = -end - 1 return start, end def keys(self, *args, **kw): start, end = self._range(*args, **kw) return self._keys[start:end] def iterkeys(self, *args, **kw): if not (args or kw): return iter(self._keys) keys = self._keys return (keys[i] for i in range(*self._range(*args, **kw))) def __iter__(self): return iter(self._keys) def __contains__(self, key): try: tree_key = self._to_key(key) except TypeError: # Can't convert the key, so can't possibly be in the tree return False return (self._search(tree_key) >= 0) has_key = __contains__ def _repr_helper(self, items): type_self = type(self) mod = type_self.__module__ name = type_self.__name__ name = name[:-2] if name.endswith("Py") else name return "{}.{}({!r})".format(mod, name, items) class _SetIteration: __slots__ = ( 'to_iterate', 'useValues', '_iter', 'active', 'position', 'key', 'value', ) def __init__(self, to_iterate, useValues=False, default=None, sort=False): if to_iterate is None: to_iterate = () self.to_iterate = to_iterate if sort: # Sorting is required for arbitrary iterables in the # set functions like difference/union/intersection assert not useValues if not isinstance(to_iterate, _Base): # We know _Base (Set, Bucket, Tree, TreeSet) will all iterate # in sorted order. Other than that, we have no guarantee. 
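                # Illustrative: the set functions walk two sorted streams
                # in lock-step, so a plain list like ['c', 'a', 'b'] has to
                # be seen here as ['a', 'b', 'c'], while a Bucket, Set,
                # BTree or TreeSet already iterates in key order and is
                # left alone.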
self.to_iterate = to_iterate = sorted(self.to_iterate) if useValues: try: itmeth = to_iterate.iteritems except AttributeError: if isinstance(to_iterate, dict): itmeth = to_iterate.items().__iter__ else: itmeth = to_iterate.__iter__ useValues = False else: self.value = None else: itmeth = to_iterate.__iter__ self.useValues = useValues self._iter = itmeth() self.active = True self.position = 0 self.key = _marker self.value = default self.advance() def advance(self): try: if self.useValues: self.key, self.value = next(self._iter) else: self.key = next(self._iter) self.position += 1 except StopIteration: self.active = False self.position = -1 return self class _MutableMappingMixin: # Methods defined in collections.abc.MutableMapping that # Bucket and Tree should both implement and can implement # the same. We don't want to extend that class though, # as the C version cannot. def popitem(self): """ D.popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty. """ try: key = next(iter(self)) except StopIteration: raise KeyError value = self[key] del self[key] return key, value class Bucket(_MutableMappingMixin, _BucketBase): __slots__ = () _value_type = list VALUE_SAME_CHECK = False def _to_value(self, x): return x def setdefault(self, key, value): key, value = self._to_key(key), self._to_value(value) status, value = self._set(key, value, True) return value def pop(self, key, default=_marker): try: status, value = self._del(self._to_key(key)) except KeyError: if default is _marker: raise return default else: return value def update(self, items): if hasattr(items, 'iteritems'): items = items.iteritems() elif hasattr(items, 'items'): items = items.items() _si = self.__setitem__ try: for key, value in items: _si(key, value) except ValueError: raise TypeError('items must be a sequence of 2-tuples') def __setitem__(self, key, value): self._set(self._to_key(key), self._to_value(value)) def __delitem__(self, key): self._del(self._to_key(key)) def clear(self): _BucketBase.clear(self) self._values = self._value_type() def get(self, key, default=None): try: key = self._to_key(key) except TypeError: # Can't convert, cannot possibly be present. return default index = self._search(key) if index < 0: return default return self._values[index] def __getitem__(self, key): try: tree_key = self._to_key(key) except TypeError: # Can't convert, so cannot possibly be present. 
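            # Illustrative: for the pure-Python IIBucketPy, b.get('x')
            # returns the default and b['x'] raises KeyError; the
            # TypeError from key conversion is deliberately not
            # propagated, matching __contains__ above.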
raise KeyError(key) index = self._search(tree_key) if index < 0: raise KeyError(key) return self._values[index] def _set(self, key, value, ifunset=False): """Set a value Return: status, value Status is: None if no change 0 if change, but not size change 1 if change and size change """ index = self._search(key) if index >= 0: if ( ifunset or self.VALUE_SAME_CHECK and value == self._values[index] ): return None, self._values[index] self._p_changed = True self._values[index] = value return 0, value else: self._p_changed = True index = -index - 1 self._keys.insert(index, key) self._values.insert(index, value) return 1, value def _del(self, key): index = self._search(key) if index >= 0: self._p_changed = True del self._keys[index] return 0, self._values.pop(index) raise KeyError(key) def _split(self, index=-1): if index < 0 or index >= len(self._keys): index = len(self._keys) // 2 new_instance = type(self)() new_instance._keys = self._keys[index:] new_instance._values = self._values[index:] del self._keys[index:] del self._values[index:] new_instance._next = self._next self._next = new_instance return new_instance def values(self, *args, **kw): start, end = self._range(*args, **kw) return self._values[start:end] def itervalues(self, *args, **kw): values = self._values return (values[i] for i in range(*self._range(*args, **kw))) def items(self, *args, **kw): keys = self._keys values = self._values return [ (keys[i], values[i]) for i in range(*self._range(*args, **kw)) ] def iteritems(self, *args, **kw): keys = self._keys values = self._values return ( (keys[i], values[i]) for i in range(*self._range(*args, **kw)) ) def __getstate__(self): keys = self._keys values = self._values data = [] for i in range(len(keys)): data.append(keys[i]) data.append(values[i]) data = tuple(data) if self._next is not None: return data, self._next return (data, ) def __setstate__(self, state): if not isinstance(state[0], tuple): raise TypeError("tuple required for first state element") self.clear() if len(state) == 2: state, self._next = state else: self._next = None state = state[0] keys = self._keys values = self._values for i in range(0, len(state), 2): keys.append(state[i]) values.append(state[i+1]) def _p_resolveConflict(self, s_old, s_com, s_new): b_old = type(self)() if s_old is not None: b_old.__setstate__(s_old) b_com = type(self)() if s_com is not None: b_com.__setstate__(s_com) b_new = type(self)() if s_new is not None: b_new.__setstate__(s_new) if ( b_com._next != b_old._next or b_new._next != b_old._next ): raise BTreesConflictError(-1, -1, -1, 0) if not b_com or not b_new: raise BTreesConflictError(-1, -1, -1, 12) i_old = _SetIteration(b_old, True) i_com = _SetIteration(b_com, True) i_new = _SetIteration(b_new, True) def merge_error(reason): return BTreesConflictError( i_old.position, i_com.position, i_new.position, reason) result = type(self)() def merge_output(it): result._keys.append(it.key) result._values.append(it.value) it.advance() while i_old.active and i_com.active and i_new.active: cmpOC = compare(i_old.key, i_com.key) cmpON = compare(i_old.key, i_new.key) if cmpOC == 0: if cmpON == 0: if i_com.value == i_old.value: result[i_old.key] = i_new.value elif i_new.value == i_old.value: result[i_old.key] = i_com.value else: raise merge_error(1) i_old.advance() i_com.advance() i_new.advance() elif (cmpON > 0): # insert in new merge_output(i_new) elif i_old.value == i_com.value: # deleted new if i_new.position == 1: # Deleted the first item. 
This will modify the # parent node, so we don't know if merging will be # safe raise merge_error(13) i_old.advance() i_com.advance() else: raise merge_error(2) elif cmpON == 0: if cmpOC > 0: # insert committed merge_output(i_com) elif i_old.value == i_new.value: # delete committed if i_com.position == 1: # Deleted the first item. This will modify the # parent node, so we don't know if merging will be # safe raise merge_error(13) i_old.advance() i_new.advance() else: raise merge_error(3) else: # both keys changed cmpCN = compare(i_com.key, i_new.key) if cmpCN == 0: # dueling insert raise merge_error(4) if cmpOC > 0: # insert committed if cmpCN > 0: # insert i_new first merge_output(i_new) else: merge_output(i_com) elif cmpON > 0: # insert i_new merge_output(i_new) else: raise merge_error(5) # both deleted same key while i_com.active and i_new.active: # new inserts cmpCN = compare(i_com.key, i_new.key) if cmpCN == 0: raise merge_error(6) # dueling insert if cmpCN > 0: # insert new merge_output(i_new) else: # insert committed merge_output(i_com) while i_old.active and i_com.active: # new deletes rest of original cmpOC = compare(i_old.key, i_com.key) if cmpOC > 0: # insert committed merge_output(i_com) elif cmpOC == 0 and (i_old.value == i_com.value): # del in new i_old.advance() i_com.advance() else: # dueling deletes or delete and change raise merge_error(7) while i_old.active and i_new.active: # committed deletes rest of original cmpON = compare(i_old.key, i_new.key) if cmpON > 0: # insert new merge_output(i_new) elif cmpON == 0 and (i_old.value == i_new.value): # deleted in committed i_old.advance() i_new.advance() else: # dueling deletes or delete and change raise merge_error(8) if i_old.active: # dueling deletes raise merge_error(9) while i_com.active: merge_output(i_com) while i_new.active: merge_output(i_new) if len(result._keys) == 0: # pragma: no cover # If the output bucket is empty, conflict resolution doesn't have # enough info to unlink it from its containing BTree correctly. # # XXX TS, 2012-11-16: I don't think this is possible # raise merge_error(10) result._next = b_old._next return result.__getstate__() def __repr__(self): return self._repr_helper(self.items()) class _MutableSetMixin: # Like _MutableMappingMixin, but for sets. def isdisjoint(self, other): """ Return True if two sets have a null intersection. """ for value in other: if value in self: return False return True def discard(self, key): """ Remove an element from the set if it is a member. If not, do nothing and raise no exception. """ # Written this way to avoid catching and accidentally # ignoring POSKeyError. if key in self: self.remove(key) def pop(self): """Return the popped value. 
Raise KeyError if empty.""" # Get our iter first to avoid catching and accidentally # ignoring POSKeyError it = iter(self) try: value = next(it) except StopIteration: raise KeyError self.discard(value) return value def __ior__(self, it): self.update(it) return self def __iand__(self, it): for value in (self - it): self.discard(value) return self def __isub__(self, it): if it is self: self.clear() else: for value in it: self.discard(value) return self def __ixor__(self, it): if it is self: self.clear() else: for value in it: if value in self: self.discard(value) else: self.add(value) return self class Set(_MutableSetMixin, _BucketBase): __slots__ = () def add(self, key): return self._set(self._to_key(key))[0] insert = add def remove(self, key): self._del(self._to_key(key)) def update(self, items): add = self.add for i in items: add(i) def __getstate__(self): data = tuple(self._keys) if self._next is not None: return data, self._next return (data, ) def __setstate__(self, state): if not isinstance(state[0], tuple): raise TypeError('tuple required for first state element') self.clear() if len(state) == 2: state, self._next = state else: self._next = None state = state[0] self._keys.extend(state) def _set(self, key, value=None, ifunset=False): index = self._search(key) if index < 0: index = -index - 1 self._p_changed = True self._keys.insert(index, key) return True, None return False, None def _del(self, key): index = self._search(key) if index >= 0: self._p_changed = True del self._keys[index] return 0, 0 raise KeyError(key) def __getitem__(self, i): return self._keys[i] def _split(self, index=-1): if index < 0 or index >= len(self._keys): index = len(self._keys) // 2 new_instance = type(self)() new_instance._keys = self._keys[index:] del self._keys[index:] new_instance._next = self._next self._next = new_instance return new_instance def _p_resolveConflict(self, s_old, s_com, s_new): b_old = type(self)() if s_old is not None: b_old.__setstate__(s_old) b_com = type(self)() if s_com is not None: b_com.__setstate__(s_com) b_new = type(self)() if s_new is not None: b_new.__setstate__(s_new) if ( b_com._next != b_old._next or b_new._next != b_old._next ): # conflict: com or new changed _next raise BTreesConflictError(-1, -1, -1, 0) if not b_com or not b_new: # conflict: com or new empty raise BTreesConflictError(-1, -1, -1, 12) i_old = _SetIteration(b_old, True) i_com = _SetIteration(b_com, True) i_new = _SetIteration(b_new, True) def merge_error(reason): return BTreesConflictError( i_old.position, i_com.position, i_new.position, reason) result = type(self)() def merge_output(it): result._keys.append(it.key) it.advance() while i_old.active and i_com.active and i_new.active: cmpOC = compare(i_old.key, i_com.key) cmpON = compare(i_old.key, i_new.key) if cmpOC == 0: if cmpON == 0: # all match merge_output(i_old) i_com.advance() i_new.advance() elif cmpON > 0: # insert in new merge_output(i_new) else: # deleted new if i_new.position == 1: # Deleted the first item. This will modify the # parent node, so we don't know if merging will be # safe raise merge_error(13) i_old.advance() i_com.advance() elif cmpON == 0: if cmpOC > 0: # insert committed merge_output(i_com) else: # delete committed if i_com.position == 1: # Deleted the first item. 
This will modify the # parent node, so we don't know if merging will be # safe raise merge_error(13) i_old.advance() i_new.advance() else: # both com and new keys changed cmpCN = compare(i_com.key, i_new.key) if cmpCN == 0: # both inserted same key raise merge_error(4) if cmpOC > 0: # insert committed if cmpCN > 0: # insert i_new first merge_output(i_new) else: merge_output(i_com) elif cmpON > 0: # insert i_new merge_output(i_new) else: # both com and new deleted same key raise merge_error(5) while i_com.active and i_new.active: # new inserts cmpCN = compare(i_com.key, i_new.key) if cmpCN == 0: # dueling insert raise merge_error(6) if cmpCN > 0: # insert new merge_output(i_new) else: # insert committed merge_output(i_com) while i_old.active and i_com.active: # new deletes rest of original cmpOC = compare(i_old.key, i_com.key) if cmpOC > 0: # insert committed merge_output(i_com) elif cmpOC == 0: # del in new i_old.advance() i_com.advance() else: # dueling deletes or delete and change raise merge_error(7) while i_old.active and i_new.active: # committed deletes rest of original cmpON = compare(i_old.key, i_new.key) if cmpON > 0: # insert new merge_output(i_new) elif cmpON == 0: # deleted in committed i_old.advance() i_new.advance() else: # dueling deletes or delete and change raise merge_error(8) if i_old.active: # dueling deletes raise merge_error(9) while i_com.active: merge_output(i_com) while i_new.active: merge_output(i_new) if len(result._keys) == 0: # pragma: no cover # If the output bucket is empty, conflict resolution doesn't have # enough info to unlink it from its containing BTree correctly. # # XXX TS, 2012-11-16: I don't think this is possible # raise merge_error(10) result._next = b_old._next return result.__getstate__() def __repr__(self): return self._repr_helper(self._keys) class _TreeItem: __slots__ = ('key', 'child') def __init__(self, key, child): self.key = key self.child = child class _Tree(_ArithmeticMixin, _Base): __slots__ = ('_data', '_firstbucket') def __new__(cls, *args): value = _Base.__new__(cls, *args) # Empty trees don't get their __setstate__ called upon # unpickling (or __init__, obviously), so clear() is never called # and _data and _firstbucket are never defined, unless we do it here. 
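# A minimal illustrative sketch (assuming the Py classes from
# BTrees.OOBTree and plain pickle outside of ZODB; not part of the
# original source): because __new__ pre-populates these slots, an empty
# tree survives a pickle round trip even though __setstate__ is skipped
# for its None state.
#
#   >>> import pickle
#   >>> from BTrees.OOBTree import OOBTreePy
#   >>> t = pickle.loads(pickle.dumps(OOBTreePy()))
#   >>> len(t), list(t)
#   (0, [])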
value._data = [] value._firstbucket = None return value def setdefault(self, key, value): return self._set(self._to_key(key), self._to_value(value), True)[1] def pop(self, key, default=_marker): try: return self._del(self._to_key(key))[1] except KeyError: if default is _marker: raise return default def update(self, items): if hasattr(items, 'iteritems'): items = items.iteritems() elif hasattr(items, 'items'): items = items.items() set = self.__setitem__ for i in items: set(*i) def __setitem__(self, key, value): self._set(self._to_key(key), self._to_value(value)) def __delitem__(self, key): self._del(self._to_key(key)) def clear(self): if self._data: # In the case of __init__, this was already set by __new__ self._data = [] self._firstbucket = None def __bool__(self): return bool(self._data) def __len__(self): accumulated = 0 bucket = self._firstbucket while bucket is not None: accumulated += len(bucket._keys) bucket = bucket._next return accumulated @property def size(self): return len(self._data) def _search(self, key): data = self._data if data: lo = 0 hi = len(data) i = hi // 2 while i > lo: cmp_ = compare(data[i].key, key) if cmp_ < 0: lo = i elif cmp_ > 0: hi = i else: break i = (lo + hi) // 2 return i return -1 def _findbucket(self, key): index = self._search(key) if index >= 0: child = self._data[index].child if isinstance(child, self._bucket_type): return child return child._findbucket(key) def __contains__(self, key): try: tree_key = self._to_key(key) except TypeError: # Can't convert the key, so can't possibly be in the tree return False return key in (self._findbucket(tree_key) or ()) def has_key(self, key): index = self._search(key) if index < 0: return False return self._data[index].child.has_key(key) def keys(self, min=_marker, max=_marker, excludemin=False, excludemax=False, itertype='iterkeys'): if not self._data: return () if min is not _marker and min is not None: min = self._to_key(min) bucket = self._findbucket(min) else: bucket = self._firstbucket iterargs = min, max, excludemin, excludemax return _TreeItems(bucket, itertype, iterargs) def iterkeys(self, min=_marker, max=_marker, excludemin=False, excludemax=False): return iter(self.keys(min, max, excludemin, excludemax)) def __iter__(self): return iter(self.keys()) def minKey(self, min=_marker): if min is _marker or min is None: bucket = self._firstbucket else: min = self._to_key(min) bucket = self._findbucket(min) if bucket is not None: return bucket.minKey(min) raise ValueError('empty tree') def maxKey(self, max=_marker): data = self._data if not data: raise ValueError('empty tree') if max is _marker or max is None: return data[-1].child.maxKey() max = self._to_key(max) index = self._search(max) if index and compare(data[index].child.minKey(), max) > 0: index -= 1 # pragma: no cover no idea how to provoke this return data[index].child.maxKey(max) def _set(self, key, value=None, ifunset=False): if ( self._p_jar is not None and self._p_oid is not None and self._p_serial is not None ): self._p_jar.readCurrent(self) data = self._data if data: index = self._search(key) child = data[index].child else: index = 0 child = self._bucket_type() self._firstbucket = child data.append(_TreeItem(None, child)) result = child._set(key, value, ifunset) grew = result[0] if grew: if type(child) is type(self): max_size = type(self).max_internal_size else: max_size = type(self).max_leaf_size if child.size > max_size: self._grow(child, index) # If a BTree contains only a single bucket, BTree.__getstate__() # includes the bucket's entire 
state, and the bucket doesn't get # an oid of its own. So if we have a single oid-less bucket that # changed, it's *our* oid that should be marked as changed -- the # bucket doesn't have one. if ( grew is not None and type(child) is self._bucket_type and len(data) == 1 and child._p_oid is None ): self._p_changed = 1 return result def _grow(self, child, index): self._p_changed = True new_child = child._split() self._data.insert(index+1, _TreeItem(new_child.minKey(), new_child)) if len(self._data) >= type(self).max_internal_size * 2: self._split_root() def _split_root(self): child = type(self)() child._data = self._data child._firstbucket = self._firstbucket self._data = [_TreeItem(None, child)] self._grow(child, 0) def _split(self, index=None): data = self._data if index is None: index = len(data) // 2 next = type(self)() next._data = data[index:] first = data[index] del data[index:] if len(data) == 0: self._firstbucket = None # lost our bucket, can't buy no beer if isinstance(first.child, type(self)): next._firstbucket = first.child._firstbucket else: next._firstbucket = first.child return next def _del(self, key): if ( self._p_jar is not None and self._p_oid is not None and self._p_serial is not None ): self._p_jar.readCurrent(self) data = self._data if not data: raise KeyError(key) index = self._search(key) child = data[index].child removed_first_bucket, value = child._del(key) # See comment in _set about small trees if ( len(data) == 1 and type(child) is self._bucket_type and child._p_oid is None ): self._p_changed = True # fix up the node key, but not for the 0'th one. if index > 0 and child.size and compare(key, data[index].key) == 0: self._p_changed = True data[index].key = child.minKey() if removed_first_bucket: if index: data[index-1].child._deleteNextBucket() removed_first_bucket = False # clear flag else: self._firstbucket = child._firstbucket if not child.size: if type(child) is self._bucket_type: if index: data[index-1].child._deleteNextBucket() else: self._firstbucket = child._next removed_first_bucket = True del data[index] self._p_changed = True return removed_first_bucket, value def _deleteNextBucket(self): self._data[-1].child._deleteNextBucket() def __getstate__(self): data = self._data if not data: # Note: returning None here causes our __setstate__ # to not be called on unpickling return None if ( len(data) == 1 and type(data[0].child) is not type(self) and data[0].child._p_oid is None ): return ((data[0].child.__getstate__(), ), ) data = iter(data) sdata = [next(data).child] for item in data: sdata.append(item.key) sdata.append(item.child) return tuple(sdata), self._firstbucket def __setstate__(self, state): if state and not isinstance(state[0], tuple): raise TypeError('tuple required for first state element') self.clear() if state is None: return if len(state) == 1: bucket = self._bucket_type() bucket.__setstate__(state[0][0]) state = [bucket], bucket data, self._firstbucket = state data = list(reversed(data)) # verify children are either tree or bucket nodes. # NOTE for tree-kind node type is compared as "is", not as # "isinstance", to match C version. 
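# A small illustrative sketch of the state layout being validated here
# (assuming the Py classes from BTrees.OOBTree; not part of the
# original source): a tree holding a single, oid-less bucket collapses
# to a doubly wrapped copy of that bucket's state, and __setstate__
# accepts the same shape back.
#
#   >>> from BTrees.OOBTree import OOBTreePy
#   >>> t = OOBTreePy()
#   >>> t['a'] = 1
#   >>> t.__getstate__()
#   (((('a', 1),),),)
#   >>> t2 = OOBTreePy()
#   >>> t2.__setstate__(t.__getstate__())
#   >>> list(t2.items())
#   [('a', 1)]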
for child in data[::2]: if not ((type(child) is type(self)) or isinstance(child, self._bucket_type)): raise TypeError("tree child %s is neither %s nor %s" % (_tp_name(type(child)), _tp_name(type(self)), _tp_name(self._bucket_type))) self._data.append(_TreeItem(None, data.pop())) while data: key = data.pop() child = data.pop() self._data.append(_TreeItem(key, child)) def _assert(self, condition, message): if not condition: raise AssertionError(message) def _check(self, nextbucket=None): data = self._data assert_ = self._assert if not data: assert_(self._firstbucket is None, "Empty BTree has non-NULL firstbucket") return assert_(self._firstbucket is not None, "Non-empty BTree has NULL firstbucket") child_class = type(data[0].child) for i in data: assert_(i.child is not None, "BTree has NULL child") assert_(type(i.child) is child_class, "BTree children have different types") assert_(i.child.size, "Bucket length < 1") if child_class is type(self): assert_( self._firstbucket is data[0].child._firstbucket, "BTree has firstbucket different than " "its first child's firstbucket" ) for i in range(len(data)-1): data[i].child._check(data[i+1].child._firstbucket) data[-1].child._check(nextbucket) elif child_class is self._bucket_type: assert_( self._firstbucket is data[0].child, "Bottom-level BTree node has inconsistent firstbucket belief" ) for i in range(len(data)-1): assert_( data[i].child._next is data[i+1].child, "Bucket next pointer is damaged" ) assert_( data[-1].child._next is nextbucket, "Bucket next pointer is damaged" ) else: assert_(False, "Incorrect child type") def _p_resolveConflict(self, old, com, new): s_old = _get_simple_btree_bucket_state(old) s_com = _get_simple_btree_bucket_state(com) s_new = _get_simple_btree_bucket_state(new) return (( self._bucket_type()._p_resolveConflict(s_old, s_com, s_new), ), ) def __repr__(self): r = super().__repr__() r = r.replace('Py', '') return r def _get_simple_btree_bucket_state(state): if state is None: return state if not isinstance(state, tuple): raise TypeError("_p_resolveConflict: expected tuple or None for state") if len(state) == 2: # non-degenerate BTree, can't resolve raise BTreesConflictError(-1, -1, -1, 11) # Peel away wrapper to get to only-bucket state. 
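# A hedged illustrative sketch (assuming the Py classes from
# BTrees.OOBTree; not part of the original source): conflict resolution
# therefore only succeeds for trees whose entire state is a single
# bucket.  Two transactions adding distinct keys to such a tree merge
# cleanly:
#
#   >>> from BTrees.OOBTree import OOBTreePy
#   >>> old = OOBTreePy({'a': 1})
#   >>> committed = OOBTreePy({'a': 1, 'b': 2})
#   >>> new = OOBTreePy({'a': 1, 'c': 3})
#   >>> resolved = old._p_resolveConflict(
#   ...     old.__getstate__(), committed.__getstate__(), new.__getstate__())
#   >>> merged = OOBTreePy()
#   >>> merged.__setstate__(resolved)
#   >>> list(merged.items())
#   [('a', 1), ('b', 2), ('c', 3)]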
if len(state) != 1: raise TypeError("_p_resolveConflict: expected 1- or 2-tuple for state") state = state[0] if not isinstance(state, tuple) or len(state) != 1: raise TypeError("_p_resolveConflict: expected 1-tuple containing " "bucket state") state = state[0] if not isinstance(state, tuple): raise TypeError("_p_resolveConflict: expected tuple for bucket state") return state class _TreeItems: __slots__ = ( 'firstbucket', 'itertype', 'iterargs', 'index', 'it', 'v', '_len', ) def __init__(self, firstbucket, itertype, iterargs): self.firstbucket = firstbucket self.itertype = itertype self.iterargs = iterargs self.index = -1 self.it = iter(self) self.v = None self._len = None def __getitem__(self, i): if isinstance(i, slice): return list(self)[i] if i < 0: i = len(self) + i if i < 0: raise IndexError(i) if i < self.index: self.index = -1 self.it = iter(self) while i > self.index: try: self.v = next(self.it) except StopIteration: raise IndexError(i) else: self.index += 1 return self.v def __len__(self): if self._len is None: i = 0 for _ in self: i += 1 self._len = i return self._len def __iter__(self): bucket = self.firstbucket itertype = self.itertype iterargs = self.iterargs done = 0 # Note that we don't mind if the first bucket yields no # results due to an idiosyncrasy in how range searches are done. while bucket is not None: for k in getattr(bucket, itertype)(*iterargs): yield k done = 0 if done: return bucket = bucket._next done = 1 class _TreeIterator: """ Faux implementation for BBB only. """ def __init__(self, items): # pragma: no cover raise TypeError( "TreeIterators are private implementation details " "of the C-based BTrees.\n\n" "Please use 'iter(tree)', rather than instantiating " "one directly." ) class Tree(_MutableMappingMixin, _Tree): __slots__ = () def get(self, key, default=None): bucket = self._findbucket(key) if bucket: return bucket.get(key, default) return default def __getitem__(self, key): bucket = self._findbucket(key) if bucket: return bucket[key] raise KeyError(key) def values(self, min=_marker, max=_marker, excludemin=False, excludemax=False): return self.keys(min, max, excludemin, excludemax, 'itervalues') def itervalues(self, min=_marker, max=_marker, excludemin=False, excludemax=False): return iter(self.values(min, max, excludemin, excludemax)) def items(self, min=_marker, max=_marker, excludemin=False, excludemax=False): return self.keys(min, max, excludemin, excludemax, 'iteritems') def iteritems(self, min=_marker, max=_marker, excludemin=False, excludemax=False): return iter(self.items(min, max, excludemin, excludemax)) def byValue(self, min): return reversed( sorted((v, k) for (k, v) in self.iteritems() if v >= min)) def insert(self, key, value): return bool(self._set(key, value, True)[0]) class TreeSet(_MutableSetMixin, _Tree): __slots__ = () def add(self, key): return self._set(self._to_key(key))[0] insert = add def remove(self, key): self._del(self._to_key(key)) def update(self, items): add = self.add for i in items: add(i) _p_resolveConflict = _Tree._p_resolveConflict class set_operation: __slots__ = ( 'func', 'set_type', '__name__', '_module', ) def __init__(self, func, set_type): self.func = func self.set_type = set_type self.__name__ = func.__name__ self._module = func.__module__ __module__ = property( lambda self: self._module, lambda self, nv: setattr(self, '_module', nv) ) def __call__(self, *a, **k): return self.func(self.set_type, *a, **k) def difference(set_type, o1, o2): if o1 is None or o2 is None: return o1 i1 = _SetIteration(o1, True, 0) i2 
= _SetIteration(o2, False, 0, True) if i1.useValues: result = o1._mapping_type() def copy(i): result._keys.append(i.key) result._values.append(i.value) else: result = o1._set_type() def copy(i): result._keys.append(i.key) while i1.active and i2.active: cmp_ = compare(i1.key, i2.key) if cmp_ < 0: copy(i1) i1.advance() elif cmp_ == 0: i1.advance() i2.advance() else: i2.advance() while i1.active: copy(i1) i1.advance() return result def union(set_type, o1, o2): if o1 is None: return o2 if o2 is None: return o1 i1 = _SetIteration(o1, False, 0, True) i2 = _SetIteration(o2, False, 0, True) result = set_type() def copy(i): result._keys.append(i.key) while i1.active and i2.active: cmp_ = compare(i1.key, i2.key) if cmp_ < 0: copy(i1) i1.advance() elif cmp_ == 0: copy(i1) i1.advance() i2.advance() else: copy(i2) i2.advance() while i1.active: copy(i1) i1.advance() while i2.active: copy(i2) i2.advance() return result def intersection(set_type, o1, o2): if o1 is None: return o2 if o2 is None: return o1 i1 = _SetIteration(o1, False, 0, True) i2 = _SetIteration(o2, False, 0, True) result = set_type() def copy(i): result._keys.append(i.key) while i1.active and i2.active: cmp_ = compare(i1.key, i2.key) if cmp_ < 0: i1.advance() elif cmp_ == 0: copy(i1) i1.advance() i2.advance() else: i2.advance() return result def _prepMergeIterators(o1, o2): MERGE_DEFAULT = getattr(o1, 'MERGE_DEFAULT', None) if MERGE_DEFAULT is None: raise TypeError("invalid set operation") i1 = _SetIteration(o1, True, MERGE_DEFAULT) i2 = _SetIteration(o2, True, MERGE_DEFAULT) return i1, i2 def weightedUnion(set_type, o1, o2, w1=1, w2=1): if o1 is None: if o2 is None: return 0, None return w2, o2 if o2 is None: return w1, o1 i1, i2 = _prepMergeIterators(o1, o2) MERGE = getattr(o1, 'MERGE', None) if MERGE is None and i1.useValues and i2.useValues: raise TypeError("invalid set operation") MERGE_WEIGHT = getattr(o1, 'MERGE_WEIGHT') if (not i1.useValues) and i2.useValues: i1, i2 = i2, i1 w1, w2 = w2, w1 _merging = i1.useValues or i2.useValues if _merging: result = o1._mapping_type() def copy(i, w): result._keys.append(i.key) result._values.append(MERGE_WEIGHT(i.value, w)) else: result = o1._set_type() def copy(i, w): result._keys.append(i.key) while i1.active and i2.active: cmp_ = compare(i1.key, i2.key) if cmp_ < 0: copy(i1, w1) i1.advance() elif cmp_ == 0: result._keys.append(i1.key) if _merging: result._values.append(MERGE(i1.value, w1, i2.value, w2)) i1.advance() i2.advance() else: copy(i2, w2) i2.advance() while i1.active: copy(i1, w1) i1.advance() while i2.active: copy(i2, w2) i2.advance() return 1, result def weightedIntersection(set_type, o1, o2, w1=1, w2=1): if o1 is None: if o2 is None: return 0, None return w2, o2 if o2 is None: return w1, o1 i1, i2 = _prepMergeIterators(o1, o2) MERGE = getattr(o1, 'MERGE', None) if MERGE is None and i1.useValues and i2.useValues: raise TypeError("invalid set operation") if (not i1.useValues) and i2.useValues: i1, i2 = i2, i1 w1, w2 = w2, w1 _merging = i1.useValues or i2.useValues if _merging: result = o1._mapping_type() else: result = o1._set_type() while i1.active and i2.active: cmp_ = compare(i1.key, i2.key) if cmp_ < 0: i1.advance() elif cmp_ == 0: result._keys.append(i1.key) if _merging: result._values.append(MERGE(i1.value, w1, i2.value, w2)) i1.advance() i2.advance() else: i2.advance() if isinstance(result, (Set, TreeSet)): return w1 + w2, result return 1, result def multiunion(set_type, seqs): # XXX simple/slow implementation. Goal is just to get tests to pass. 
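# A minimal illustrative sketch (assuming the integer set types and the
# module-level set operations exported by BTrees.IIBTree; not part of
# the original source): these operations work on whole sets, and
# multiunion additionally accepts bare keys in the input sequence.
#
#   >>> from BTrees.IIBTree import IISet, union, difference, multiunion
#   >>> a, b = IISet([1, 2, 3]), IISet([2, 3, 4])
#   >>> list(union(a, b)), list(difference(a, b))
#   ([1, 2, 3, 4], [1])
#   >>> list(multiunion([a, b, 5]))
#   [1, 2, 3, 4, 5]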
result = set_type() for s in seqs: try: iter(s) except TypeError: s = set_type((s, )) result.update(s) return result def MERGE(self, value1, weight1, value2, weight2): return (value1 * weight1) + (value2 * weight2) def MERGE_WEIGHT_default(self, value, weight): return value def MERGE_WEIGHT_numeric(self, value, weight): return value * weight def _fix_pickle(mod_dict, mod_name): # Make the pure-Python objects pickle with the same # class names and types as the C extensions by setting the appropriate # _BTree_reduce_as attribute. # If the C extensions are not available, we also change the # __name__ attribute of the type to match the C name (otherwise # we wind up with *Py in the pickles) # Each module must call this as `_fix_pickle(globals(), __name__)` # at the bottom. mod_prefix = mod_name.split('.')[-1][:2] # BTrees.OOBTree -> 'OO' bucket_name = mod_prefix + 'Bucket' py_bucket_name = bucket_name + 'Py' have_c_extensions = mod_dict[bucket_name] is not mod_dict[py_bucket_name] for name in 'Bucket', 'Set', 'BTree', 'TreeSet', 'TreeIterator': raw_name = mod_prefix + name py_name = raw_name + 'Py' try: py_type = mod_dict[py_name] except KeyError: if name == 'TreeIterator': # Optional continue raise # pragma: no cover raw_type = mod_dict[raw_name] # Could be C or Python py_type._BTree_reduce_as = raw_type py_type._BTree_reduce_up_bound = py_type if not have_c_extensions: # pragma: no cover # Set FooPy to have the __name__ of simply Foo. # We can't do this if the C extension is available, # because then mod_dict[FooPy.__name__] is not FooPy # and pickle refuses to save something like that. # On the other hand (no C extension) this makes our # Python pickle match the C version by default py_type.__name__ = raw_name py_type.__qualname__ = raw_name # Py 3.3+ # tp_name returns full name of a type in the same way as how it is provided by # typ->tp_name in C. def _tp_name(typ): return '.'.join([typ.__module__, typ.__name__]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_compat.h0000644000076500000240000000221314355020716015174 0ustar00jensstaff#ifndef BTREES__COMPAT_H #define BTREES__COMPAT_H #include "Python.h" #ifdef INTERN #undef INTERN #endif #ifdef INT_FROM_LONG #undef INT_FROM_LONG #endif #ifdef INT_CHECK #undef INT_CHECK #endif #ifndef Py_RETURN_NOTIMPLEMENTED #define Py_RETURN_NOTIMPLEMENTED \ return Py_INCREF(Py_NotImplemented), Py_NotImplemented #endif #define INTERN PyUnicode_InternFromString #define INT_FROM_LONG(x) PyLong_FromLong(x) #define INT_CHECK(x) PyLong_Check(x) #define INT_AS_LONG(x) PyLong_AsLong(x) #define UINT_FROM_LONG(x) PyLong_FromUnsignedLong(x) #define UINT_AS_LONG(x) PyLong_AsUnsignedLong(x) #define TEXT_FROM_STRING PyUnicode_FromString #define TEXT_FORMAT PyUnicode_Format /* Note that the second comparison is skipped if the first comparison returns: 1 -> There was no error and the answer is -1 -1 -> There was an error, which the caller will detect with PyError_Occurred. */ #define COMPARE(lhs, rhs) \ (lhs == Py_None ? (rhs == Py_None ? 0 : -1) : (rhs == Py_None ? 1 : \ (PyObject_RichCompareBool((lhs), (rhs), Py_LT) != 0 ? -1 : \ (PyObject_RichCompareBool((lhs), (rhs), Py_EQ) > 0 ? 
0 : 1)))) #endif /* BTREES__COMPAT_H */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/_compat.py0000644000076500000240000000774314626022106015406 0ustar00jensstaff############################################################################## # # Copyright (c) 2001-2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import os import sys PYPY = hasattr(sys, 'pypy_version_info') def compare(x, y): if x is None: if y is None: return 0 else: return -1 elif y is None: return 1 else: return (x > y) - (y > x) def _ascii(x): return bytes(x, 'ascii') def _c_optimizations_required(): """Return a true value if the C optimizations are required. Uses the ``PURE_PYTHON`` variable as documented in `import_c_extension`. """ pure_env = os.environ.get('PURE_PYTHON') require_c = pure_env == "0" return require_c def _c_optimizations_available(module_name): """ Return the C optimization module, if available, otherwise a false value. If the optimizations are required but not available, this raises the ImportError. This does not say whether they should be used or not. """ import importlib catch = () if _c_optimizations_required() else (ImportError,) try: return importlib.import_module('BTrees._' + module_name) except catch: # pragma: no cover return False def _c_optimizations_ignored(): """ The opposite of `_c_optimizations_required`. """ pure_env = os.environ.get('PURE_PYTHON') return pure_env != "0" if pure_env is not None else PYPY def _should_attempt_c_optimizations(): """ Return a true value if we should attempt to use the C optimizations. This takes into account whether we're on PyPy and the value of the ``PURE_PYTHON`` environment variable, as defined in `import_c_extension`. """ if PYPY: return False if _c_optimizations_required(): return True return not _c_optimizations_ignored() def import_c_extension(mod_globals): """ Call this function with the globals of a module that implements Python versions of a BTree family to find the C optimizations. If the ``PURE_PYTHON`` environment variable is set to any value other than ``"0"``, or we're on PyPy, ignore the C implementation. If the C implementation cannot be imported, return the Python version. If ``PURE_PYTHON`` is set to ``"0"``, *require* the C implementation (let the ImportError propagate); the exception again is PyPy, where we never use the C extension (although it builds here, the ``persistent`` library doesn't provide native extensions for PyPy). """ c_module = None module_name = mod_globals['__name__'] assert module_name.startswith('BTrees.') module_name = module_name.split('.')[1] if _should_attempt_c_optimizations(): c_module = _c_optimizations_available(module_name) if c_module: new_values = dict(c_module.__dict__) new_values.pop("__name__", None) new_values.pop('__file__', None) new_values.pop('__doc__', None) mod_globals.update(new_values) else: # No C extension, make the Py versions available without that # extension. 
The list comprehension both filters and prevents # concurrent modification errors. for py in [k for k in mod_globals if k.endswith('Py')]: mod_globals[py[:-2]] = mod_globals[py] # Assign the global aliases prefix = module_name[:2] for name in ('Bucket', 'Set', 'BTree', 'TreeSet'): mod_globals[name] = mod_globals[prefix + name] # Cleanup mod_globals.pop('import_c_extension', None) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/_datatypes.py0000644000076500000240000003240514626022106016112 0ustar00jensstaff############################################################################## # # Copyright Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Descriptions of the datatypes supported by this package. """ import abc import operator import struct from .utils import Lazy class DataType: """ Describes a data type used as a value. Subclasses will be defined for each particular supported type. """ # The name for this datatype as used in interface names. long_name = None # The prefix code for this data type. Usually a single letter. prefix_code = None # The multiplication identity for this data type. Used in # combining (merging) data types. Leave undefined if this is # not a valid operation. multiplication_identity = None # Does the data take up 64-bits? Currently only relevant for the # integer key types. using64bits = False def __init__(self): if not self.prefix_code: self.prefix_code = type(self).__name__ def __call__(self, item): """ Verify *item* is in the correct format (or "close" enough) and return the item or its suitable conversion. If this cannot be done, raise a :exc:`TypeError`. The definition of "close" varies according to the datatypes. For example, integer datatypes will accept anything that can be converted into an integer using normal python coercion rules (calling ``__index__``) and where the integer fits into the required native type size (e.g., 4 bytes). """ raise NotImplementedError def coerce(self, item): """ Coerce *item* into something that can be used with ``__call__`` and return it. The coercion rules will vary by datatype. This exists only for test cases. The default is to perform the same validation as ``__call__``. """ return self(item) def apply_weight(self, item, weight): """ Apply a *weight* multiplier to *item*. Used when merging data structures. The *item* will be a value. """ return item def as_value_type(self): # Because ``O'`` is used for both key and value, # we can override this to get the less restrictive value type. return self def supports_value_union(self): raise NotImplementedError def getTwoExamples(self): """ Provide two distinct (non equal) examples acceptable to `__call__`. This is for testing. """ return "object1", "object2" def get_lower_bound(self): """ If there is a lower bound (inclusive) on the data type, return it. Otherwise, return ``None``. For integer types, this will only depend on whether it supports signed or unsigned values, and the answer will be 0 or a negative number. 
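        A small illustrative example (assuming the concrete ``I`` and
        ``U`` datatypes defined later in this module):

            >>> from BTrees._datatypes import I, U
            >>> I().get_lower_bound()
            -2147483648
            >>> U().get_lower_bound()
            0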
For object types, ``None`` is always defined to sort as the lowest bound. This can be relevant for both key and value types. """ return None def get_upper_bound(self): """ If there is an upper bound (inclusive) on the data type, return it. Otherwise, return ``None``. Remarks are as for `get_lower_bound`. """ return None def add_extra_methods(self, base_name, cls): """ Hook method called on the key datatype to add zero or more desired arbitrary additional, non-standard, methods to the *cls* being constructed. *base_name* will be a string identifying the particular family of class being constructed, such as 'Bucket' or 'BTree'. """ class KeyDataType(DataType): """ Describes a data type that has additional restrictions allowing it to be used as a key. """ # When used as the key, this number determines the # max_internal_size. tree_size = 500 default_bucket_size = 120 def __call__(self, item): raise NotImplementedError def bucket_size_for_value(self, value_type): """ What should the bucket (``max_leaf_size``) be when this data type is used with the given *value_type*? """ if isinstance(value_type, Any): return self.default_bucket_size // 2 return self.default_bucket_size class Any(DataType): """ Arbitrary Python objects. """ prefix_code = 'O' long_name = 'Object' def __call__(self, item): return item def supports_value_union(self): return False class _HasDefaultComparison(abc.ABC): """ An `abc.ABC _` for checking whether an item has default comparison. All we have to do is override ``__subclasshook__`` to implement an algorithm determining whether a class has default comparison. Python and the ABC machinery will take care of translating ``isinstance(thing, _HasDefaultComparison)`` into something like ``_HasDefaultComparison.__subclasshook__(type(thing))``. The ABC handles caching the answer (based on exact classes, no MRO), and getting the type from ``thing`` (including mostly dealing with old-style) classes on Python 2. """ # Comparisons only use special methods defined on the # type, not instance variables. # On CPython 3, classes inherit __lt__ with ``__objclass__`` of ``object``. # On PyPy3, they do. # # Test these conditions at runtime and define the method variant # appropriately. # # Remember the method is checking if the object has default comparison assert '__lt__' not in DataType.__dict__ if getattr(DataType.__lt__, '__objclass__', None) is object: # CPython 3 @classmethod def __subclasshook__(cls, C, _NoneType=type(None)): if C is _NoneType: return False defining_class = getattr(C.__lt__, '__objclass__', None) if defining_class is None: # Implemented in Python return False return C.__lt__.__objclass__ is object else: # PyPy3 @classmethod def __subclasshook__( cls, C, _object_lt=object.__lt__, _NoneType=type(None) ): if C is _NoneType: return False return C.__lt__ is _object_lt class O(KeyDataType): # noqa E742 """ Arbitrary (sortable) Python objects. """ long_name = 'Object' tree_size = 250 default_bucket_size = 60 def as_value_type(self): return Any() def supports_value_union(self): return False def __call__(self, item): if isinstance(item, _HasDefaultComparison): raise TypeError( "Object of class {} has default comparison".format( type(item).__name__ ) ) return item class _AbstractNativeDataType(KeyDataType): """ Uses `struct.Struct` to verify that the data can fit into a native type. 
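    A brief illustrative doctest (assuming the concrete ``I`` subclass
    defined below):

        >>> from BTrees._datatypes import I
        >>> I()(7)
        7
        >>> I()(2 ** 40)
        Traceback (most recent call last):
        ...
        TypeError: ('Value out of range', 1099511627776)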
""" _struct_format = None _as_python_type = NotImplementedError _required_python_type = object _error_description = None _as_packable = operator.index @Lazy def _check_native(self): return struct.Struct(self._struct_format).pack def __call__(self, item): try: self._check_native(self._as_packable(item)) except (struct.error, TypeError, ValueError): # PyPy can raise ValueError converting a negative number to a # unsigned value. if isinstance(item, int): raise TypeError("Value out of range", item) raise TypeError(self._error_description) return self._as_python_type(item) def apply_weight(self, item, weight): return item * weight def supports_value_union(self): return True class _AbstractIntDataType(_AbstractNativeDataType): _as_python_type = int _required_python_type = int multiplication_identity = 1 long_name = "Integer" def getTwoExamples(self): return 1, 2 def get_lower_bound(self): exp = 64 if self.using64bits else 32 exp -= 1 return int(-(2 ** exp)) def get_upper_bound(self): exp = 64 if self.using64bits else 32 exp -= 1 return int(2 ** exp - 1) class _AbstractUIntDataType(_AbstractIntDataType): long_name = 'Unsigned' def get_lower_bound(self): return 0 def get_upper_bound(self): exp = 64 if self.using64bits else 32 return int(2 ** exp - 1) class I(_AbstractIntDataType): # noqa E742 _struct_format = 'i' _error_description = "32-bit integer expected" class U(_AbstractUIntDataType): _struct_format = 'I' _error_description = 'non-negative 32-bit integer expected' class F(_AbstractNativeDataType): _struct_format = 'f' _as_python_type = float _error_description = 'float expected' multiplication_identity = 1.0 long_name = 'Float' def _as_packable(self, k): # identity return k def getTwoExamples(self): return 0.5, 1.5 class L(_AbstractIntDataType): _struct_format = 'q' _error_description = '64-bit integer expected' using64bits = True class Q(_AbstractUIntDataType): _struct_format = 'Q' _error_description = 'non-negative 64-bit integer expected' using64bits = True class _AbstractBytes(KeyDataType): """ An exact-length byte string type. This must be subclassed to provide the actual byte length. """ tree_size = 500 default_bucket_size = 500 _length = None def __call__(self, item): if not isinstance(item, bytes) or len(item) != self._length: raise TypeError( "{}-byte array expected, not {!r}".format(self._length, item) ) return item def supports_value_union(self): # We don't implement 'multiunion' for fsBTree. return False class f(_AbstractBytes): """ The key type for an ``fs`` tree. This is a two-byte prefix of an overall 8-byte value like a ZODB object ID or transaction ID. """ # Our keys are treated like integers; the module # implements IIntegerObjectBTreeModule long_name = 'Integer' prefix_code = 'f' _length = 2 # Check it can be converted to a two-byte # value. Note that even though we allow negative values # that can break test assumptions: -1 < 0 < 1, but the byte # values for those are \xff\xff > \x00\x00 < \x00\x01. 
_as_2_bytes = struct.Struct('>h').pack def coerce(self, item): try: return self(item) except TypeError: try: return self._as_2_bytes(operator.index(item)) except struct.error as e: raise TypeError(e) @staticmethod def _make_Bucket_toString(): def toString(self): return b''.join(self._keys) + b''.join(self._values) return toString @staticmethod def _make_Bucket_fromString(): def fromString(self, v): length = len(v) if length % 8 != 0: raise ValueError() count = length // 8 keys, values = v[:count*2], v[count*2:] self.clear() while keys and values: key, keys = keys[:2], keys[2:] value, values = values[:6], values[6:] self._keys.append(key) self._values.append(value) return self return fromString def add_extra_methods(self, base_name, cls): if base_name == 'Bucket': cls.toString = self._make_Bucket_toString() cls.fromString = self._make_Bucket_fromString() class s(_AbstractBytes): """ The value type for an ``fs`` tree. This is a 6-byte suffix of an overall 8-byte value like a ZODB object ID or transaction ID. """ # Our values are treated like objects; the # module implements IIntegerObjectBTreeModule long_name = 'Object' prefix_code = 's' _length = 6 def get_lower_bound(self): # Negative values have the high bit set, which is incompatible # with our transformation. return 0 # To coerce an integer, as used in tests, first convert to 8 bytes # in big-endian order, then ensure the first two # are 0 and cut them off. _as_8_bytes = struct.Struct('>q').pack def coerce(self, item): try: return self(item) except TypeError: try: as_bytes = self._as_8_bytes(operator.index(item)) except struct.error as e: raise TypeError(e) if as_bytes[:2] != b'\x00\x00': raise TypeError( "Cannot convert {!r} to 6 bytes ({!r})".format( item, as_bytes ) ) return as_bytes[2:] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/_fsBTree.c0000644000076500000240000001110114355020716015232 0ustar00jensstaff/*############################################################################ # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################*/ #define MASTER_ID "$Id$\n" /* fsBTree - FileStorage index BTree This BTree implements a mapping from 2-character strings to six-character strings. This allows us to efficiently store a FileStorage index as a nested mapping of 6-character oid prefix to mapping of 2-character oid suffix to 6-character (byte) file positions. */ typedef unsigned char char2[2]; typedef unsigned char char6[6]; /* Setup template macros */ #define PERSISTENT #define MOD_NAME_PREFIX "fs" #include "_compat.h" /*#include "intkeymacros.h"*/ #define KEYMACROS_H "$Id$\n" #define KEY_TYPE char2 #undef KEY_TYPE_IS_PYOBJECT #define KEY_CHECK(K) (PyBytes_Check(K) && PyBytes_GET_SIZE(K)==2) #define TEST_KEY_SET_OR(V, K, T) if ( ( (V) = ((*(K) < *(T) || (*(K) == *(T) && (K)[1] < (T)[1])) ? -1 : ((*(K) == *(T) && (K)[1] == (T)[1]) ? 
0 : 1)) ), 0 ) #define DECREF_KEY(KEY) #define INCREF_KEY(k) #define COPY_KEY(KEY, E) (*(KEY)=*(E), (KEY)[1]=(E)[1]) #define COPY_KEY_TO_OBJECT(O, K) O=PyBytes_FromStringAndSize((const char*)K,2) #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ if (KEY_CHECK(ARG)) memcpy(TARGET, PyBytes_AS_STRING(ARG), 2); else { \ PyErr_SetString(PyExc_TypeError, "expected two-character string key"); \ (STATUS)=0; } /*#include "intvaluemacros.h"*/ #define VALUEMACROS_H "$Id$\n" #define VALUE_TYPE char6 #undef VALUE_TYPE_IS_PYOBJECT #define TEST_VALUE(K, T) memcmp(K,T,6) #define DECREF_VALUE(k) #define INCREF_VALUE(k) #define COPY_VALUE(V, E) (memcpy(V, E, 6)) #define COPY_VALUE_TO_OBJECT(O, K) O=PyBytes_FromStringAndSize((const char*)K,6) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if ((PyBytes_Check(ARG) && PyBytes_GET_SIZE(ARG)==6)) \ memcpy(TARGET, PyBytes_AS_STRING(ARG), 6); else { \ PyErr_SetString(PyExc_TypeError, "expected six-character string key"); \ (STATUS)=0; } #define NORMALIZE_VALUE(V, MIN) #include "Python.h" static PyObject *bucket_toBytes(PyObject *self); static PyObject *bucket_fromBytes(PyObject *self, PyObject *state); #define EXTRA_BUCKET_METHODS \ {"toBytes", (PyCFunction) bucket_toBytes, METH_NOARGS, \ "toBytes() -- Return the state as a bytes array"}, \ {"fromBytes", (PyCFunction) bucket_fromBytes, METH_O, \ "fromSBytes(s) -- Set the state of the object from a bytes array"}, \ {"toString", (PyCFunction) bucket_toBytes, METH_NOARGS, \ "toString() -- Deprecated alias for 'toBytes'"}, \ {"fromString", (PyCFunction) bucket_fromBytes, METH_O, \ "fromString(s) -- Deprecated alias for 'fromBytes'"}, \ #define INITMODULE PyInit__fsBTree #include "BTreeModuleTemplate.c" static PyObject * bucket_toBytes(PyObject *oself) { Bucket *self = (Bucket *)oself; PyObject *items = NULL; int len; PER_USE_OR_RETURN(self, NULL); len = self->len; items = PyBytes_FromStringAndSize(NULL, len*8); if (items == NULL) goto err; memcpy(PyBytes_AS_STRING(items), self->keys, len*2); memcpy(PyBytes_AS_STRING(items)+len*2, self->values, len*6); PER_UNUSE(self); return items; err: PER_UNUSE(self); Py_XDECREF(items); return NULL; } static PyObject * bucket_fromBytes(PyObject *oself, PyObject *state) { Bucket *self = (Bucket *)oself; int len; KEY_TYPE *keys; VALUE_TYPE *values; len = PyBytes_Size(state); if (len < 0) return NULL; if (len%8) { PyErr_SetString(PyExc_ValueError, "state string of wrong size"); return NULL; } len /= 8; if (self->next) { Py_DECREF(self->next); self->next = NULL; } if (len > self->size) { keys = BTree_Realloc(self->keys, sizeof(KEY_TYPE)*len); if (keys == NULL) return NULL; values = BTree_Realloc(self->values, sizeof(VALUE_TYPE)*len); if (values == NULL) return NULL; self->keys = keys; self->values = values; self->size = len; } memcpy(self->keys, PyBytes_AS_STRING(state), len*2); memcpy(self->values, PyBytes_AS_STRING(state)+len*2, len*6); self->len = len; Py_INCREF(self); return (PyObject *)self; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/_module_builder.py0000644000076500000240000001720414626022106017107 0ustar00jensstaff############################################################################## # # Copyright Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Support functions to eliminate the boilerplate involved in defining BTree modules. """ import sys from zope.interface import classImplements from zope.interface import directlyProvides def _create_classes( module_name, key_datatype, value_datatype, ): from ._base import MERGE # Won't always want this. from ._base import Bucket from ._base import Set from ._base import Tree from ._base import TreeSet from ._base import _TreeItems as TreeItems from ._base import _TreeIterator classes = {} prefix = key_datatype.prefix_code + value_datatype.prefix_code classes['TreeItems'] = classes['TreeItemsPy'] = TreeItems for base in ( Bucket, Set, (Tree, 'BTree'), TreeSet, (_TreeIterator, 'TreeIterator'), ): if isinstance(base, tuple): base, base_name = base else: base_name = base.__name__ # XXX: Consider defining these with their natural names # now and only aliasing them to 'Py' instead of the # opposite. That should make pickling easier. name = prefix + base_name + 'Py' cls = type(name, (base,), dict( _to_key=key_datatype, _to_value=value_datatype, MERGE=MERGE, MERGE_WEIGHT=value_datatype.apply_weight, MERGE_DEFAULT=value_datatype.multiplication_identity, # max_leaf_size and max_internal_size are set # for BTree and TreeSet later, when we do the same thing # for C. )) cls.__module__ = module_name key_datatype.add_extra_methods(base_name, cls) classes[cls.__name__] = cls # Importing the C extension does this for the non-py # classes. # TODO: Unify that. classes[base_name + 'Py'] = cls for cls in classes.values(): cls._mapping_type = classes['BucketPy'] cls._set_type = classes['SetPy'] if 'Set' in cls.__name__: cls._bucket_type = classes['SetPy'] else: cls._bucket_type = classes['BucketPy'] return classes def _create_set_operations(module_name, key_type, value_type, set_type): from ._base import difference from ._base import intersection from ._base import multiunion from ._base import set_operation from ._base import union from ._base import weightedIntersection from ._base import weightedUnion ops = { op.__name__ + 'Py': set_operation(op, set_type) for op in ( difference, intersection, union, ) + ( (weightedIntersection, weightedUnion,) if value_type.supports_value_union() else () ) + ( (multiunion,) if key_type.supports_value_union() else () ) } for key, op in ops.items(): op.__module__ = module_name op.__name__ = key # TODO: Pickling. These things should be looked up by name. return ops def _create_globals(module_name, key_datatype, value_datatype): classes = _create_classes(module_name, key_datatype, value_datatype) set_type = classes['SetPy'] set_ops = _create_set_operations( module_name, key_datatype, value_datatype, set_type, ) classes.update(set_ops) return classes def populate_module(mod_globals, key_datatype, value_datatype, interface, module=None): import collections.abc from . import Interfaces as interfaces from ._base import _fix_pickle from ._compat import import_c_extension module_name = mod_globals['__name__'] # Define the Python implementations mod_globals.update( _create_globals(module_name, key_datatype, value_datatype) ) # Import the C versions, if possible. Whether or not this is possible, # this currently makes the non-`Py' suffixed names available. 
This should # change if we start defining the Python classes with their natural name, # only aliased to the 'Py` suffix (which simplifies pickling) import_c_extension(mod_globals) # Next, define __all__ after all the name aliasing is done. # XXX: Maybe derive this from the values we create. mod_all = ( 'Bucket', 'Set', 'BTree', 'TreeSet', 'union', 'intersection', 'difference', 'weightedUnion', 'weightedIntersection', 'multiunion', ) prefix = key_datatype.prefix_code + value_datatype.prefix_code mod_all += tuple(prefix + c for c in ('Bucket', 'Set', 'BTree', 'TreeSet')) mod_globals['__all__'] = tuple(c for c in mod_all if c in mod_globals) mod_globals['using64bits'] = ( key_datatype.using64bits or value_datatype.using64bits ) # XXX: We can probably do better than fix_pickle now; # we can know if we're going to be renaming classes # ahead of time. See above. _fix_pickle(mod_globals, module_name) # Apply interface definitions. directlyProvides(module or sys.modules[module_name], interface) for cls_name, iface in { 'BTree': interfaces.IBTree, 'Bucket': interfaces.IMinimalDictionary, 'Set': interfaces.ISet, 'TreeSet': interfaces.ITreeSet, 'TreeItems': interfaces.IMinimalSequence, }.items(): classImplements(mod_globals[cls_name], iface) classImplements(mod_globals[cls_name + 'Py'], iface) for cls_name, abc in { 'BTree': collections.abc.MutableMapping, 'Bucket': collections.abc.MutableMapping, 'Set': collections.abc.MutableSet, 'TreeSet': collections.abc.MutableSet, }.items(): abc.register(mod_globals[cls_name]) # Because of some quirks in the implementation of # ABCMeta.__instancecheck__, and the shenanigans we currently do to # make Python classes pickle without the 'Py' suffix, it's not actually # necessary to register the Python version of the class. Specifically, # ABCMeta asks for the object's ``__class__`` instead of using # ``type()``, and our objects have a ``@property`` for ``__class__`` # that returns the C version. # # That's too many coincidences to rely on though. abc.register(mod_globals[cls_name + 'Py']) # Set node sizes. for cls_name in ('BTree', 'TreeSet'): for suffix in ('', 'Py'): cls = mod_globals[cls_name + suffix] cls.max_leaf_size = key_datatype.bucket_size_for_value( value_datatype ) cls.max_internal_size = key_datatype.tree_size def create_module(prefix): import types from . import Interfaces from . import _datatypes as datatypes mod = types.ModuleType('BTrees.' + prefix + 'BTree') key_type = getattr(datatypes, prefix[0])() val_type = getattr(datatypes, prefix[1])().as_value_type() iface_name = 'I' + key_type.long_name + val_type.long_name + 'BTreeModule' iface = getattr(Interfaces, iface_name) populate_module(vars(mod), key_type, val_type, iface, mod) return mod ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/check.py0000644000076500000240000004204214626022106015030 0ustar00jensstaff############################################################################## # # Copyright (c) 2003 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## # isort: skip_file """ Utilities for working with BTrees (TreeSets, Buckets, and Sets) at a low level. The primary function is check(btree), which performs value-based consistency checks of a kind ``BTree._Tree._check()`` does not perform. See the function docstring for details. display(btree) displays the internal structure of a BTree (TreeSet, etc) to stdout. CAUTION: When a BTree node has only a single bucket child, it can be impossible to get at the bucket from Python code (__getstate__() may squash the bucket object out of existence, as a pickling storage optimization). In such a case, the code here synthesizes a temporary bucket with the same keys (and values, if the bucket is of a mapping type). This has no first-order consequences, but can mislead if you pay close attention to reported object addresses and/or object identity (the synthesized bucket has an address that doesn't exist in the actual BTree). """ # 32-bit signed int from BTrees.IFBTree import IFBTree, IFBucket, IFSet, IFTreeSet from BTrees.IFBTree import IFBTreePy, IFBucketPy, IFSetPy, IFTreeSetPy from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet from BTrees.IIBTree import IIBTreePy, IIBucketPy, IISetPy, IITreeSetPy from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet from BTrees.IOBTree import IOBTreePy, IOBucketPy, IOSetPy, IOTreeSetPy from BTrees.IUBTree import IUBTree, IUBucket, IUSet, IUTreeSet from BTrees.IUBTree import IUBTreePy, IUBucketPy, IUSetPy, IUTreeSetPy # 32-bit unsigned int from BTrees.UFBTree import UFBTree, UFBucket, UFSet, UFTreeSet from BTrees.UFBTree import UFBTreePy, UFBucketPy, UFSetPy, UFTreeSetPy from BTrees.UIBTree import UIBTree, UIBucket, UISet, UITreeSet from BTrees.UIBTree import UIBTreePy, UIBucketPy, UISetPy, UITreeSetPy from BTrees.UOBTree import UOBTree, UOBucket, UOSet, UOTreeSet from BTrees.UOBTree import UOBTreePy, UOBucketPy, UOSetPy, UOTreeSetPy from BTrees.UUBTree import UUBTree, UUBucket, UUSet, UUTreeSet from BTrees.UUBTree import UUBTreePy, UUBucketPy, UUSetPy, UUTreeSetPy # 64-bit signed int from BTrees.LFBTree import LFBTree, LFBucket, LFSet, LFTreeSet from BTrees.LFBTree import LFBTreePy, LFBucketPy, LFSetPy, LFTreeSetPy from BTrees.LLBTree import LLBTree, LLBucket, LLSet, LLTreeSet from BTrees.LLBTree import LLBTreePy, LLBucketPy, LLSetPy, LLTreeSetPy from BTrees.LOBTree import LOBTree, LOBucket, LOSet, LOTreeSet from BTrees.LOBTree import LOBTreePy, LOBucketPy, LOSetPy, LOTreeSetPy from BTrees.LQBTree import LQBTree, LQBucket, LQSet, LQTreeSet from BTrees.LQBTree import LQBTreePy, LQBucketPy, LQSetPy, LQTreeSetPy # 64-bit unsigned int from BTrees.QFBTree import QFBTree, QFBucket, QFSet, QFTreeSet from BTrees.QFBTree import QFBTreePy, QFBucketPy, QFSetPy, QFTreeSetPy from BTrees.QLBTree import QLBTree, QLBucket, QLSet, QLTreeSet from BTrees.QLBTree import QLBTreePy, QLBucketPy, QLSetPy, QLTreeSetPy from BTrees.QOBTree import QOBTree, QOBucket, QOSet, QOTreeSet from BTrees.QOBTree import QOBTreePy, QOBucketPy, QOSetPy, QOTreeSetPy from BTrees.QQBTree import QQBTree, QQBucket, QQSet, QQTreeSet from BTrees.QQBTree import QQBTreePy, QQBucketPy, QQSetPy, QQTreeSetPy from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet from 
BTrees.OIBTree import OIBTreePy, OIBucketPy, OISetPy, OITreeSetPy from BTrees.OLBTree import OLBTree, OLBucket, OLSet, OLTreeSet from BTrees.OLBTree import OLBTreePy, OLBucketPy, OLSetPy, OLTreeSetPy from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet from BTrees.OOBTree import OOBTreePy, OOBucketPy, OOSetPy, OOTreeSetPy from BTrees.OUBTree import OUBTree, OUBucket, OUSet, OUTreeSet from BTrees.OUBTree import OUBTreePy, OUBucketPy, OUSetPy, OUTreeSetPy from BTrees.OQBTree import OQBTree, OQBucket, OQSet, OQTreeSet from BTrees.OQBTree import OQBTreePy, OQBucketPy, OQSetPy, OQTreeSetPy from BTrees.fsBTree import fsBTree, fsBucket, fsSet, fsTreeSet from BTrees.fsBTree import fsBTreePy, fsBucketPy, fsSetPy, fsTreeSetPy from BTrees.utils import positive_id from BTrees.utils import oid_repr from BTrees._compat import compare TYPE_UNKNOWN, TYPE_BTREE, TYPE_BUCKET = range(3) _type2kind = {} _FAMILIES = ( 'OO', 'OI', 'OU', 'OL', 'OQ', 'II', 'IO', 'IF', 'IU', 'LL', 'LO', 'LF', 'LQ', 'UU', 'UO', 'UF', 'UI', 'QQ', 'QO', 'QF', 'QL', 'fs', ) for kv in _FAMILIES: for name, kind in ( ('BTree', (TYPE_BTREE, True)), ('Bucket', (TYPE_BUCKET, True)), ('TreeSet', (TYPE_BTREE, False)), ('Set', (TYPE_BUCKET, False)), ): _type2kind[globals()[kv + name]] = kind py = kv + name + 'Py' _type2kind[globals()[py]] = kind # Return pair # # TYPE_BTREE or TYPE_BUCKET, is_mapping def classify(obj): return _type2kind[type(obj)] BTREE_EMPTY, BTREE_ONE, BTREE_NORMAL = range(3) # If the BTree is empty, returns # # BTREE_EMPTY, [], [] # # If the BTree has only one bucket, sometimes returns # # BTREE_ONE, bucket_state, None # # Else returns # # BTREE_NORMAL, list of keys, list of kids # # and the list of kids has one more entry than the list of keys. # # BTree.__getstate__() docs: # # For an empty BTree (self->len == 0), None. # # For a BTree with one child (self->len == 1), and that child is a bucket, # and that bucket has a NULL oid, a one-tuple containing a one-tuple # containing the bucket's state: # # ( # ( # child[0].__getstate__(), # ), # ) # # Else a two-tuple. The first element is a tuple interleaving the BTree's # keys and direct children, of size 2*self->len - 1 (key[0] is unused and # is not saved). The second element is the firstbucket: # # ( # (child[0], key[1], child[1], key[2], child[2], ..., # key[len-1], child[len-1]), # self->firstbucket # ) _btree2bucket = {} for kv in _FAMILIES: _btree2bucket[globals()[kv+'BTree']] = globals()[kv+'Bucket'] py = kv + 'BTreePy' _btree2bucket[globals()[py]] = globals()[kv+'BucketPy'] _btree2bucket[globals()[kv+'TreeSet']] = globals()[kv+'Set'] py = kv + 'TreeSetPy' _btree2bucket[globals()[kv+'TreeSetPy']] = globals()[kv+'SetPy'] def crack_btree(t, is_mapping): state = t.__getstate__() if state is None: return BTREE_EMPTY, [], [] assert isinstance(state, tuple) if len(state) == 1: state = state[0] assert isinstance(state, tuple) and len(state) == 1 state = state[0] return BTREE_ONE, state, None assert len(state) == 2 data, firstbucket = state n = len(data) assert n & 1 kids = [] keys = [] i = 0 for x in data: if i & 1: keys.append(x) else: kids.append(x) i += 1 return BTREE_NORMAL, keys, kids # Returns # # keys, values # for a mapping; len(keys) == len(values) in this case # or # keys, [] # for a set # # bucket.__getstate__() docs: # # For a set bucket (self->values is NULL), a one-tuple or two-tuple. The # first element is a tuple of keys, of length self->len. 
The second element # is the next bucket, present if and only if next is non-NULL: # # ( # (keys[0], keys[1], ..., keys[len-1]), # next iff non-NULL> # ) # # For a mapping bucket (self->values is not NULL), a one-tuple or two-tuple. # The first element is a tuple interleaving keys and values, of length # 2 * self->len. The second element is the next bucket, present iff next is # non-NULL: # # ( # (keys[0], values[0], keys[1], values[1], ..., # keys[len-1], values[len-1]), # next iff non-NULL> # ) def crack_bucket(b, is_mapping): state = b.__getstate__() assert isinstance(state, tuple) assert 1 <= len(state) <= 2 data = state[0] if not is_mapping: return data, [] keys = [] values = [] n = len(data) assert n & 1 == 0 i = 0 for x in data: if i & 1: values.append(x) else: keys.append(x) i += 1 return keys, values def type_and_adr(obj): if hasattr(obj, '_p_oid'): oid = oid_repr(obj._p_oid) else: oid = 'None' return "{} (0x{:x} oid={})".format( type(obj).__name__, positive_id(obj), oid ) class Walker: # Walker implements a depth-first search of a BTree (or TreeSet or Set or # Bucket). Subclasses must implement the visit_btree() and visit_bucket() # methods, and arrange to call the walk() method. walk() calls the # visit_XYZ() methods once for each node in the tree, in depth-first # left-to-right order. def __init__(self, obj): self.obj = obj # obj is the BTree (BTree or TreeSet). # path is a list of indices, from the root. For example, if a BTree node # is child[5] of child[3] of the root BTree, [3, 5]. # parent is the parent BTree object, or None if this is the root BTree. # is_mapping is True for a BTree and False for a TreeSet. # keys is a list of the BTree's internal keys. # kids is a list of the BTree's children. # If the BTree is an empty root node, keys == kids == []. # Else len(kids) == len(keys) + 1. # lo and hi are slice bounds on the values the elements of keys *should* # lie in (lo inclusive, hi exclusive). lo is None if there is no lower # bound known, and hi is None if no upper bound is known. def visit_btree(self, obj, path, parent, is_mapping, keys, kids, lo, hi): raise NotImplementedError # obj is the bucket (Bucket or Set). # path is a list of indices, from the root. For example, if a bucket # node is child[5] of child[3] of the root BTree, [3, 5]. # parent is the parent BTree object. # is_mapping is True for a Bucket and False for a Set. # keys is a list of the bucket's keys. # values is a list of the bucket's values. # If is_mapping is false, values == []. Else len(keys) == len(values). # lo and hi are slice bounds on the values the elements of keys *should* # lie in (lo inclusive, hi exclusive). lo is None if there is no lower # bound known, and hi is None if no upper bound is known. def visit_bucket(self, obj, path, parent, is_mapping, keys, values, lo, hi): raise NotImplementedError def walk(self): obj = self.obj path = [] stack = [(obj, path, None, None, None)] while stack: obj, path, parent, lo, hi = stack.pop() kind, is_mapping = classify(obj) if kind is TYPE_BTREE: bkind, keys, kids = crack_btree(obj, is_mapping) if bkind is BTREE_NORMAL: # push the kids, in reverse order (so they're popped off # the stack in forward order) n = len(kids) for i in range(len(kids)-1, -1, -1): newlo, newhi = lo, hi if i < n-1: newhi = keys[i] if i > 0: newlo = keys[i-1] stack.append((kids[i], path + [i], obj, newlo, newhi)) elif bkind is BTREE_EMPTY: pass else: assert bkind is BTREE_ONE # Yuck. 
There isn't a bucket object to pass on, as # the bucket state is embedded directly in the BTree # state. Synthesize a bucket. assert kids is None # "keys" is really the bucket state bucket = _btree2bucket[type(obj)]() bucket.__setstate__(keys) stack.append((bucket, path + [0], obj, lo, hi)) keys = [] kids = [bucket] self.visit_btree(obj, path, parent, is_mapping, keys, kids, lo, hi) else: assert kind is TYPE_BUCKET keys, values = crack_bucket(obj, is_mapping) self.visit_bucket(obj, path, parent, is_mapping, keys, values, lo, hi) class Checker(Walker): def __init__(self, obj): Walker.__init__(self, obj) self.errors = [] def check(self): self.walk() if self.errors: s = "Errors found in %s:" % type_and_adr(self.obj) self.errors.insert(0, s) s = "\n".join(self.errors) raise AssertionError(s) def visit_btree(self, obj, path, parent, is_mapping, keys, kids, lo, hi): self.check_sorted(obj, path, keys, lo, hi) def visit_bucket(self, obj, path, parent, is_mapping, keys, values, lo, hi): self.check_sorted(obj, path, keys, lo, hi) def check_sorted(self, obj, path, keys, lo, hi): i, n = 0, len(keys) for x in keys: # lo or hi are ommitted by supplying None. Thus the not # None checkes below. if lo is not None and not compare(lo, x) <= 0: s = "key %r < lower bound %r at index %d" % (x, lo, i) self.complain(s, obj, path) if hi is not None and not compare(x, hi) < 0: s = "key %r >= upper bound %r at index %d" % (x, hi, i) self.complain(s, obj, path) if i < n-1 and not compare(x, keys[i+1]) < 0: s = "key %r at index %d >= key %r at index %d" % ( x, i, keys[i+1], i+1) self.complain(s, obj, path) i += 1 def complain(self, msg, obj, path): s = "{}, in {}, path from root {}".format( msg, type_and_adr(obj), ".".join(map(str, path))) self.errors.append(s) class Printer(Walker): # pragma: no cover def __init__(self, obj): Walker.__init__(self, obj) def display(self): self.walk() def visit_btree(self, obj, path, parent, is_mapping, keys, kids, lo, hi): indent = " " * len(path) print("%s%s %s with %d children" % ( indent, ".".join(map(str, path)), type_and_adr(obj), len(kids))) indent += " " n = len(keys) for i in range(n): print("%skey %d: %r" % (indent, i, keys[i])) def visit_bucket(self, obj, path, parent, is_mapping, keys, values, lo, hi): indent = " " * len(path) print("%s%s %s with %d keys" % ( indent, ".".join(map(str, path)), type_and_adr(obj), len(keys))) indent += " " n = len(keys) for i in range(n): print("%skey %d: %r" % (indent, i, keys[i]),) if is_mapping: print("value {!r}".format(values[i])) def check(btree): """Check internal value-based invariants in a BTree or TreeSet. The ``BTrees._base._Tree._check`` method checks internal C-level pointer consistency. The :func:`~BTrees.check.check` function here checks value-based invariants: whether the keys in leaf bucket and internal nodes are in strictly increasing order, and whether they all lie in their expected range. The latter is a subtle invariant that can't be checked locally -- it requires propagating range info down from the root of the tree, and modifying it at each level for each child. Raises :class:`AssertionError` if anything is wrong, with a string detail explaining the problems. The entire tree is checked before :class:`AssertionError` is raised, and the string detail may be large (depending on how much went wrong). """ Checker(btree).check() def display(btree): # pragma: no cover "Display the internal structure of a BTree, Bucket, TreeSet or Set." 
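    # A short usage sketch for the two public helpers documented above
    # (hedged: it assumes the stock OOBTree module and shows only the shape
    # of the output, not its exact text):
    #
    #     >>> from BTrees.OOBTree import OOBTree
    #     >>> from BTrees.check import check, display
    #     >>> t = OOBTree({'a': 1, 'b': 2, 'c': 3})
    #     >>> check(t)        # silent when the value-based invariants hold
    #     >>> display(t)      # doctest: +SKIP
    #     OOBTree (0x... oid=None) with 1 children
    #       ...
    #
    # check() raises AssertionError with a multi-line report if any bucket or
    # internal node holds keys that are out of order or outside the range
    # implied by the parent nodes.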
Printer(btree).display() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/floatvaluemacros.h0000644000076500000240000000160314330745562017131 0ustar00jensstaff #define VALUEMACROS_H "$Id$\n" #define VALUE_TYPE float #undef VALUE_TYPE_IS_PYOBJECT #define TEST_VALUE(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0)) #define VALUE_SAME(VALUE, TARGET) ( (VALUE) == (TARGET) ) #define DECLARE_VALUE(NAME) VALUE_TYPE NAME #define VALUE_PARSE "f" #define DECREF_VALUE(k) #define INCREF_VALUE(k) #define COPY_VALUE(V, E) (V=(E)) #define COPY_VALUE_TO_OBJECT(O, K) O=PyFloat_FromDouble(K) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if (PyFloat_Check(ARG)) TARGET = (float)PyFloat_AsDouble(ARG); \ else if (INT_CHECK(ARG)) TARGET = (float)INT_AS_LONG(ARG); \ else { \ PyErr_SetString(PyExc_TypeError, "expected float or int value"); \ (STATUS)=0; (TARGET)=0; } #define NORMALIZE_VALUE(V, MIN) ((MIN) > 0) ? ((V)/=(MIN)) : 0 #define MERGE_DEFAULT 1.0f #define MERGE(O1, w1, O2, w2) ((O1)*(w1)+(O2)*(w2)) #define MERGE_WEIGHT(O, w) ((O)*(w)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/intkeymacros.h0000644000076500000240000000775014330745562016303 0ustar00jensstaff #define KEYMACROS_H "$Id$\n" #ifndef ZODB_UNSIGNED_KEY_INTS /* signed keys */ #ifdef ZODB_64BIT_INTS /* PY_LONG_LONG as key */ #define NEED_LONG_LONG_SUPPORT #define NEED_LONG_LONG_KEYS #define KEY_TYPE PY_LONG_LONG #define KEY_CHECK longlong_check #define COPY_KEY_TO_OBJECT(O, K) O=longlong_as_object(K) #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ if (!longlong_convert((ARG), &TARGET)) \ { \ (STATUS)=0; (TARGET)=0; \ } #else /* C int as key */ #define KEY_TYPE int #define KEY_CHECK INT_CHECK #define COPY_KEY_TO_OBJECT(O, K) O=INT_FROM_LONG(K) #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ if (INT_CHECK(ARG)) { \ long vcopy = INT_AS_LONG(ARG); \ if (PyErr_Occurred()) { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) { \ PyErr_Clear(); \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ } \ (STATUS)=0; (TARGET)=0; \ } \ else if ((int)vcopy != vcopy) { \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ (STATUS)=0; (TARGET)=0; \ } \ else TARGET = vcopy; \ } else { \ PyErr_SetString(PyExc_TypeError, "expected integer key"); \ (STATUS)=0; (TARGET)=0; } #endif #else /* Unsigned keys */ #ifdef ZODB_64BIT_INTS /* PY_LONG_LONG as key */ #define NEED_LONG_LONG_SUPPORT #define NEED_LONG_LONG_KEYS #define KEY_TYPE unsigned PY_LONG_LONG #define KEY_CHECK ulonglong_check #define COPY_KEY_TO_OBJECT(O, K) O=ulonglong_as_object(K) #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ if (!ulonglong_convert((ARG), &TARGET)) \ { \ (STATUS)=0; (TARGET)=0; \ } #else /* C int as key */ #define KEY_TYPE unsigned int #define KEY_CHECK INT_CHECK #define COPY_KEY_TO_OBJECT(O, K) O=UINT_FROM_LONG(K) #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ if (INT_CHECK(ARG)) { \ long vcopy = INT_AS_LONG(ARG); \ if (PyErr_Occurred()) { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) { \ PyErr_Clear(); \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ } \ (STATUS)=0; (TARGET)=0; \ } \ else if (vcopy < 0) { \ PyErr_SetString(PyExc_TypeError, "can't convert negative value to unsigned int"); \ (STATUS)=0; (TARGET)=0; \ } \ else if ((unsigned int)vcopy != vcopy) { \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ (STATUS)=0; (TARGET)=0; \ } \ else TARGET = vcopy; \ } else { \ 
PyErr_SetString(PyExc_TypeError, "expected integer key"); \ (STATUS)=0; (TARGET)=0; } #endif #endif /* ZODB_SIGNED_KEY_INTS */ #undef KEY_TYPE_IS_PYOBJECT #define TEST_KEY_SET_OR(V, K, T) if ( ( (V) = (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0)) ) , 0 ) #define DECREF_KEY(KEY) #define INCREF_KEY(k) #define COPY_KEY(KEY, E) (KEY=(E)) #define MULTI_INT_UNION 1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/intvaluemacros.h0000644000076500000240000001073614330745562016625 0ustar00jensstaff #define VALUEMACROS_H "$Id$\n" /* VALUE_PARSE is used exclusively in SetOpTemplate.c to accept the weight values for merging. The PyArg_ParseTuple function it uses has no trivial way to express "unsigned with check", so in the unsigned case, passing negative values as weights will produce weird output no matter what VALUE_PARSE we use (because it will immediately get cast to an unsigned). */ #ifndef ZODB_UNSIGNED_VALUE_INTS /*signed values */ #ifdef ZODB_64BIT_INTS #define NEED_LONG_LONG_SUPPORT #define VALUE_TYPE PY_LONG_LONG #define VALUE_PARSE "L" #define COPY_VALUE_TO_OBJECT(O, K) O=longlong_as_object(K) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if (!longlong_convert((ARG), &TARGET)) \ { \ (STATUS)=0; (TARGET)=0; \ } #else #define VALUE_TYPE int #define VALUE_PARSE "i" #define COPY_VALUE_TO_OBJECT(O, K) O=INT_FROM_LONG(K) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if (INT_CHECK(ARG)) { \ long vcopy = INT_AS_LONG(ARG); \ if (PyErr_Occurred()) { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) { \ PyErr_Clear(); \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ } \ (STATUS)=0; (TARGET)=0; \ } \ else if ((int)vcopy != vcopy) { \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ (STATUS)=0; (TARGET)=0; \ } \ else TARGET = vcopy; \ } else { \ PyErr_SetString(PyExc_TypeError, "expected integer key"); \ (STATUS)=0; (TARGET)=0; } #endif #else /* unsigned values */ #ifdef ZODB_64BIT_INTS /* unsigned, 64-bit values */ #define NEED_LONG_LONG_SUPPORT #define VALUE_TYPE unsigned PY_LONG_LONG #define VALUE_PARSE "K" #define COPY_VALUE_TO_OBJECT(O, K) O=ulonglong_as_object(K) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if (!ulonglong_convert((ARG), &TARGET)) \ { \ (STATUS)=0; (TARGET)=0; \ } #else /* unsigned, 32-bit values */ #define VALUE_TYPE unsigned int #define VALUE_PARSE "I" #define COPY_VALUE_TO_OBJECT(O, K) O=UINT_FROM_LONG(K) #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ if (INT_CHECK(ARG)) { \ long vcopy = INT_AS_LONG(ARG); \ if (PyErr_Occurred()) { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) { \ PyErr_Clear(); \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ } \ (STATUS)=0; (TARGET)=0; \ } \ else if (vcopy < 0) { \ PyErr_SetString(PyExc_TypeError, "can't convert negative value to unsigned int"); \ (STATUS)=0; (TARGET)=0; \ } \ else if ((unsigned int)vcopy != vcopy) { \ PyErr_SetString(PyExc_TypeError, "integer out of range"); \ (STATUS)=0; (TARGET)=0; \ } \ else TARGET = vcopy; \ } else { \ PyErr_SetString(PyExc_TypeError, "expected integer key"); \ (STATUS)=0; (TARGET)=0; } #endif #endif #undef VALUE_TYPE_IS_PYOBJECT #define TEST_VALUE(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0)) #define VALUE_SAME(VALUE, TARGET) ( (VALUE) == (TARGET) ) #define DECLARE_VALUE(NAME) VALUE_TYPE NAME #define DECREF_VALUE(k) #define INCREF_VALUE(k) #define COPY_VALUE(V, E) (V=(E)) #define NORMALIZE_VALUE(V, MIN) ((MIN) > 0) ? 
((V)/=(MIN)) : 0 #define MERGE_DEFAULT 1 #define MERGE(O1, w1, O2, w2) ((O1)*(w1)+(O2)*(w2)) #define MERGE_WEIGHT(O, w) ((O)*(w)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/objectkeymacros.h0000644000076500000240000000225114355020716016740 0ustar00jensstaff#define KEYMACROS_H "$Id$\n" #define KEY_TYPE PyObject * #define KEY_TYPE_IS_PYOBJECT #include "Python.h" #include "_compat.h" static PyObject *object_; /* initialized in BTreeModuleTemplate init */ static int check_argument_cmp(PyObject *arg) { /* printf("check cmp %p %p %p %p\n", */ /* arg->ob_type->tp_richcompare, */ /* ((PyTypeObject *)object_)->ob_type->tp_richcompare, */ /* arg->ob_type->tp_compare, */ /* ((PyTypeObject *)object_)->ob_type->tp_compare); */ if (arg == Py_None) { return 1; } if (Py_TYPE(arg)->tp_richcompare == Py_TYPE(object_)->tp_richcompare) { PyErr_Format(PyExc_TypeError, "Object of class %s has default comparison", Py_TYPE(arg)->tp_name); return 0; } return 1; } #define TEST_KEY_SET_OR(V, KEY, TARGET) \ if ( ( (V) = COMPARE((KEY),(TARGET)) ), PyErr_Occurred() ) #define INCREF_KEY(k) Py_INCREF(k) #define DECREF_KEY(KEY) Py_DECREF(KEY) #define COPY_KEY(KEY, E) KEY=(E) #define COPY_KEY_TO_OBJECT(O, K) O=(K); Py_INCREF(O) #define COPY_KEY_FROM_ARG(TARGET, ARG, S) \ TARGET=(ARG); \ (S) = 1; #define KEY_CHECK_ON_SET check_argument_cmp ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/objectvaluemacros.h0000644000076500000240000000071414330745562017274 0ustar00jensstaff#define VALUEMACROS_H "$Id$\n" #define VALUE_TYPE PyObject * #define VALUE_TYPE_IS_PYOBJECT #define TEST_VALUE(VALUE, TARGET) (COMPARE((VALUE),(TARGET))) #define DECLARE_VALUE(NAME) VALUE_TYPE NAME #define INCREF_VALUE(k) Py_INCREF(k) #define DECREF_VALUE(k) Py_DECREF(k) #define COPY_VALUE(k,e) k=(e) #define COPY_VALUE_TO_OBJECT(O, K) O=(K); Py_INCREF(O) #define COPY_VALUE_FROM_ARG(TARGET, ARG, S) TARGET=(ARG) #define NORMALIZE_VALUE(V, MIN) Py_INCREF(V) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/sorters.c0000644000076500000240000003556214330745562015271 0ustar00jensstaff/***************************************************************************** Copyright (c) 2002 Zope Foundation and Contributors. All Rights Reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ****************************************************************************/ /* Revision information: $Id$ */ /* The only routine here intended to be used outside the file is size_t sort_int_nodups(int *p, size_t n) Sort the array of n ints pointed at by p, in place, and also remove duplicates. Return the number of unique elements remaining, which occupy a contiguous and monotonically increasing slice of the array starting at p. Example: If the input array is [3, 1, 2, 3, 1, 5, 2], sort_int_nodups returns 4, and the first 4 elements of the array are changed to [1, 2, 3, 5]. The content of the remaining array positions is not defined. Notes: + This is specific to n-byte signed ints, with endianness natural to the platform. 
`n` is determined based on ZODB_64BIT_INTS. + 4*n bytes of available heap memory are required for best speed (8*n when ZODB_64BIT_INTS is defined). */ #include #include #include #include #include /* The type of array elements to be sorted. Most of the routines don't care about the type, and will work fine for any scalar C type (provided they're recompiled with element_type appropriately redefined). However, the radix sort has to know everything about the type's internal representation. */ typedef KEY_TYPE element_type; /* The radixsort is faster than the quicksort for large arrays, but radixsort has high fixed overhead, making it a poor choice for small arrays. The crossover point isn't critical, and is sensitive to things like compiler and machine cache structure, so don't worry much about this. */ #define QUICKSORT_BEATS_RADIXSORT 800U /* In turn, the quicksort backs off to an insertion sort for very small slices. MAX_INSERTION is the largest slice quicksort leaves entirely to insertion. Because this version of quicksort uses a median-of-3 rule for selecting a pivot, MAX_INSERTION must be at least 2 (so that quicksort has at least 3 values to look at in a slice). Again, the exact value here isn't critical. */ #define MAX_INSERTION 25U #if MAX_INSERTION < 2U # error "MAX_INSERTION must be >= 2" #endif /* LSB-first radix sort of the n elements in 'in'. 'work' is work storage at least as large as 'in'. Depending on how many swaps are done internally, the final result may come back in 'in' or 'work'; and that pointer is returned. radixsort_int is specific to signed n-byte ints, with natural machine endianness. `n` is determined based on ZODB_64BIT_INTS. */ static element_type* radixsort_int(element_type *in, element_type *work, size_t n) { /* count[i][j] is the number of input elements that have byte value j in byte position i, where byte position 0 is the LSB. Note that holding i fixed, the sum of count[i][j] over all j in range(256) is n. */ #ifdef ZODB_64BIT_INTS size_t count[8][256]; #else size_t count[4][256]; #endif size_t i; int offset, offsetinc; /* Which byte position are we working on now? 0=LSB, 1, 2, ... */ size_t bytenum; #ifdef ZODB_64BIT_INTS assert(sizeof(element_type) == 8); #else assert(sizeof(element_type) == 4); #endif assert(in); assert(work); /* Compute all of count in one pass. */ memset(count, 0, sizeof(count)); for (i = 0; i < n; ++i) { element_type const x = in[i]; ++count[0][(x ) & 0xff]; ++count[1][(x >> 8) & 0xff]; ++count[2][(x >> 16) & 0xff]; ++count[3][(x >> 24) & 0xff]; #ifdef ZODB_64BIT_INTS ++count[4][(x >> 32) & 0xff]; ++count[5][(x >> 40) & 0xff]; ++count[6][(x >> 48) & 0xff]; ++count[7][(x >> 56) & 0xff]; #endif } /* For p an element_type* cast to char*, offset is how much farther we have to go to get to the LSB of the element; this is 0 for little- endian boxes and sizeof(element_type)-1 for big-endian. offsetinc is 1 or -1, respectively, telling us which direction to go from p+offset to get to the element's more-significant bytes. */ { element_type one = 1; if (*(char*)&one) { /* Little endian. */ offset = 0; offsetinc = 1; } else { /* Big endian. */ offset = sizeof(element_type) - 1; offsetinc = -1; } } /* The radix sort. */ for (bytenum = 0; bytenum < sizeof(element_type); ++bytenum, offset += offsetinc) { /* Do a stable distribution sort on byte position bytenum, from in to work. index[i] tells us the work index at which to store the next in element with byte value i. pinbyte points to the correct byte in the input array. 
*/ size_t index[256]; unsigned char* pinbyte; size_t total = 0; size_t *pcount = count[bytenum]; /* Compute the correct output starting index for each possible byte value. */ if (bytenum < sizeof(element_type) - 1) { for (i = 0; i < 256; ++i) { const size_t icount = pcount[i]; index[i] = total; total += icount; if (icount == n) break; } if (i < 256) { /* All bytes in the current position have value i, so there's nothing to do on this pass. */ continue; } } else { /* The MSB of signed ints needs to be distributed differently than the other bytes, in order 0x80, 0x81, ... 0xff, 0x00, 0x01, ... 0x7f */ for (i = 128; i < 256; ++i) { const size_t icount = pcount[i]; index[i] = total; total += icount; if (icount == n) break; } if (i < 256) continue; for (i = 0; i < 128; ++i) { const size_t icount = pcount[i]; index[i] = total; total += icount; if (icount == n) break; } if (i < 128) continue; } assert(total == n); /* Distribute the elements according to byte value. Note that this is where most of the time is spent. Note: The loop is unrolled 4x by hand, for speed. This may be a pessimization someday, but was a significant win on my MSVC 6.0 timing tests. */ pinbyte = (unsigned char *)in + offset; i = 0; /* Reduce number of elements to copy to a multiple of 4. */ while ((n - i) & 0x3) { unsigned char byte = *pinbyte; work[index[byte]++] = in[i]; ++i; pinbyte += sizeof(element_type); } for (; i < n; i += 4, pinbyte += 4 * sizeof(element_type)) { unsigned char byte1 = *(pinbyte ); unsigned char byte2 = *(pinbyte + sizeof(element_type)); unsigned char byte3 = *(pinbyte + 2 * sizeof(element_type)); unsigned char byte4 = *(pinbyte + 3 * sizeof(element_type)); element_type in1 = in[i ]; element_type in2 = in[i+1]; element_type in3 = in[i+2]; element_type in4 = in[i+3]; work[index[byte1]++] = in1; work[index[byte2]++] = in2; work[index[byte3]++] = in3; work[index[byte4]++] = in4; } /* Swap in and work (just a pointer swap). */ { element_type *temp = in; in = work; work = temp; } } return in; } /* Remove duplicates from sorted array in, storing exactly one of each distinct element value into sorted array out. It's OK (and expected!) for in == out, but otherwise the n elements beginning at in must not overlap with the n beginning at out. Return the number of elements in out. */ static size_t uniq(element_type *out, element_type *in, size_t n) { size_t i; element_type lastelt; element_type *pout; assert(out); assert(in); if (n == 0) return 0; /* i <- first index in 'in' that contains a duplicate. in[0], in[1], ... in[i-1] are unique, but in[i-1] == in[i]. Set i to n if everything is unique. */ for (i = 1; i < n; ++i) { if (in[i-1] == in[i]) break; } /* in[:i] is unique; copy to out[:i] if needed. */ assert(i > 0); if (in != out) memcpy(out, in, i * sizeof(element_type)); pout = out + i; lastelt = in[i-1]; /* safe even when i == n */ for (++i; i < n; ++i) { element_type elt = in[i]; if (elt != lastelt) *pout++ = lastelt = elt; } return pout - out; } #if 0 /* insertionsort is no longer referenced directly, but I'd like to keep * the code here just in case. */ /* Straight insertion sort of the n elements starting at 'in'. */ static void insertionsort(element_type *in, size_t n) { element_type *p, *q; element_type minimum; /* smallest seen so far */ element_type *plimit = in + n; assert(in); if (n < 2) return; minimum = *in; for (p = in+1; p < plimit; ++p) { /* *in <= *(in+1) <= ... <= *(p-1). Slide *p into place. */ element_type thiselt = *p; if (thiselt < minimum) { /* This is a new minimum. 
This saves p-in compares when it happens, but should happen so rarely that it's not worth checking for its own sake: the point is that the far more popular 'else' branch can exploit that thiselt is *not* the smallest so far. */ memmove(in+1, in, (p - in) * sizeof(*in)); *in = minimum = thiselt; } else { /* thiselt >= minimum, so the loop will find a q with *q <= thiselt. This saves testing q >= in on each trip. It's such a simple loop that saving a per-trip test is a major speed win. */ for (q = p-1; *q > thiselt; --q) *(q+1) = *q; *(q+1) = thiselt; } } } #endif /* The maximum number of elements in the pending-work stack quicksort maintains. The maximum stack depth is approximately log2(n), so arrays of size up to approximately MAX_INSERTION * 2**STACKSIZE can be sorted. The memory burden for the stack is small, so better safe than sorry. */ #define STACKSIZE 60 /* A _stacknode remembers a contiguous slice of an array that needs to sorted. lo must be <= hi, and, unlike Python array slices, this includes both ends. */ struct _stacknode { element_type *lo; element_type *hi; }; static void quicksort(element_type *plo, size_t n) { element_type *phi; /* Swap two array elements. */ element_type _temp; #define SWAP(P, Q) (_temp = *(P), *(P) = *(Q), *(Q) = _temp) /* Stack of pending array slices to be sorted. */ struct _stacknode stack[STACKSIZE]; struct _stacknode *stackfree = stack; /* available stack slot */ /* Push an array slice on the pending-work stack. */ #define PUSH(PLO, PHI) \ do { \ assert(stackfree - stack < STACKSIZE); \ assert((PLO) <= (PHI)); \ stackfree->lo = (PLO); \ stackfree->hi = (PHI); \ ++stackfree; \ } while(0) assert(plo); phi = plo + n - 1; for (;;) { element_type pivot; element_type *pi, *pj; assert(plo <= phi); n = phi - plo + 1; if (n <= MAX_INSERTION) { /* Do a small insertion sort. Contra Knuth, we do this now instead of waiting until the end, because this little slice is likely still in cache now. */ element_type *p, *q; element_type minimum = *plo; for (p = plo+1; p <= phi; ++p) { /* *plo <= *(plo+1) <= ... <= *(p-1). Slide *p into place. */ element_type thiselt = *p; if (thiselt < minimum) { /* New minimum. */ memmove(plo+1, plo, (p - plo) * sizeof(*p)); *plo = minimum = thiselt; } else { /* thiselt >= minimum, so the loop will find a q with *q <= thiselt. */ for (q = p-1; *q > thiselt; --q) *(q+1) = *q; *(q+1) = thiselt; } } /* Pop another slice off the stack. */ if (stack == stackfree) break; /* no more slices -- we're done */ --stackfree; plo = stackfree->lo; phi = stackfree->hi; continue; } /* Parition the slice. For pivot, take the median of the leftmost, rightmost, and middle elements. First sort those three; then the median is the middle one. For technical reasons, the middle element is swapped to plo+1 first (see Knuth Vol 3 Ed 2 section 5.2.2 exercise 55 -- reverse-sorted arrays can take quadratic time otherwise!). */ { element_type *plop1 = plo + 1; element_type *pmid = plo + (n >> 1); assert(plo < pmid && pmid < phi); SWAP(plop1, pmid); /* Sort plo, plop1, phi. */ /* Smaller of rightmost two -> middle. */ if (*plop1 > *phi) SWAP(plop1, phi); /* Smallest of all -> left; if plo is already the smallest, the sort is complete. */ if (*plo > *plop1) { SWAP(plo, plop1); /* Largest of all -> right. */ if (*plop1 > *phi) SWAP(plop1, phi); } pivot = *plop1; pi = plop1; } assert(*plo <= pivot); assert(*pi == pivot); assert(*phi >= pivot); pj = phi; /* Partition wrt pivot. 
This is the time-critical part, and nearly every decision in the routine aims at making this loop as fast as possible -- even small points like arranging that all loop tests can be done correctly at the bottoms of loops instead of the tops, and that pointers can be derefenced directly as-is (without fiddly +1 or -1). The aim is to make the C here so simple that a compiler has a good shot at doing as well as hand-crafted assembler. */ for (;;) { /* Invariants: 1. pi < pj. 2. All elements at plo, plo+1 .. pi are <= pivot. 3. All elements at pj, pj+1 .. phi are >= pivot. 4. There is an element >= pivot to the right of pi. 5. There is an element <= pivot to the left of pj. Note that #4 and #5 save us from needing to check that the pointers stay in bounds. */ assert(pi < pj); do { ++pi; } while (*pi < pivot); assert(pi <= pj); do { --pj; } while (*pj > pivot); assert(pj >= pi - 1); if (pi < pj) SWAP(pi, pj); else break; } assert(plo+1 < pi && pi <= phi); assert(plo < pj && pj < phi); assert(*pi >= pivot); assert( (pi == pj && *pj == pivot) || (pj + 1 == pi && *pj <= pivot) ); /* Swap pivot into its final position, pj. */ assert(plo[1] == pivot); plo[1] = *pj; *pj = pivot; /* Subfiles are from plo to pj-1 inclusive, and pj+1 to phi inclusive. Push the larger one, and loop back to do the smaller one directly. */ if (pj - plo >= phi - pj) { PUSH(plo, pj-1); plo = pj+1; } else { PUSH(pj+1, phi); phi = pj-1; } } #undef PUSH #undef SWAP } /* Sort p and remove duplicates, as fast as we can. */ static size_t sort_int_nodups(KEY_TYPE *p, size_t n) { size_t nunique; element_type *work; assert(sizeof(KEY_TYPE) == sizeof(element_type)); assert(p); /* Use quicksort if the array is small, OR if malloc can't find enough temp memory for radixsort. */ work = NULL; if (n > QUICKSORT_BEATS_RADIXSORT) work = (element_type *)malloc(n * sizeof(element_type)); if (work) { element_type *out = radixsort_int(p, work, n); nunique = uniq(p, out, n); free(work); } else { quicksort(p, n); nunique = uniq(p, p, n); } return nunique; } ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1717060227.496628 BTrees-6.0/src/BTrees/tests/0000755000076500000240000000000014626041203014540 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/tests/__init__.py0000644000076500000240000000002714330745562016663 0ustar00jensstaff# Make this a package. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/_test_builder.py0000644000076500000240000002617614626022106017753 0ustar00jensstaff############################################################################## # # Copyright (c) Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
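# The C helper sort_int_nodups() in sorters.c (above) sorts an int array in
# place, removes duplicates, and returns the count of unique elements that
# then occupy the front of the array.  A rough pure-Python sketch of that
# contract follows -- not the real implementation, which switches between an
# LSB-first radix sort and a quicksort depending on the array size; the name
# ``_sort_int_nodups_sketch`` is introduced only for this illustration.
def _sort_int_nodups_sketch(p):
    unique = sorted(set(p))
    p[:len(unique)] = unique  # unique values end up at the front, sorted
    return len(unique)

# The worked example from the sorters.c comment:
_example = [3, 1, 2, 3, 1, 5, 2]
assert _sort_int_nodups_sketch(_example) == 4
assert _example[:4] == [1, 2, 3, 5]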
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import unittest from .common import BTreeTests from .common import ExtendedSetTests from .common import I_SetsBase from .common import InternalKeysMappingTest from .common import MappingBase from .common import MappingConflictTestBase from .common import ModuleTest from .common import MultiUnion from .common import NormalSetTests from .common import SetConflictTestBase from .common import SetResult from .common import TestLongIntKeys from .common import TestLongIntValues from .common import Weighted from .common import itemsToSet from .common import makeMapBuilder from .common import makeSetBuilder class _FilteredModuleProxy: """ Accesses either ```` or ``Py`` from a module. This conveniently lets us avoid lots of 'getattr' calls. Accessing ``def_`` returns a callable that returns ````. This is suitable for use as class attributes. """ # Lets us easily access by name a particular attribute # in either the Python or C implementation, based on the # suffix def __init__(self, btree_module, suffix): self.btree_module = btree_module self.suffix = suffix def __getattr__(self, name): attr_name = name[4:] if name.startswith('def_') else name attr_name += self.suffix attr = getattr(self.btree_module, attr_name) if name.startswith('def_'): return staticmethod(lambda: attr) return attr def _flattened(*args): def f(tuple_or_klass): if isinstance(tuple_or_klass, tuple): for x in tuple_or_klass: yield from f(x) else: yield tuple_or_klass return tuple(f(args)) class ClassBuilder: # Use TestAuto as a prefix to avoid clashing with manual tests TESTCASE_PREFIX = 'TestAuto' def __init__(self, btree_module, btree_tests_base=BTreeTests): self.btree_module = btree_module # These will be instances of _datatypes.DataType self.key_type = btree_module.BTreePy._to_key self.value_type = btree_module.BTreePy._to_value class _BoundsMixin: # For test purposes, we can only support negative keys if they are # ordered like integers. Our int -> 2 byte conversion for fsBTree # doesn't do this. # # -1 is \xff\xff which is the largest possible key. 
SUPPORTS_NEGATIVE_KEYS = ( self.key_type.get_lower_bound() != 0 and self.key_type.coerce(-1) < self.key_type.coerce(0) ) SUPPORTS_NEGATIVE_VALUES = self.value_type.get_lower_bound() != 0 if SUPPORTS_NEGATIVE_KEYS: KEY_RANDRANGE_ARGS = (-2000, 2001) else: KEY_RANDRANGE_ARGS = (0, 4002) coerce_to_key = self.key_type.coerce coerce_to_value = self.value_type.coerce KEYS = tuple(self.key_type.coerce(x) for x in range(2001)) VALUES = tuple(self.value_type.coerce(x) for x in range(2001)) self.bounds_mixin = _BoundsMixin self.btree_tests_base = btree_tests_base self.prefix = btree_module.__name__.split('.', )[-1][:2] self.test_module = 'BTrees.tests.test_' + self.prefix + 'BTree' self.test_classes = {} # Keep track of tested classes so that we don't # double test in PURE_PYTHON mode (e.g., BTreePy is BTree) self.tested_classes = set() def _store_class(self, test_cls): assert test_cls.__name__ not in self.test_classes assert isinstance(test_cls, type) assert issubclass(test_cls, unittest.TestCase) self.test_classes[test_cls.__name__] = test_cls def _fixup_and_store_class(self, btree_module, fut, test_cls): base = [x for x in test_cls.__bases__ if x.__module__ != __name__ and x.__module__ != 'unittest'][0] test_name = self._name_for_test(btree_module, fut, base) test_cls.__name__ = test_name test_cls.__module__ = self.test_module test_cls.__qualname__ = self.test_module + '.' + test_name self._store_class(test_cls) def _name_for_test(self, btree_module, fut, test_base): fut = getattr(fut, '__name__', fut) fut = str(fut) if isinstance(test_base, tuple): test_base = test_base[0] test_name = ( self.TESTCASE_PREFIX + (self.prefix if not fut.startswith(self.prefix) else '') + fut + test_base.__name__ + btree_module.suffix ) return test_name def _needs_test(self, fut, test_base): key = (fut, test_base) if key in self.tested_classes: return False self.tested_classes.add(key) return True def _create_set_op_test(self, btree_module, base): tree = btree_module.BTree if not self._needs_test(tree, base): return class Test(self.bounds_mixin, base, unittest.TestCase): # There are two set operation tests, # Weighted and MultiUnion. # These attributes are used in both mkbucket = btree_module.Bucket # Weighted uses union as a factory, self.union()(...). # MultiUnion calls it directly. __union = btree_module.def_union def union(self, *args): if args: return self.__union()(*args) return self.__union() intersection = btree_module.def_intersection # These are specific to Weighted; modules that # don't have weighted values can'd do them. 
if base is Weighted: weightedUnion = btree_module.def_weightedUnion weightedIntersection = btree_module.def_weightedIntersection # These are specific to MultiUnion, and may not exist # in key types that don't support unions (``'O'``) multiunion = getattr(btree_module, 'multiunion', None) mkset = btree_module.Set mktreeset = btree_module.TreeSet mkbtree = tree def builders(self): return ( btree_module.Bucket, btree_module.BTree, itemsToSet(btree_module.Set), itemsToSet(btree_module.TreeSet) ) self._fixup_and_store_class(btree_module, '', Test) def _create_set_result_test(self, btree_module): tree = btree_module.BTree base = SetResult if not self._needs_test(tree, base): return class Test(self.bounds_mixin, base, unittest.TestCase): union = btree_module.union intersection = btree_module.intersection difference = btree_module.difference def builders(self): return ( makeSetBuilder(self, btree_module.Set), makeSetBuilder(self, btree_module.TreeSet), makeMapBuilder(self, btree_module.BTree), makeMapBuilder(self, btree_module.Bucket) ) self._fixup_and_store_class(btree_module, '', Test) def _create_module_test(self): from BTrees import Interfaces as interfaces mod = self.btree_module iface_name = ( f'I{self.key_type.long_name}{self.value_type.long_name}' f'BTreeModule' ) iface = getattr(interfaces, iface_name) class Test(ModuleTest, unittest.TestCase): prefix = self.prefix key_type = self.key_type value_type = self.value_type def _getModule(self): return mod def _getInterface(self): return iface self._fixup_and_store_class( _FilteredModuleProxy(self.btree_module, ''), '', Test ) def _create_type_tests(self, btree_module, type_name, test_bases): from BTrees import Interfaces as interfaces tree = getattr(btree_module, type_name) iface = { 'BTree': interfaces.IBTree, 'Bucket': interfaces.IMinimalDictionary, 'Set': interfaces.ISet, 'TreeSet': interfaces.ITreeSet }[type_name] for test_base in test_bases: if not self._needs_test(tree, test_base): continue test_name = self._name_for_test(btree_module, tree, test_base) bases = _flattened(self.bounds_mixin, test_base, unittest.TestCase) test_cls = type(test_name, bases, { '__module__': self.test_module, '_getTargetClass': lambda _, t=tree: t, '_getTargetInterface': lambda _, i=iface: i, 'getTwoKeys': self.key_type.getTwoExamples, 'getTwoValues': self.value_type.getTwoExamples, 'key_type': self.key_type, 'value_type': self.value_type, }) self._store_class(test_cls) def create_classes(self): self._create_module_test() btree_tests_base = (self.btree_tests_base,) if self.key_type.using64bits: btree_tests_base += (TestLongIntKeys,) if self.value_type.using64bits: btree_tests_base += (TestLongIntValues,) set_ops = () if self.key_type.supports_value_union(): set_ops += (MultiUnion,) if self.value_type.supports_value_union(): set_ops += (Weighted,) for suffix in ('', 'Py'): btree_module = _FilteredModuleProxy(self.btree_module, suffix) for type_name, test_bases in ( ('BTree', (InternalKeysMappingTest, MappingConflictTestBase, btree_tests_base)), ('Bucket', (MappingBase, MappingConflictTestBase,)), ('Set', (ExtendedSetTests, I_SetsBase, SetConflictTestBase,)), ('TreeSet', (I_SetsBase, NormalSetTests, SetConflictTestBase,)) ): self._create_type_tests(btree_module, type_name, test_bases) for test_base in set_ops: self._create_set_op_test(btree_module, test_base) self._create_set_result_test(btree_module) def update_module(test_module_globals, btree_module, *args, **kwargs): builder = ClassBuilder(btree_module, *args, **kwargs) builder.create_classes() 
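    # Usage sketch (hedged): a concrete per-prefix test module is presumed
    # to call this helper roughly as
    #
    #     import BTrees.OOBTree
    #     from ._test_builder import update_module
    #     update_module(globals(), BTrees.OOBTree)
    #
    # so that the automatically built TestAuto* classes land in that
    # module's namespace and are picked up by unittest discovery.  The
    # module name and call shape here are illustrative assumptions based on
    # the signature above.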
test_module_globals.update(builder.test_classes) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/common.py0000644000076500000240000036031514626022106016413 0ustar00jensstaff############################################################################## # # Copyright (c) 2001-2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import functools import platform import sys import unittest from unittest import skip from BTrees._base import _tp_name from BTrees._compat import PYPY from BTrees._compat import _c_optimizations_ignored def _no_op(test_method): return test_method try: __import__('ZODB') except ImportError: _skip_wo_ZODB = skip('ZODB not available') else: _skip_wo_ZODB = _no_op if platform.architecture()[0] == '32bit': _skip_on_32_bits = skip("32-bit platform") else: _skip_on_32_bits = _no_op if _c_optimizations_ignored(): skipOnPurePython = skip("Not on Pure Python") else: skipOnPurePython = _no_op def _skip_if_pure_py_and_py_test(self): if _c_optimizations_ignored() and 'Py' in type(self).__name__: # No need to run this again. The "C" tests will catch it. # This relies on the fact that we always define tests in pairs, # one normal/C and one with Py in the name for the Py test. raise unittest.SkipTest("Redundant with the C test") #: The exceptions that can be raised by failed #: unsigned conversions. The OverflowError is raised #: by the interpreter and is nicer than the manual error. UnsignedError = (TypeError, OverflowError) def uses_negative_keys_and_values(func): """ Apply this decorator to tests that use negative keys and values. If the underlying mapping doesn't support that, it will be expected to raise a TypeError or OverflowError. """ @functools.wraps(func) def test(self): if not (self.SUPPORTS_NEGATIVE_KEYS and self.SUPPORTS_NEGATIVE_VALUES): with self.assertRaises(UnsignedError): func(self) else: func(self) return test class SignedMixin: SUPPORTS_NEGATIVE_KEYS = True SUPPORTS_NEGATIVE_VALUES = True #: The values to pass to ``random.randrange()`` to generate #: valid keys. KEY_RANDRANGE_ARGS = (-2000, 2001) class ZODBAccess: db = None def tearDown(self): if self.db is not None: self.db.close() del self.db def _getRoot(self): from ZODB import DB from ZODB.MappingStorage import MappingStorage if self.db is None: # Unclear: On the next line, the ZODB4 flavor of this routine # [asses a cache_size argument: # self.db = DB(MappingStorage(), cache_size=1) # If that's done here, though, testLoadAndStore() and # testGhostUnghost() both nail the CPU and seemingly # never finish. 
self.db = DB(MappingStorage()) return self.db.open().root() def _closeRoot(self, root): import transaction # If we don't commit/abort the transaction, then # closing the Connection tends to fail with # "Cannot close connection joined to transaction" transaction.abort() root._p_jar.close() class Base(ZODBAccess, SignedMixin): # Tests common to all types: sets, buckets, and BTrees def _getTargetClass(self): raise NotImplementedError("subclass should return the target type") def _getTargetInterface(self): raise NotImplementedError( "subclass must return the expected interface " ) def _makeOne(self): return self._getTargetClass()() def setUp(self): super().setUp() _skip_if_pure_py_and_py_test(self) def coerce_to_key(self, item): return item def coerce_to_value(self, value): return value # These are constant tuples. Indexing them produces a key/value # corresponding to the index. KEYS = tuple(range(2001)) VALUES = tuple(range(2001)) def testSubclassesCanHaveAttributes(self): # https://github.com/zopefoundation/BTrees/issues/168 class Subclass(self._getTargetClass()): pass Subclass.foo = 1 self.assertIn('foo', Subclass.__dict__) self.assertNotIn('foo', self._getTargetClass().__dict__) @skipOnPurePython def testCannotSetArbitraryAttributeOnBase(self): if 'Py' in self._getTargetClass().__name__: # pure-python classes can have arbitrary attributes self.skipTest("Not on Pure Python.") with self.assertRaises(TypeError): self._getTargetClass().foo = 1 def testProvidesInterface(self): from zope.interface import providedBy from zope.interface.common.sequence import IMinimalSequence from zope.interface.verify import verifyObject t = self._makeOne() self._populate(t, 10) # reprs are usually the same in the Python and C implementations, # so you need the actual class to be sure of what you're dealing with __traceback_info__ = type(t) verifyObject(self._getTargetInterface(), t) for meth in ('keys', 'values'): if providedBy(t).get(meth): # The interface says it should be here, # make sure it is. This will be things # like Tree, Bucket, Set. seq = getattr(t, meth)() if type(seq) not in (tuple, list): verifyObject(IMinimalSequence, seq) def _getColectionsABC(self): raise NotImplementedError("subclass should return the collection ABC") def testIsinstanceCollectionsABC(self): abc = self._getCollectionsABC() t = self._makeOne() self.assertIsInstance(t, abc) # Now make sure that it actually has the required methods. # First, get the required methods: abc_attrs = set(dir(abc)) # If the method was None, that means it's not required; # if it's not callable, it's not a method (None is not callable) # If it's a private attribute (starting with only one _), it's # an implementation detail to ignore. abc_attrs -= { x for x in abc_attrs if (x[0] == '_' and x[1] != '_') or not callable(getattr(abc, x, None)) } # Drop things from Python typing and zope.interface that may or may not # be present. abc_attrs -= { '__provides__', '__implemented__', '__providedBy__', '__class_getitem__', # Python 3.9+ # Also the equality and comparison operators; # we don't implement those methods, but the ABC does. '__lt__', '__le__', '__eq__', '__gt__', '__ge__', '__ne__', } btr_attrs = set(dir(type(t))) missing_attrs = abc_attrs - btr_attrs self.assertFalse( sorted(missing_attrs), "Class {!r} is missing these methods: {}".format( type(t), missing_attrs) ) def testPersistentSubclass(self): # Can we subclass this and Persistent? 
# https://github.com/zopefoundation/BTrees/issues/78 import persistent class PersistentSubclass(persistent.Persistent): pass __traceback_info__ = self._getTargetClass(), persistent.Persistent type('Subclass', (self._getTargetClass(), PersistentSubclass), {}) def testPurePython(self): import importlib kind = self._getTargetClass() class_name = kind.__name__ module_name = kind.__module__ module = importlib.import_module(module_name) # If we're in pure python mode, our target class module # should not have an '_' in it (fix_pickle changes the name # to remove the 'Py') # If we're in the C extension mode, our target class # module still doesn't have the _ in it, but we should be able to find # a Py class that's different self.assertNotIn('_', module_name) self.assertIs(getattr(module, class_name), kind) if not _c_optimizations_ignored() and 'Py' not in type(self).__name__: self.assertIsNot(getattr(module, class_name + 'Py'), kind) @_skip_wo_ZODB def testLoadAndStore(self): import transaction for i in 0, 10, 1000: t = self._makeOne() self._populate(t, i) root = None root = self._getRoot() root[i] = t transaction.commit() root2 = self._getRoot() if hasattr(t, 'items'): self.assertEqual(list(root2[i].items()), list(t.items())) else: self.assertEqual(list(root2[i].keys()), list(t.keys())) self._closeRoot(root) self._closeRoot(root2) def testSetstateArgumentChecking(self): try: self._makeOne().__setstate__(('',)) except TypeError as v: self.assertEqual(str(v), 'tuple required for first state element') else: raise AssertionError("Expected exception") @_skip_wo_ZODB def testGhostUnghost(self): import transaction for i in 0, 10, 1000: t = self._makeOne() self._populate(t, i) root = self._getRoot() root[i] = t transaction.commit() root2 = self._getRoot() root2[i]._p_deactivate() transaction.commit() if hasattr(t, 'items'): self.assertEqual(list(root2[i].items()), list(t.items())) else: self.assertEqual(list(root2[i].keys()), list(t.keys())) self._closeRoot(root) self._closeRoot(root2) def testSimpleExclusiveKeyRange(self): t = self._makeOne() K = self.KEYS self.assertEqual(list(t.keys()), []) self.assertEqual(list(t.keys(excludemin=True)), []) self.assertEqual(list(t.keys(excludemax=True)), []) self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), []) self._populate(t, 1) self.assertEqual(list(t.keys()), [K[0]]) self.assertEqual(list(t.keys(excludemin=True)), []) self.assertEqual(list(t.keys(excludemax=True)), []) self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), []) t.clear() self._populate(t, 2) self.assertEqual(list(t.keys()), [K[0], K[1]]) self.assertEqual(list(t.keys(excludemin=True)), [K[1]]) self.assertEqual(list(t.keys(excludemax=True)), [K[0]]) self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), []) t.clear() self._populate(t, 3) self.assertEqual(list(t.keys()), [K[0], K[1], K[2]]) self.assertEqual(list(t.keys(excludemin=True)), [K[1], K[2]]) self.assertEqual(list(t.keys(excludemax=True)), [K[0], K[1]]) self.assertEqual( list(t.keys(excludemin=True, excludemax=True)), [K[1]] ) for low, high, expected in ((-1, 3, [0, 1, 2]), (-1, 2, [0, 1])): if self.SUPPORTS_NEGATIVE_KEYS: self.assertEqual( list(t.keys(low, high, excludemin=True, excludemax=True)), expected ) else: with self.assertRaises(UnsignedError): t.keys(low, high, excludemin=True, excludemax=True) self.assertEqual( list(t.keys(K[0], K[3], excludemin=True, excludemax=True)), [K[1], K[2]] ) self.assertEqual( list(t.keys(K[0], K[2], excludemin=True, excludemax=True)), [K[1]] ) @_skip_wo_ZODB def 
test_UpdatesDoReadChecksOnInternalNodes(self): import transaction from ZODB import DB from ZODB.MappingStorage import MappingStorage t = self._makeOne() K = self.KEYS if not hasattr(t, '_firstbucket'): return self._populate(t, 1000) store = MappingStorage() db = DB(store) conn = db.open() conn.root.t = t transaction.commit() read = [] def readCurrent(ob): read.append(ob) conn.__class__.readCurrent(conn, ob) return 1 conn.readCurrent = readCurrent try: _add = t.add _remove = t.remove except AttributeError: def add(i): t[self.coerce_to_key(i)] = self.coerce_to_value(i) def remove(i): del t[self.coerce_to_key(i)] else: def add(i): _add(self.coerce_to_key(i)) def remove(i): _remove(self.coerce_to_key(i)) # Modifying a thing remove(100) self.assertTrue(t in read) del read[:] add(100) self.assertTrue(t in read) del read[:] transaction.abort() conn.cacheMinimize() list(t) self.assertTrue(K[100] in t) self.assertTrue(not read) def test_impl_pickle(self): # Issue #2 # Nothing we pickle should include the 'Py' suffix of # implementation classes, and unpickling should give us # back the best available type import pickle made_one = self._makeOne() for proto in range(1, pickle.HIGHEST_PROTOCOL + 1): dumped_str = pickle.dumps(made_one, proto) self.assertTrue(b'Py' not in dumped_str, repr(dumped_str)) loaded_one = pickle.loads(dumped_str) # If we're testing the pure-Python version, but we have the # C extension available, then the loaded type will be the C # extension but the made type will be the Python version. # Otherwise, they match. (Note that if we don't have C extensions # available, the __name__ will be altered to not have Py in it. # See _fix_pickle) if 'Py' in type(made_one).__name__: self.assertTrue(type(loaded_one) is not type(made_one)) else: self.assertTrue(type(loaded_one) is type(made_one)) self.assertTrue(type(loaded_one) is self._getTargetClass()) dumped_str2 = pickle.dumps(loaded_one, proto) self.assertEqual(dumped_str, dumped_str2) def test_pickle_empty(self): # Issue #2 # Pickling an empty object and unpickling it should result # in an object that can be pickled, yielding an identical # pickle (and not an AttributeError) import pickle t = self._makeOne() s = pickle.dumps(t) t2 = pickle.loads(s) s2 = pickle.dumps(t2) self.assertEqual(s, s2) if hasattr(t2, '__len__'): # checks for _firstbucket self.assertEqual(0, len(t2)) # This doesn't hold for things like Bucket and Set, sadly # self.assertEqual(t, t2) def test_pickle_subclass(self): # Issue #2: Make sure our class swizzling doesn't break # pickling subclasses # We need a globally named subclass for pickle, but it needs # to be unique in case tests run in parallel base_class = type(self._makeOne()) class_name = 'PickleSubclassOf' + base_class.__name__ PickleSubclass = type(class_name, (base_class,), {}) globals()[class_name] = PickleSubclass import pickle loaded = pickle.loads(pickle.dumps(PickleSubclass())) self.assertTrue(type(loaded) is PickleSubclass, type(loaded)) self.assertTrue(PickleSubclass().__class__ is PickleSubclass) def test_isinstance_subclass(self): # Issue #2: # In some cases we define a __class__ attribute that gets # invoked for isinstance and *lies*. Check that isinstance still # works (almost) as expected. 
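        # The pickling contract exercised by test_impl_pickle above, in
        # doctest form (hedged: it assumes the C extension is importable, so
        # that OOBTree is the "best available" implementation):
        #
        #     >>> import pickle
        #     >>> from BTrees.OOBTree import OOBTree, OOBTreePy
        #     >>> b'Py' in pickle.dumps(OOBTreePy())
        #     False
        #     >>> type(pickle.loads(pickle.dumps(OOBTreePy()))) is OOBTree
        #     True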
t = self._makeOne() # It's a little bit weird, but in the fibbing case, # we're an instance of two unrelated classes self.assertTrue(isinstance(t, type(t)), (t, type(t))) self.assertTrue(isinstance(t, t.__class__)) class Sub(type(t)): pass self.assertTrue(issubclass(Sub, type(t))) if type(t) is not t.__class__: # We're fibbing; this breaks issubclass of itself, # contrary to the usual mechanism self.assertFalse(issubclass(t.__class__, type(t))) class NonSub: pass self.assertFalse(issubclass(NonSub, type(t))) self.assertFalse(isinstance(NonSub(), type(t))) class MappingBase(Base): # Tests common to mappings (buckets, btrees) SUPPORTS_NEGATIVE_VALUES = True def _populate(self, t, largest): # Make some data to_key = self.coerce_to_key to_value = self.coerce_to_value for i in range(largest): t[to_key(i)] = to_value(i) def _getCollectionsABC(self): import collections.abc return collections.abc.MutableMapping def test_popitem(self): t = self._makeOne() K = self.KEYS V = self.VALUES # Empty with self.assertRaises(KeyError): t.popitem() self._populate(t, 2000) self.assertEqual(len(t), 2000) for i in range(2000): self.assertEqual(t.popitem(), (K[i], V[i])) self.assertEqual(len(t), 2000 - i - 1) # Back to empty self.assertEqual(len(t), 0) with self.assertRaises(KeyError): t.popitem() def testShortRepr(self): # test the repr because buckets have a complex repr implementation # internally the cutoff from a stack allocated buffer to a heap # allocated buffer is 10000. t = self._makeOne() to_key = self.coerce_to_key to_value = self.coerce_to_value for i in range(5): t[to_key(i)] = to_value(i) t._p_oid = b'12345678' r = repr(t) # Make sure the repr is **not* 10000 bytes long for a shrort bucket. # (the buffer must be terminated when copied). self.assertTrue(len(r) < 10000) # Make sure the repr is human readable if it's a bucket if 'Bucket' in r: self.assertTrue(r.startswith("BTrees")) self.assertTrue(r.endswith(repr(t.items()) + ')'), r) else: # persistent-4.4 changed the default reprs, adding # oid and jar reprs self.assertIn(" 10000) def testGetItemFails(self): self.assertRaises(KeyError, self._getitemfail) def _getitemfail(self): return self._makeOne()[1] def testGetReturnsDefault(self): self.assertEqual(self._makeOne().get(1), None) self.assertEqual(self._makeOne().get(1, 'foo'), 'foo') def testGetReturnsDefaultWrongTypes(self): self.assertIsNone(self._makeOne().get('abc')) self.assertEqual(self._makeOne().get('abc', 'def'), 'def') def testGetReturnsDefaultOverflowRanges(self): too_big = 2 ** 64 + 1 self.assertIsNone(self._makeOne().get(too_big)) self.assertEqual(self._makeOne().get(too_big, 'def'), 'def') too_small = -too_big self.assertIsNone(self._makeOne().get(too_small)) self.assertEqual(self._makeOne().get(too_small, 'def'), 'def') def testSetItemGetItemWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[1]] = V[1] a = t[K[1]] self.assertEqual(a, V[1], repr(a)) def testReplaceWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[1]] = V[1] self.assertEqual(t[K[1]], V[1], t[K[1]]) t[K[1]] = V[2] self.assertEqual(t[K[1]], V[2], t[K[1]]) def testLen(self): import random t = self._makeOne() added = {} r = list(range(1000)) for x in r: k = random.choice(r) k = self.coerce_to_key(k) t[k] = self.coerce_to_value(x) added[k] = self.coerce_to_value(x) addl = added.keys() self.assertEqual(len(t), len(addl), len(t)) def testHasKeyWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[1]] = V[1] self.assertTrue(t.has_key(K[1])) self.assertIn(K[1], t) 
self.assertNotIn(K[0], t) self.assertNotIn(K[2], t) def testHasKeyOverflowAndTypes(self): t = self._makeOne() too_big = 2 ** 64 + 1 too_small = -too_big self.assertNotIn(too_big, t) self.assertNotIn(too_small, t) self.assertFalse(t.has_key(too_big)) self.assertFalse(t.has_key(too_small)) self.assertFalse(t.has_key('abc')) def testValuesWorks(self): t = self._makeOne() K = self.KEYS for x in range(100): t[K[x]] = self.coerce_to_value(x * x) values = t.values() for i in range(100): v = self.coerce_to_value(i * i) self.assertEqual(values[i], v) self.assertRaises(IndexError, lambda: values[i + 1]) i = 0 for value in t.itervalues(): self.assertEqual(value, self.coerce_to_value(i * i)) i += 1 def testValuesWorks1(self): t = self._makeOne() K = self.KEYS V = self.VALUES for x in range(100): k = self.coerce_to_key(99 - x) t[k] = V[x] for x in range(40): lst = sorted(t.values(K[0 + x], K[99 - x])) self.assertEqual(lst, [V[i] for i in range(0 + x, 99 - x + 1)]) lst = sorted(t.values(max=K[99 - x], min=K[0 + x])) self.assertEqual(lst, [V[i] for i in range(0 + x, 99 - x + 1)]) @uses_negative_keys_and_values def testValuesNegativeIndex(self): t = self._makeOne() L = [-3, 6, -11, 4] for i in L: t[i] = self.coerce_to_value(i) L = sorted(L) vals = t.values() for i in range(-1, -5, -1): self.assertEqual(vals[i], L[i]) self.assertRaises(IndexError, lambda: vals[-5]) def testKeysWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES for x in range(100): t[K[x]] = V[x] v = t.keys() i = 0 for x in v: self.assertEqual(x, K[i]) i = i + 1 self.assertRaises(IndexError, lambda: v[i]) for x in range(40): lst = t.keys(K[0 + x], K[99 - x]) self.assertEqual( list(lst), [K[x] for x in range(0 + x, 99 - x + 1)] ) lst = t.keys(max=K[99-x], min=K[0+x]) self.assertEqual(list(lst), [K[x] for x in range(0+x, 99-x+1)]) self.assertEqual(len(v), 100) @uses_negative_keys_and_values def testKeysNegativeIndex(self): t = self._makeOne() L = [-3, 6, -11, 4] for i in L: t[i] = self.coerce_to_value(i) L = sorted(L) keys = t.keys() for i in range(-1, -5, -1): self.assertEqual(keys[i], L[i]) self.assertRaises(IndexError, lambda: keys[-5]) def testItemsWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES for x in range(100): t[K[x]] = V[2*x] v = t.items() i = 0 for x in v: self.assertEqual(x[0], K[i]) self.assertEqual(x[1], V[2*i]) i += 1 self.assertRaises(IndexError, lambda: v[i+1]) i = 0 for x in t.iteritems(): self.assertEqual(x, (K[i], V[2*i])) i += 1 items = list(t.items(min=K[12], max=K[20])) self.assertEqual(items, list(zip( (K[i] for i in range(12, 21)), (V[i] for i in range(24, 43, 2)) ))) items = list(t.iteritems(min=K[12], max=K[20])) self.assertEqual(items, list(zip( (K[i] for i in range(12, 21)), (V[i] for i in range(24, 43, 2)) ))) def testItemsNegativeIndex(self): if not (self.SUPPORTS_NEGATIVE_KEYS and self.SUPPORTS_NEGATIVE_VALUES): self.skipTest("Needs negative keys and values") t = self._makeOne() L = [-3, 6, -11, 4] for i in L: t[i] = self.coerce_to_value(i) L = sorted(L) items = t.items() for i in range(-1, -5, -1): self.assertEqual(items[i], (L[i], L[i])) self.assertRaises(IndexError, lambda: items[-5]) def testDeleteInvalidKeyRaisesKeyError(self): self.assertRaises(KeyError, self._deletefail) def _deletefail(self): t = self._makeOne() del t[self.KEYS[1]] def testMaxKeyMinKey(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[7]] = V[6] t[K[3]] = V[10] t[K[8]] = V[12] t[K[1]] = V[100] t[K[5]] = V[200] t[K[10]] = V[500] t[K[6]] = V[99] t[K[4]] = V[150] del t[K[7]] K = self.KEYS 
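# --------------------------------------------------------------------------
# Sketch (illustrative, assuming BTrees.IIBTree) of the inclusive range
# queries and the bounded minKey/maxKey lookups the tests above cover.
from BTrees.IIBTree import IIBTree

_t = IIBTree({i: i * i for i in range(10)})
assert list(_t.keys(3, 6)) == [3, 4, 5, 6]           # both bounds inclusive
assert list(_t.keys(min=3, max=6)) == [3, 4, 5, 6]   # same query by keyword
assert list(_t.values(3, 6)) == [9, 16, 25, 36]

_sparse = IIBTree({1: 1, 5: 5, 9: 9})
assert _sparse.maxKey(6) == 5    # largest key <= 6
assert _sparse.minKey(2) == 5    # smallest key >= 2
# --------------------------------------------------------------------------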
self.assertEqual(t.maxKey(), K[10]) self.assertEqual(t.maxKey(None), K[10]) self.assertEqual(t.maxKey(K[6]), K[6]) self.assertEqual(t.maxKey(K[9]), K[8]) self.assertEqual(t.minKey(), K[1]) self.assertEqual(t.minKey(None), K[1]) self.assertEqual(t.minKey(K[3]), K[3]) self.assertEqual(t.minKey(K[9]), K[10]) try: t.minKey() - 1 except TypeError: # we can't do arithmetic with the key type; # must be fsBTree. return try: t.maxKey(t.minKey() - 1) except ValueError as err: self.assertEqual(str(err), "no key satisfies the conditions") else: self.fail("expected ValueError") try: t.minKey(t.maxKey() + 1) except ValueError as err: self.assertEqual(str(err), "no key satisfies the conditions") else: self.fail("expected ValueError") def testClear(self): import random t = self._makeOne() r = list(range(100)) for x in r: rnd = random.choice(r) t[self.coerce_to_key(rnd)] = self.VALUES[0] t.clear() diff = lsubtract(list(t.keys()), []) self.assertEqual(diff, []) def testUpdate(self): import random t = self._makeOne() d = {} items = [] for i in range(10000): k = random.randrange(*self.KEY_RANDRANGE_ARGS) k = self.coerce_to_key(k) v = self.coerce_to_value(i) d[k] = v items.append((k, v)) items = sorted(d.items()) t.update(d) self.assertEqual(list(t.items()), items) t.clear() self.assertEqual(list(t.items()), []) t.update(items) self.assertEqual(list(t.items()), items) # Before ZODB 3.4.2, update/construction from PersistentMapping failed. def testUpdateFromPersistentMapping(self): from persistent.mapping import PersistentMapping t = self._makeOne() K = self.KEYS V = self.VALUES pm = PersistentMapping({K[1]: V[2]}) t.update(pm) self.assertEqual(list(t.items()), [(K[1], V[2])]) # Construction goes thru the same internals as .update(). t = t.__class__(pm) self.assertEqual(list(t.items()), [(K[1], V[2])]) def testEmptyRangeSearches(self): t = self._makeOne() K = self.KEYS V = self.VALUES t.update([(K[1], V[1]), (K[5], V[5]), (K[9], V[9])]) if self.SUPPORTS_NEGATIVE_KEYS and self.SUPPORTS_NEGATIVE_VALUES: self.assertEqual( list(t.keys(self.coerce_to_key(-6), self.coerce_to_key(-4))), [] ) self.assertEqual(list(t.keys(K[2], K[4])), []) self.assertEqual(list(t.keys(K[6], K[8])), []) self.assertEqual(list(t.keys(K[10], K[12])), []) self.assertEqual(list(t.keys(K[9], K[1])), []) # For IITreeSets, this one was returning 31 for len(keys), and # list(keys) produced a list with 100 elements. t.clear() t.update(list(zip( (self.coerce_to_key(x) for x in range(300)), (self.coerce_to_value(x) for x in range(300))))) two_hundred = K[200] fifty = K[50] keys = t.keys(two_hundred, fifty) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) self.assertEqual(list(t.iterkeys(two_hundred, fifty)), []) keys = t.keys(max=fifty, min=two_hundred) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) self.assertEqual(list(t.iterkeys(max=fifty, min=two_hundred)), []) def testSlicing(self): # Test that slicing of .keys()/.values()/.items() works exactly the # same way as slicing a Python list with the same contents. # This tests fixes to several bugs in this area, starting with # http://collector.zope.org/Zope/419, # "BTreeItems slice contains 1 too many elements". 
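# --------------------------------------------------------------------------
# Sketch of the slicing behaviour verified below: keys()/values()/items()
# return lazy sequences that slice exactly like a Python list with the same
# contents (BTrees.OOBTree assumed; '_'-prefixed names are mine).
from BTrees.OOBTree import OOBTree

_t = OOBTree([('k%02d' % i, i) for i in range(10)])
_keys = _t.keys()
assert list(_keys[2:5]) == ['k02', 'k03', 'k04']
assert list(_keys[-3:]) == ['k07', 'k08', 'k09']        # negative indices work
assert list(_t.items()[:2]) == [('k00', 0), ('k01', 1)]
# --------------------------------------------------------------------------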
t = self._makeOne() val_multiplier = -2 if self.SUPPORTS_NEGATIVE_VALUES else 2 K = self.KEYS V = self.VALUES for n in range(10): t.clear() self.assertEqual(len(t), 0) keys = [] values = [] items = [] for key in range(n): value = key * val_multiplier key = K[key] value = self.coerce_to_value(value) t[key] = value keys.append(key) values.append(value) items.append((key, value)) self.assertEqual(len(t), n) kslice = t.keys() vslice = t.values() islice = t.items() self.assertEqual(len(kslice), n) self.assertEqual(len(vslice), n) self.assertEqual(len(islice), n) # Test whole-structure slices. x = kslice[:] self.assertEqual(list(x), keys[:]) x = vslice[:] self.assertEqual(list(x), values[:]) x = islice[:] self.assertEqual(list(x), items[:]) for lo in range(-2*n, 2*n+1): # Test one-sided slices. x = kslice[:lo] self.assertEqual(list(x), keys[:lo]) x = kslice[lo:] self.assertEqual(list(x), keys[lo:]) x = vslice[:lo] self.assertEqual(list(x), values[:lo]) x = vslice[lo:] self.assertEqual(list(x), values[lo:]) x = islice[:lo] self.assertEqual(list(x), items[:lo]) x = islice[lo:] self.assertEqual(list(x), items[lo:]) for hi in range(-2*n, 2*n+1): # Test two-sided slices. x = kslice[lo:hi] self.assertEqual(list(x), keys[lo:hi]) x = vslice[lo:hi] self.assertEqual(list(x), values[lo:hi]) x = islice[lo:hi] self.assertEqual(list(x), items[lo:hi]) # The specific test case from Zope collector 419. t.clear() for i in range(100): t[K[i]] = self.VALUES[1] tslice = t.items()[20:80] self.assertEqual(len(tslice), 60) self.assertEqual(list(tslice), list(zip( [K[x] for x in range(20, 80)], [V[1]] * 60 ))) @uses_negative_keys_and_values def testIterators(self): t = self._makeOne() for keys in [], [-2], [1, 4], list(range(-170, 2000, 6)): t.clear() for k in keys: val = -3 * k t[k] = self.coerce_to_value(val) self.assertEqual(list(t), keys) x = [] for k in t: x.append(k) self.assertEqual(x, keys) it = iter(t) self.assertTrue(it is iter(it)) x = [] try: while 1: x.append(next(it)) except StopIteration: pass self.assertEqual(x, keys) self.assertEqual(list(t.iterkeys()), keys) self.assertEqual(list(t.itervalues()), list(t.values())) self.assertEqual(list(t.iteritems()), list(t.items())) @uses_negative_keys_and_values def testRangedIterators(self): t = self._makeOne() for keys in [], [-2], [1, 4], list(range(-170, 2000, 13)): t.clear() values = [] for k in keys: value = -3 * k t[k] = self.coerce_to_value(value) values.append(value) items = list(zip(keys, values)) self.assertEqual(list(t.iterkeys()), keys) self.assertEqual(list(t.itervalues()), values) self.assertEqual(list(t.iteritems()), items) if not keys: continue min_mid_max = (keys[0], keys[len(keys) >> 1], keys[-1]) for key1 in min_mid_max: for lo in range(key1 - 1, key1 + 2): # Test one-sided range iterators. 
goodkeys = [k for k in keys if lo <= k] got = t.iterkeys(lo) self.assertEqual(goodkeys, list(got)) goodvalues = [t[k] for k in goodkeys] got = t.itervalues(lo) self.assertEqual(goodvalues, list(got)) gooditems = list(zip(goodkeys, goodvalues)) got = t.iteritems(lo) self.assertEqual(gooditems, list(got)) for key2 in min_mid_max: for hi in range(key2 - 1, key2 + 2): goodkeys = [k for k in keys if lo <= k <= hi] got = t.iterkeys(min=lo, max=hi) self.assertEqual(goodkeys, list(got)) goodvalues = [t[k] for k in goodkeys] got = t.itervalues(lo, max=hi) self.assertEqual(goodvalues, list(got)) gooditems = list(zip(goodkeys, goodvalues)) got = t.iteritems(max=hi, min=lo) self.assertEqual(gooditems, list(got)) def testBadUpdateTupleSize(self): t = self._makeOne() # This one silently ignored the excess in Zope3. key = self.KEYS[1] value = self.VALUES[2] self.assertRaises(TypeError, t.update, [(key, value, value)]) # This one dumped core in Zope3. self.assertRaises(TypeError, t.update, [(key,)]) # This one should simply succeed. t.update([(key, value)]) self.assertEqual(list(t.items()), [(key, value)]) def testSimpleExclusivRanges(self): K = self.KEYS V = self.VALUES def list_keys(x): return [K[k] for k in x] def list_values(x): return [V[k] for k in x] def as_items(x): return [(K[k], V[k]) for k in x] for methodname, f in (("keys", list_keys), ("values", list_values), ("items", as_items), ("iterkeys", list_keys), ("itervalues", list_values), ("iteritems", as_items)): t = self._makeOne() meth = getattr(t, methodname, None) if meth is None: continue __traceback_info__ = meth supports_negative = self.SUPPORTS_NEGATIVE_KEYS self.assertEqual(list(meth()), []) self.assertEqual(list(meth(excludemin=True)), []) self.assertEqual(list(meth(excludemax=True)), []) self.assertEqual(list(meth(excludemin=True, excludemax=True)), []) self._populate(t, 1) self.assertEqual(list(meth()), f([0])) self.assertEqual(list(meth(excludemin=True)), []) self.assertEqual(list(meth(excludemax=True)), []) self.assertEqual(list(meth(excludemin=True, excludemax=True)), []) t.clear() self._populate(t, 2) self.assertEqual(list(meth()), f([0, 1])) self.assertEqual(list(meth(excludemin=True)), f([1])) self.assertEqual(list(meth(excludemax=True)), f([0])) self.assertEqual(list(meth(excludemin=True, excludemax=True)), []) t.clear() self._populate(t, 3) self.assertEqual(list(meth()), f([0, 1, 2])) self.assertEqual(list(meth(excludemin=True)), f([1, 2])) self.assertEqual(list(meth(excludemax=True)), f([0, 1])) self.assertEqual(list(meth(excludemin=True, excludemax=True)), f([1])) if supports_negative: self.assertEqual( list(meth( self.coerce_to_key(-1), K[2], excludemin=True, excludemax=True, )), f([0, 1]) ) self.assertEqual( list(meth( self.coerce_to_key(-1), K[3], excludemin=True, excludemax=True )), f([0, 1, 2]) ) self.assertEqual( list(meth(K[0], K[3], excludemin=True, excludemax=True)), f([1, 2]) ) self.assertEqual( list(meth(K[0], K[2], excludemin=True, excludemax=True)), f([1]) ) def testSetdefault(self): t = self._makeOne() K = self.KEYS V = self.VALUES self.assertEqual(t.setdefault(K[1], V[2]), V[2]) # That should also have associated 1 with 2 in the tree. self.assertTrue(K[1] in t) self.assertEqual(t[K[1]], V[2]) # And trying to change it again should have no effect. self.assertEqual(t.setdefault(K[1], self.coerce_to_value(666)), V[2]) self.assertEqual(t[K[1]], V[2]) # Not enough arguments. self.assertRaises(TypeError, t.setdefault) self.assertRaises(TypeError, t.setdefault, K[1]) # Too many arguments. 
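# --------------------------------------------------------------------------
# Sketch of the exclusive-bound flags exercised by testSimpleExclusivRanges:
# excludemin/excludemax drop the endpoint(s) from an otherwise inclusive
# range query (BTrees.IIBTree assumed).
from BTrees.IIBTree import IIBTree

_t = IIBTree({i: i for i in range(5)})
assert list(_t.keys(1, 3)) == [1, 2, 3]
assert list(_t.keys(1, 3, excludemin=True)) == [2, 3]
assert list(_t.keys(1, 3, excludemax=True)) == [1, 2]
# With no explicit bounds, the smallest and largest keys are the endpoints.
assert list(_t.keys(excludemin=True, excludemax=True)) == [1, 2, 3]
# --------------------------------------------------------------------------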
self.assertRaises(TypeError, t.setdefault, K[1], V[2], V[3]) def testPop(self): t = self._makeOne() K = self.KEYS V = self.VALUES # Empty container. # If no default given, raises KeyError. self.assertRaises(KeyError, t.pop, K[1]) # But if default given, returns that instead. self.assertEqual(t.pop(K[1], 42), 42) t[K[1]] = V[3] # KeyError when key is not in container and default is not passed. self.assertRaises(KeyError, t.pop, K[5]) self.assertEqual(list(t.items()), [(K[1], V[3])]) # If key is in container, returns the value and deletes the key. self.assertEqual(t.pop(K[1]), V[3]) self.assertEqual(len(t), 0) # If key is present, return value bypassing default. t[K[1]] = V[3] self.assertEqual(t.pop(K[1], 7), V[3]) self.assertEqual(len(t), 0) # Pop only one item. t[K[1]] = V[3] t[K[2]] = V[4] self.assertEqual(len(t), 2) self.assertEqual(t.pop(K[1]), V[3]) self.assertEqual(len(t), 1) self.assertEqual(t[K[2]], V[4]) self.assertEqual(t.pop(K[1], 3), 3) # Too few arguments. self.assertRaises(TypeError, t.pop) # Too many arguments. self.assertRaises(TypeError, t.pop, K[1], 2, 3) def __test_key_or_value_type(self, k, v, to_test, kvtype): try: kvtype(to_test) except Exception as e: with self.assertRaises(type(e)): self._makeOne()[k] = self.coerce_to_value(v) else: self._makeOne()[k] = self.coerce_to_value(v) def __test_key(self, k): v = self.getTwoValues()[0] self.__test_key_or_value_type(k, v, k, self.key_type) def __test_value(self, v): k = self.getTwoKeys()[0] self.__test_key_or_value_type(k, v, v, self.value_type) def test_assign_key_type_str(self): self.__test_key('c') # Assigning a str may or may not work; but querying for # one will always return a correct answer, not raise # a TypeError. def testStringAllowedInContains(self): self.assertFalse('key' in self._makeOne()) def testStringKeyRaisesKeyErrorWhenMissing(self): self.assertRaises(KeyError, self._makeOne().__getitem__, 'key') def testStringKeyReturnsDefaultFromGetWhenMissing(self): self.assertEqual(self._makeOne().get('key', 42), 42) def test_assign_key_type_float(self): self.__test_key(2.5) def test_assign_key_type_None(self): self.__test_key(None) def test_assign_value_type_str(self): self.__test_value('c') def test_assign_value_type_float(self): self.__test_value(2.5) def test_assign_value_type_None(self): self.__test_value(None) def testNewStyleClassAsKeyNotAllowed(self): m = self._makeOne() class New: pass with self.assertRaises(TypeError): m[New] = self.getTwoValues()[0] def testClassAsKeyNotAllowed(self): m = self._makeOne() class Cls: pass with self.assertRaises(TypeError): m[Cls] = self.getTwoValues()[0] def testNewStyleClassWithCustomMetaClassNotAllowed(self): class Meta(type): pass cls = Meta('Class', (object,), {}) m = self._makeOne() with self.assertRaises(TypeError): m[cls] = self.getTwoValues()[0] def testEmptyFirstBucketReportedByGuido(self): # This was for Integer keys b = self._makeOne() for i in range(29972): # reduce to 29971 and it works b[self.coerce_to_key(i)] = self.coerce_to_value(i) for i in range(30): # reduce to 29 and it works del b[self.coerce_to_key(i)] try: big_key = self.coerce_to_key(i + 40000) except TypeError: # fsBtree only has a two-byte key self.skipTest('Key to big') b[big_key] = self.coerce_to_value(i) self.assertEqual(b.keys()[0], self.KEYS[30]) def testKeyAndValueOverflow(self): if ( self.key_type.get_upper_bound() is None or self.value_type.get_upper_bound() is None ): self.skipTest("Needs bounded key and value") import struct good = set() b = self._makeOne() # Some platforms (Windows) use a 
32-bit value for long, # meaning that PyInt_AsLong and such can throw OverflowError # for values that are in range on most other platforms. And on Python # 2, PyInt_Check can fail with a TypeError starting at small values # like 2147483648. So we look for small longs and catch those errors # even when we think we should be in range. In all cases, our code # catches the unexpected error (OverflowError) and turns it into # TypeError. long_is_32_bit = struct.calcsize('@l') < 8 in_range_errors = TypeError out_of_range_errors = TypeError V = self.VALUES def trial(i): i = int(i) __traceback_info__ = i, type(i) # As key if i > self.key_type.get_upper_bound(): with self.assertRaises(out_of_range_errors): b[i] = V[0] elif i < self.key_type.get_lower_bound(): with self.assertRaises(out_of_range_errors): b[i] = V[0] else: try: b[i] = V[0] except in_range_errors: pass else: good.add(i) self.assertEqual(b[i], 0) # As value if i > self.value_type.get_upper_bound(): with self.assertRaises(out_of_range_errors): b[V[0]] = self.coerce_to_value(i) elif i < self.value_type.get_lower_bound(): with self.assertRaises(out_of_range_errors): b[V[0]] = self.coerce_to_value(i) else: try: b[V[0]] = self.coerce_to_value(i) except in_range_errors: pass else: self.assertEqual(b[V[0]], i) for i in range(self.key_type.get_upper_bound() - 3, self.key_type.get_upper_bound() + 3): trial(i) trial(-i) if 0 in b: del b[0] self.assertEqual(sorted(good), sorted(b)) if not long_is_32_bit: if self.key_type.get_lower_bound() == 0: # None of the negative values got in self.assertEqual(4, len(b)) else: # 9, not 4 * 2, because of the asymmetry # of twos complement binary integers self.assertEqual(9, len(b)) @_skip_wo_ZODB def testAccessRaisesPOSKeyErrorOnSelf(self): # We don't hide a POSKeyError that happens when # accessing the object itself in `get()`. # See https://github.com/zopefoundation/BTrees/issues/161 import transaction from ZODB.POSException import POSKeyError transaction.begin() m = self._makeOne() root = self._getRoot() root.m = m transaction.commit() conn = root._p_jar # Ghost the object conn.cacheMinimize() self.assertEqual(m._p_status, "ghost") # Remove the backing data self.db._storage._data.clear() transaction.begin() K = self.KEYS try: with self.assertRaises(POSKeyError): m.get(K[1]) with self.assertRaises(POSKeyError): m.setdefault(K[1], self.VALUES[1]) with self.assertRaises(POSKeyError): _ = K[1] in m with self.assertRaises(POSKeyError): m.pop(K[1]) finally: self._closeRoot(root) class BTreeTests(MappingBase): # Tests common to all BTrees def _getTargetClass(self): # Most of the subclasses override _makeOne and not # _getTargetClass, so we can get the type that way. # TODO: This could change for less repetition in the subclasses, # using the name of the class to import the module and find # the type. if type(self)._makeOne is not BTreeTests._makeOne: return type(self._makeOne()) raise NotImplementedError() def _makeOne(self, *args): return self._getTargetClass()(*args) def _checkIt(self, t): from BTrees.check import check t._check() check(t) @_skip_wo_ZODB def testAccessRaisesPOSKeyErrorOnNested(self): # We don't hide a POSKeyError that happens when # accessing sub objects in `get()`. # See https://github.com/zopefoundation/BTrees/issues/161 import transaction from ZODB.POSException import POSKeyError transaction.begin() m = self._makeOne() root = self._getRoot() root.m = m self._populate(m, 1000) transaction.commit() conn = root._p_jar # Ghost the objects... 
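# --------------------------------------------------------------------------
# Sketch of the two consistency checks that _checkIt (defined below)
# combines: the object's own _check() and the deeper BTrees.check.check
# invariant checker.  Both raise on a corrupted structure and stay silent
# on a healthy one (BTrees.OOBTree assumed).
from BTrees.OOBTree import OOBTree
from BTrees.check import check

_t = OOBTree([(str(i), i) for i in range(1000)])
_t._check()     # internal sanity check of the bucket/tree structure
check(_t)       # module-level checker used throughout these tests
# --------------------------------------------------------------------------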
conn.cacheMinimize() # ...but activate the tree itself, leaving the # buckets ghosted m._p_activate() # Remove the backing data self.db._storage._data.clear() to_key = self.coerce_to_key to_value = self.coerce_to_value transaction.begin() try: with self.assertRaises(POSKeyError): m.get(to_key(1)) with self.assertRaises(POSKeyError): m.setdefault(to_key(1), to_value(1)) with self.assertRaises(POSKeyError): _ = to_key(1) in m with self.assertRaises(POSKeyError): m.pop(to_key(1)) finally: self._closeRoot(root) def testDeleteNoChildrenWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[5]] = V[6] t[K[2]] = V[10] t[K[6]] = V[12] t[K[1]] = V[100] t[K[3]] = V[200] t[K[10]] = self.coerce_to_value(500) t[K[4]] = V[99] del t[K[4]] keys = [ self.coerce_to_key(x) for x in (1, 2, 3, 5, 6, 10) ] diff = lsubtract(t.keys(), keys) self.assertEqual(diff, [], diff) self._checkIt(t) def testDeleteOneChildWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[5]] = V[6] t[K[2]] = V[10] t[K[6]] = V[12] t[K[1]] = V[100] t[K[3]] = V[200] t[K[10]] = self.coerce_to_value(500) t[K[4]] = V[99] del t[K[3]] keys = [ self.coerce_to_key(x) for x in (1, 2, 4, 5, 6, 10) ] diff = lsubtract(t.keys(), keys) self.assertEqual(diff, [], diff) self._checkIt(t) def testDeleteTwoChildrenNoInorderSuccessorWorks(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[5]] = V[6] t[K[2]] = V[10] t[K[6]] = V[12] t[K[1]] = V[100] t[K[3]] = V[200] t[K[10]] = self.coerce_to_value(500) t[K[4]] = V[99] del t[K[2]] keys = [ self.coerce_to_key(x) for x in (1, 3, 4, 5, 6, 10) ] diff = lsubtract(t.keys(), keys) self.assertEqual(diff, [], diff) self._checkIt(t) def testDeleteTwoChildrenInorderSuccessorWorks(self): # 7, 3, 8, 1, 5, 10, 6, 4 -- del 3 t = self._makeOne() K = self.KEYS V = self.VALUES t[K[7]] = V[6] t[K[3]] = V[10] t[K[8]] = V[12] t[K[1]] = V[100] t[K[5]] = V[200] t[K[10]] = self.coerce_to_value(500) t[K[6]] = V[99] t[K[4]] = V[150] del t[K[3]] keys = [ self.coerce_to_key(x) for x in (1, 4, 5, 6, 7, 8, 10) ] diff = lsubtract(t.keys(), keys) self.assertEqual(diff, [], diff) self._checkIt(t) def testDeleteRootWorks(self): # 7, 3, 8, 1, 5, 10, 6, 4 -- del 7 t = self._makeOne() K = self.KEYS V = self.VALUES t[K[7]] = V[6] t[K[3]] = V[10] t[K[8]] = V[12] t[K[1]] = V[100] t[K[5]] = V[200] t[K[10]] = self.coerce_to_value(500) t[K[6]] = V[99] t[K[4]] = V[150] del t[K[7]] keys = [ self.coerce_to_key(x) for x in (1, 3, 4, 5, 6, 8, 10) ] diff = lsubtract(t.keys(), keys) self.assertEqual(diff, [], diff) self._checkIt(t) def testRandomNonOverlappingInserts(self): import random t = self._makeOne() added = {} r = list(range(100)) K = self.KEYS V = self.VALUES for x in r: k = random.choice(r) k = K[k] if k not in added: t[k] = V[x] added[k] = V[1] addl = sorted(added.keys()) diff = lsubtract(list(t.keys()), addl) self.assertEqual(diff, [], (diff, addl, list(t.keys()))) self._checkIt(t) def testRandomOverlappingInserts(self): import random t = self._makeOne() added = {} r = list(range(100)) K = self.KEYS V = self.VALUES for x in r: k = random.choice(r) k = K[k] t[k] = V[x] added[k] = V[1] addl = sorted(added.keys()) diff = lsubtract(t.keys(), addl) self.assertEqual(diff, [], diff) self._checkIt(t) def testRandomDeletes(self): import random t = self._makeOne() K = self.KEYS V = self.VALUES r = list(range(1000)) added = [] for x in r: k = random.choice(r) k = K[k] t[k] = V[x] added.append(k) deleted = [] for x in r: k = random.choice(r) k = K[k] if k in t: self.assertTrue(k in t) del t[k] deleted.append(k) if k in t: 
self.fail("had problems deleting %s" % k) badones = [] for x in deleted: if x in t: badones.append(x) self.assertEqual(badones, [], (badones, added, deleted)) self._checkIt(t) def testTargetedDeletes(self): import random t = self._makeOne() r = list(range(1000)) K = self.KEYS V = self.VALUES for x in r: k = random.choice(r) k = K[k] v = V[x] t[k] = v for x in r: k = K[x] try: del t[k] except KeyError: pass self.assertEqual(realseq(t.keys()), [], realseq(t.keys())) self._checkIt(t) def testPathologicalRightBranching(self): t = self._makeOne() K = self.KEYS V = self.VALUES r = [K[k] for k in range(1000)] for x in r: t[x] = V[1] self.assertEqual(realseq(t.keys()), r, realseq(t.keys())) for x in r: del t[x] self.assertEqual(realseq(t.keys()), [], realseq(t.keys())) self._checkIt(t) def testPathologicalLeftBranching(self): t = self._makeOne() r = [self.coerce_to_key(k) for k in range(1000)] revr = list(reversed(r[:])) for x in revr: t[x] = self.VALUES[1] self.assertEqual(realseq(t.keys()), r, realseq(t.keys())) for x in revr: del t[x] self.assertEqual(realseq(t.keys()), [], realseq(t.keys())) self._checkIt(t) def testSuccessorChildParentRewriteExerciseCase(self): t = self._makeOne() K = self.KEYS V = self.VALUES add_order = [ 85, 73, 165, 273, 215, 142, 233, 67, 86, 166, 235, 225, 255, 73, 175, 171, 285, 162, 108, 28, 283, 258, 232, 199, 260, 298, 275, 44, 261, 291, 4, 181, 285, 289, 216, 212, 129, 243, 97, 48, 48, 159, 22, 285, 92, 110, 27, 55, 202, 294, 113, 251, 193, 290, 55, 58, 239, 71, 4, 75, 129, 91, 111, 271, 101, 289, 194, 218, 77, 142, 94, 100, 115, 101, 226, 17, 94, 56, 18, 163, 93, 199, 286, 213, 126, 240, 245, 190, 195, 204, 100, 199, 161, 292, 202, 48, 165, 6, 173, 40, 218, 271, 228, 7, 166, 173, 138, 93, 22, 140, 41, 234, 17, 249, 215, 12, 292, 246, 272, 260, 140, 58, 2, 91, 246, 189, 116, 72, 259, 34, 120, 263, 168, 298, 118, 18, 28, 299, 192, 252, 112, 60, 277, 273, 286, 15, 263, 141, 241, 172, 255, 52, 89, 127, 119, 255, 184, 213, 44, 116, 231, 173, 298, 178, 196, 89, 184, 289, 98, 216, 115, 35, 132, 278, 238, 20, 241, 128, 179, 159, 107, 206, 194, 31, 260, 122, 56, 144, 118, 283, 183, 215, 214, 87, 33, 205, 183, 212, 221, 216, 296, 40, 108, 45, 188, 139, 38, 256, 276, 114, 270, 112, 214, 191, 147, 111, 299, 107, 101, 43, 84, 127, 67, 205, 251, 38, 91, 297, 26, 165, 187, 19, 6, 73, 4, 176, 195, 90, 71, 30, 82, 139, 210, 8, 41, 253, 127, 190, 102, 280, 26, 233, 32, 257, 194, 263, 203, 190, 111, 218, 199, 29, 81, 207, 18, 180, 157, 172, 192, 135, 163, 275, 74, 296, 298, 265, 105, 191, 282, 277, 83, 188, 144, 259, 6, 173, 81, 107, 292, 231, 129, 65, 161, 113, 103, 136, 255, 285, 289, 1 ] delete_order = [ 276, 273, 12, 275, 2, 286, 127, 83, 92, 33, 101, 195, 299, 191, 22, 232, 291, 226, 110, 94, 257, 233, 215, 184, 35, 178, 18, 74, 296, 210, 298, 81, 265, 175, 116, 261, 212, 277, 260, 234, 6, 129, 31, 4, 235, 249, 34, 289, 105, 259, 91, 93, 119, 7, 183, 240, 41, 253, 290, 136, 75, 292, 67, 112, 111, 256, 163, 38, 126, 139, 98, 56, 282, 60, 26, 55, 245, 225, 32, 52, 40, 271, 29, 252, 239, 89, 87, 205, 213, 180, 97, 108, 120, 218, 44, 187, 196, 251, 202, 203, 172, 28, 188, 77, 90, 199, 297, 282, 141, 100, 161, 216, 73, 19, 17, 189, 30, 258 ] for x in add_order: t[K[x]] = V[1] for x in delete_order: try: del t[K[x]] except KeyError: if K[x] in t: self.assertEqual(1, 2, "failed to delete %s" % x) self._checkIt(t) def testRangeSearchAfterSequentialInsert(self): t = self._makeOne() K = self.KEYS V = self.VALUES r = [K[k] for k in range(100)] for x in r: t[x] = V[0] diff = 
lsubtract(list(t.keys(K[0], K[100])), r) self.assertEqual(diff, [], diff) # The same thing with no bounds diff = lsubtract(list(t.keys(None, None)), r) self.assertEqual(diff, [], diff) # The same thing with each bound set and the other # explicitly None diff = lsubtract(list(t.keys(K[0], None)), r) self.assertEqual(diff, [], diff) diff = lsubtract(list(t.keys(None, K[100])), r) self.assertEqual(diff, [], diff) self._checkIt(t) def testRangeSearchAfterRandomInsert(self): import random t = self._makeOne() r = range(100) a = {} V = self.VALUES K = self.KEYS for x in r: rnd = random.choice(r) rnd = K[rnd] t[rnd] = V[0] a[rnd] = V[0] diff = lsubtract(list(t.keys(K[0], K[100])), a.keys()) self.assertEqual(diff, [], diff) self._checkIt(t) def testPathologicalRangeSearch(self): # XXX: This test needs some work to be able to handle fsBTree # objects. It makes assumptions about bucket sizes and key ordering # that doesn't hold. if self._getTargetClass().__name__[:2] == 'fs': self.skipTest("XXX: Needs ported for fsBTree") t = self._makeOne() # Build a 2-level tree with at least two buckets. if self.SUPPORTS_NEGATIVE_KEYS: range_begin = 0 firstkey_offset = 1 else: range_begin = 1 firstkey_offset = 0 before_range_begin = range_begin - 1 for i in range(range_begin, 200 + range_begin): t[self.coerce_to_key(i)] = self.coerce_to_value(i) items, dummy = t.__getstate__() self.assertTrue(len(items) > 2) # at least two buckets and a key # All values in the first bucket are < firstkey. All in the # second bucket are >= firstkey, and firstkey is the first key in # the second bucket. firstkey = items[1] therange = t.keys(self.coerce_to_key(before_range_begin), firstkey) self.assertEqual(len(therange), firstkey + firstkey_offset) self.assertEqual( list(therange), list(range(range_begin, firstkey + 1)) ) # Now for the tricky part. If we delete firstkey, the second bucket # loses its smallest key, but firstkey remains in the BTree node. # If we then do a high-end range search on firstkey, the BTree node # directs us to look in the second bucket, but there's no longer any # key <= firstkey in that bucket. The correct answer points to the # end of the *first* bucket. The algorithm has to be smart enough # to "go backwards" in the BTree then; if it doesn't, it will # erroneously claim that the range is empty. del t[firstkey] therange = t.keys(min=before_range_begin, max=firstkey) self.assertEqual(len(therange), firstkey - range_begin) self.assertEqual(list(therange), list(range(range_begin, firstkey))) self._checkIt(t) def testInsertMethod(self): t = self._makeOne() K = self.KEYS V = self.VALUES t[K[0]] = V[1] self.assertEqual(t.insert(K[0], V[1]), 0) self.assertEqual(t.insert(K[1], V[1]), 1) self.assertEqual(lsubtract(list(t.keys()), [K[0], K[1]]), []) self._checkIt(t) def testDamagedIterator(self): # A cute one from Steve Alexander. This caused the BTreeItems # object to go insane, accessing memory beyond the allocated part # of the bucket. If it fails, the symptom is either a C-level # assertion error (if the BTree code was compiled without NDEBUG), # or most likely a segfault (if the BTree code was compiled with # NDEBUG). t = self._makeOne() self._populate(t, 10) # In order for this to fail, it's important that k be a "lazy" # iterator, referring to the BTree by indirect position (index) # instead of a fully materialized list. Then the position can # end up pointing into trash memory, if the bucket pointed to # shrinks. 
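# --------------------------------------------------------------------------
# Sketch of the safe way to delete while iterating, the situation
# testDamagedIterator probes from the unsafe side: materialize the lazy
# keys() view first.  Deleting through a live keys() view can raise
# RuntimeError ("the bucket being iterated changed size") in the C
# implementation or KeyError in the Python one, as the test shows.
from BTrees.IIBTree import IIBTree

_t = IIBTree({i: i for i in range(10)})
for _k in list(_t.keys()):   # snapshot before mutating
    del _t[_k]
assert len(_t) == 0
# --------------------------------------------------------------------------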
k = t.keys() for dummy in range(20): try: del t[k[0]] except RuntimeError as detail: self.assertEqual(str(detail), "the bucket being iterated " "changed size") break except KeyError as v: # The Python implementation behaves very differently and # gives a key error in this situation. It can't mess up # memory and can't readily detect changes to underlying buckets # in any sane way. self.assertEqual(v.args[0], k[0]) self._checkIt(t) def testAddTwoSetsChanged(self): # A bug in the BTree Python implementation once # caused adding a second item to a tree to fail # to set _p_changed (adding the first item sets it because # the _firstbucket gets set, but the second item only grew the # existing bucket) t = self._makeOne() # Note that for the property to actually hold, we have to fake a # _p_jar and _p_oid t._p_oid = b'\0\0\0\0\0' class Jar: def __init__(self): self._cache = self self.registered = None def mru(self, arg): pass def readCurrent(self, arg): pass def register(self, arg): self.registered = arg t._p_jar = Jar() K = self.KEYS V = self.VALUES t[K[1]] = V[3] # reset these, setting _firstbucket triggered a change t._p_changed = False t._p_jar.registered = None t[K[2]] = V[4] self.assertTrue(t._p_changed) self.assertEqual(t, t._p_jar.registered) # Setting the same key to a different value also triggers a change t._p_changed = False t._p_jar.registered = None t[K[2]] = V[5] self.assertTrue(t._p_changed) self.assertEqual(t, t._p_jar.registered) # Likewise with only a single value t = self._makeOne() t._p_oid = b'\0\0\0\0\0' t._p_jar = Jar() t[K[1]] = V[3] # reset these, setting _firstbucket triggered a change t._p_changed = False t._p_jar.registered = None t[K[1]] = V[6] self.assertTrue(t._p_changed) self.assertEqual(t, t._p_jar.registered) def testRemoveInSmallMapSetsChanged(self): # A bug in the BTree Python implementation once caused # deleting from a small btree to set _p_changed. # There must be at least two objects so that _firstbucket doesn't # get set t = self._makeOne() K = self.KEYS V = self.VALUES # Note that for the property to actually hold, we have to fake a # _p_jar and _p_oid t._p_oid = b'\0\0\0\0\0' class Jar: def __init__(self): self._cache = self self.registered = None def mru(self, arg): pass def readCurrent(self, arg): pass def register(self, arg): self.registered = arg t._p_jar = Jar() t[K[0]] = V[1] t[K[1]] = V[2] # reset these, setting _firstbucket triggered a change t._p_changed = False t._p_jar.registered = None # now remove the second value del t[K[1]] self.assertTrue(t._p_changed) self.assertEqual(t, t._p_jar.registered) def test_legacy_py_pickle(self): # Issue #2 # If we have a pickle that includes the 'Py' suffix, # it (unfortunately) unpickles to the python type. But # new pickles never produce that. import pickle made_one = self._makeOne() for proto in (1, 2): s = pickle.dumps(made_one, proto) # It's not legacy assert b'TreePy\n' not in s, repr(s) # \np for protocol 1, \nq for proto 2, assert b'Tree\np' in s or b'Tree\nq' in s, repr(s) # Now make it pseudo-legacy legacys = s.replace(b'Tree\np', b'TreePy\np') legacys = legacys.replace(b'Tree\nq', b'TreePy\nq') # It loads up as the specified class loaded_one = pickle.loads(legacys) # It still functions and can be dumped again, as the original class s2 = pickle.dumps(loaded_one, proto) self.assertTrue(b'Py' not in s2) self.assertEqual(s2, s) def testSetstateBadChild(self): # tree used to allow to pass in non (tree or bucket) node as a child # via __setstate__. This was leading to crashes when using C mode. 
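# --------------------------------------------------------------------------
# Sketch of the pickle state these tests manipulate directly: a bucket's
# __getstate__ exposes a flat (key, value, key, value, ...) tuple, and
# __setstate__ rebuilds an equivalent bucket from it.  (A BTree's state is
# similarly a (children, firstbucket) pair, which is what testSetstateBadChild
# below feeds to __setstate__.)  BTrees.OOBTree.OOBucket assumed.
from BTrees.OOBTree import OOBucket

_b = OOBucket({'a': 1, 'b': 2})
_state = _b.__getstate__()
assert _state[0] == ('a', 1, 'b', 2)   # flattened, key-sorted items

_b2 = OOBucket()
_b2.__setstate__(_state)
assert dict(_b2.items()) == {'a': 1, 'b': 2}
# --------------------------------------------------------------------------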
t = self._makeOne() b = t._bucket_type() K = self.KEYS V = self.VALUES if isaset(b): b.add(K[1]) else: b[K[1]] = V[11] # xchild is non-BTree class deriving from Persistent import persistent xchild = persistent.Persistent() self.assertIs(xchild._p_oid, None) typeErrOK = "tree child {} is neither {} nor {}".format( _tp_name(type(xchild)), _tp_name(type(t)), _tp_name(t._bucket_type) ) # if the following is allowed, e.g. # t.__getstate__(), or t[0]=1 corrupt memory and crash. with self.assertRaises(TypeError) as exc: t.__setstate__( ( (xchild,), # child0 is neither tree nor bucket b ) ) self.assertEqual(str(exc.exception), typeErrOK) # if the following is allowed, e.g. t[5]=1 corrupts memory and crash. with self.assertRaises(TypeError) as exc: t.__setstate__( ( (b, K[4], xchild), b ) ) self.assertEqual(str(exc.exception), typeErrOK) class NormalSetTests(Base): # Test common to all set types def _getCollectionsABC(self): import collections.abc return collections.abc.MutableSet def _populate(self, t, largest): # Make some data t.update(self.coerce_to_key(k) for k in range(largest)) def test_isdisjoint(self): t = self._makeOne() K = self.KEYS # The empty set is disjoint with itself self.assertTrue(t.isdisjoint(t)) # Empty sequences self.assertTrue(t.isdisjoint(())) self.assertTrue(t.isdisjoint([])) self.assertTrue(t.isdisjoint(set())) # non-empty sequences but empty set self.assertTrue(t.isdisjoint((K[1], K[2]))) self.assertTrue(t.isdisjoint([K[1], K[2]])) self.assertTrue(t.isdisjoint({K[1], K[2]})) self.assertTrue(t.isdisjoint(K[k] for k in range(10))) # non-empty sequences and non-empty set, null intersection self._populate(t, 2) self.assertEqual(set(t), {K[0], K[1]}) self.assertTrue(t.isdisjoint((K[2], K[3]))) self.assertTrue(t.isdisjoint([K[2], K[3]])) self.assertTrue(t.isdisjoint({K[2], K[3]})) self.assertTrue(t.isdisjoint(K[k] for k in range(2, 10))) # non-null intersection self.assertFalse(t.isdisjoint(t)) self.assertFalse(t.isdisjoint((K[0],))) self.assertFalse(t.isdisjoint((K[1],))) self.assertFalse(t.isdisjoint([K[2], K[3], K[0]])) self.assertFalse(t.isdisjoint({K[2], K[3], K[1]})) self.assertFalse(t.isdisjoint(K[k] for k in range(1, 10))) def test_discard(self): t = self._makeOne() K = self.KEYS # empty set, raises no error, even if the key is # of the wrong type t.discard(K[1]) t.discard(object()) self.assertEqual(set(t), set()) # non-empty set, discarding absent key self._populate(t, 2) self.assertEqual(set(t), {K[0], K[1]}) t.discard(K[3]) t.discard(object()) self.assertEqual(set(t), {K[0], K[1]}) t.discard(K[1]) self.assertEqual(set(t), {K[0]}) t.discard(K[0]) self.assertEqual(set(t), set()) def test_pop(self): t = self._makeOne() K = self.KEYS with self.assertRaises(KeyError): t.pop() self._populate(t, 2) self.assertEqual(K[0], t.pop()) self.assertEqual(K[1], t.pop()) self.assertEqual(set(t), set()) with self.assertRaises(KeyError): t.pop() def test___ior__(self): t = self._makeOne() K = self.KEYS orig_t = t t |= (K[1],) t |= [K[2],] t |= {K[1], K[2], K[3]} t |= (K[k] for k in range(10)) t |= t self.assertIs(t, orig_t) self.assertEqual(set(t), {K[k] for k in range(10)}) def test___iand__(self): t = self._makeOne() K = self.KEYS orig_t = t t &= (K[1],) t &= [K[2],] t &= {K[3], K[4]} self.assertIs(t, orig_t) self.assertEqual(set(t), set()) self._populate(t, 10) self.assertEqual(set(t), {K[k] for k in range(10)}) t &= (K[1], K[2], K[3]) self.assertIs(t, orig_t) self.assertEqual(set(t), {K[1], K[2], K[3]}) def test___isub__(self): t = self._makeOne() K = self.KEYS orig_t = t t -= 
(K[1],) t -= [K[2],] t -= {K[3], K[4]} self.assertIs(t, orig_t) self.assertEqual(set(t), set()) self._populate(t, 10) self.assertEqual(set(t), {K[k] for k in range(10)}) t -= (K[1], K[2], K[3]) self.assertIs(t, orig_t) self.assertEqual(set(t), {K[0], K[4], K[5], K[6], K[7], K[8], K[9]}) t -= t self.assertIs(t, orig_t) self.assertEqual(set(t), set()) def test___ixor__(self): t = self._makeOne() K = self.KEYS t ^= (K[1],) self.assertEqual(set(t), {K[1]}) t ^= t self.assertEqual(set(t), set()) t ^= (K[1], K[2], K[3]) self.assertEqual(set(t), {K[1], K[2], K[3]}) t ^= [K[2], K[3], K[4]] self.assertEqual(set(t), {K[1], K[4]}) def test___xor__(self): t = self._makeOne() K = self.KEYS u = t ^ (K[1],) self.assertEqual(set(u), {K[1]}) u = t ^ t self.assertEqual(set(u), set()) u = t ^ (K[1], K[2], K[3]) self.assertEqual(set(u), {K[1], K[2], K[3]}) t.update(u) u = t ^ [K[2], K[3], K[4]] self.assertEqual(set(u), {K[1], K[4]}) def testShortRepr(self): t = self._makeOne() K = self.KEYS for i in range(5): t.add(K[i]) t._p_oid = b'12345678' r = repr(t) # Make sure the repr is **not* 10000 bytes long for a shrort bucket. # (the buffer must be terminated when copied). self.assertTrue(len(r) < 10000) # Make sure the repr is human readable, unless it's a tree if 'TreeSet' not in r: self.assertTrue(r.endswith("Set(%r)" % t.keys())) else: # persistent-4.4 changed the default reprs, adding # oid and jar reprs self.assertIn("= bucket_size: data = tree.__getstate__()[0] if len(data) >= 3: break transaction.commit() # Now, delete the internal key and make sure it's really gone key = data[1] del tree[key] data = tree.__getstate__()[0] self.assertTrue(data[1] != key) # The tree should have changed: self.assertTrue(tree._p_changed) # Grow the btree until we have multiple levels while 1: i += 1 try: self.add_key(tree, i) except KeyCoercionFailed: # Only expected in fsbtree assert i == 32768 and type(tree).__name__.startswith('fs') break if i >= tree_size * bucket_size: data = tree.__getstate__()[0] if data[0].__class__ == tree.__class__: assert len(data[2].__getstate__()[0]) >= 3 break # Now, delete the internal key and make sure it's really gone key = data[1] del tree[key] data = tree.__getstate__()[0] self.assertTrue(data[1] != key) transaction.abort() db.close() class ModuleTest: # test for presence of generic names in module prefix = '' key_type = None value_type = None def _getModule(self): raise NotImplementedError def testNames(self): names = ['Bucket', 'BTree', 'Set', 'TreeSet'] mod = self._getModule() mod_all = mod.__all__ for name in names: klass = getattr(mod, name) self.assertEqual(klass.__module__, mod.__name__) self.assertIs(klass, getattr(mod, self.prefix + name)) self.assertIn(name, mod_all) self.assertIn(self.prefix + name, mod_all) # BBB for zope.app.security ZCML :( pfx_iter = self.prefix + 'TreeIterator' klass = getattr(mod, pfx_iter) self.assertEqual(klass.__module__, mod.__name__) def testModuleProvides(self): from zope.interface.verify import verifyObject verifyObject(self._getInterface(), self._getModule()) def testFamily(self): import BTrees if self.prefix == 'OO': self.assertTrue( getattr(self._getModule(), 'family', self) is self) elif 'L' in self.prefix: self.assertTrue(self._getModule().family is BTrees.family64) elif 'I' in self.prefix: self.assertTrue(self._getModule().family is BTrees.family32) def _check_union_presence(self, datatype, name): mod = self._getModule() if datatype.supports_value_union(): in_ = self.assertIn has = self.assertTrue else: in_ = self.assertNotIn has = 
self.assertFalse in_(name, dir(mod)) has(hasattr(mod, name)) in_(name, mod.__all__) # The weighted* functions require the value type to support unions. def test_weightedUnion_presence(self): self._check_union_presence(self.value_type, 'weightedUnion') def test_weightedIntersection_presence(self): self._check_union_presence(self.value_type, 'weightedIntersection') # The multiunion function requires the key type to support unions def test_multiunion_presence(self): self._check_union_presence(self.key_type, 'multiunion') class I_SetsBase: def setUp(self): super().setUp() _skip_if_pure_py_and_py_test(self) def _getTargetClass(self): raise NotImplementedError def _makeOne(self): return self._getTargetClass()() def testBadBadKeyAfterFirst(self): with self.assertRaises(TypeError): self._getTargetClass()([1, object()]) t = self._makeOne() with self.assertRaises(TypeError): t.update([1, object()]) def __test_key(self, k): try: self.key_type(k) except Exception as e: with self.assertRaises(type(e)): self._makeOne().insert(k) else: self._makeOne().insert(k) def test_key_type_str(self): self.__test_key('c') def test_key_type_float(self): self.__test_key(2.5) def test_key_type_None(self): self.__test_key(None) LARGEST_32_BITS = 2147483647 SMALLEST_32_BITS = -LARGEST_32_BITS - 1 SMALLEST_POSITIVE_33_BITS = LARGEST_32_BITS + 1 LARGEST_NEGATIVE_33_BITS = SMALLEST_32_BITS - 1 LARGEST_64_BITS = 0x7fffffffffffffff # Signed. 2**63 - 1 SMALLEST_64_BITS = -LARGEST_64_BITS - 1 SMALLEST_POSITIVE_65_BITS = LARGEST_64_BITS + 1 LARGEST_NEGATIVE_65_BITS = SMALLEST_64_BITS - 1 class TestLongIntSupport: def getTwoValues(self): # Return two distinct values; these must compare as un-equal. # # These values must be usable as values. return object(), object() def getTwoKeys(self): # Return two distinct values, these must compare as un-equal. # # These values must be usable as keys. 
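# --------------------------------------------------------------------------
# Sketch of the module-level naming contract ModuleTest verifies: every
# module exposes generic aliases (BTree, Bucket, Set, TreeSet) for its
# prefixed classes, and the integer modules advertise the family they
# belong to (alias identities shown here are assumptions drawn from those
# tests).
import BTrees
from BTrees import IIBTree as _II

assert _II.IIBTree is _II.BTree                          # generic alias
assert _II.family is BTrees.family32                     # 32-bit integer family
assert BTrees.family64.IO.BTree is BTrees.family64.IO.LOBTree
# --------------------------------------------------------------------------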
return 0, 1 def _skip_if_not_64bit(self): mod = sys.modules[self._getTargetClass().__module__] if not mod.using64bits: self.skipTest("Needs 64 bit support.") # pragma: no cover class TestLongIntKeys(TestLongIntSupport): SUPPORTS_NEGATIVE_KEYS = True def _makeLong(self, v): try: return long(v) except NameError: # pragma: no cover return int(v) def testLongIntKeysWork(self): self._skip_if_not_64bit() t = self._makeOne() K = self.KEYS o1, o2 = self.getTwoValues() assert o1 != o2 # Test some small key values first: one_long = self._makeLong(1) t[one_long] = o1 self.assertEqual(t[K[1]], o1) t[K[1]] = o2 self.assertEqual(t[one_long], o2) self.assertEqual(list(t.keys()), [1]) self.assertEqual(list(t.keys(None, None)), [1]) # Test some large key values too: k1 = SMALLEST_POSITIVE_33_BITS k2 = LARGEST_64_BITS k3 = SMALLEST_64_BITS if self.SUPPORTS_NEGATIVE_KEYS else 0 t[k1] = o1 t[k2] = o2 t[k3] = o1 self.assertEqual(t[k1], o1) self.assertEqual(t[k2], o2) self.assertEqual(t[k3], o1) self.assertEqual(list(t.keys()), [k3, 1, k1, k2]) self.assertEqual(list(t.keys(k3, None)), [k3, 1, k1, k2]) self.assertEqual(list(t.keys(None, k2)), [k3, 1, k1, k2]) def testLongIntKeysOutOfRange(self): self._skip_if_not_64bit() o1, o2 = self.getTwoValues() t = self._makeOne() k1 = ( SMALLEST_POSITIVE_65_BITS if self.SUPPORTS_NEGATIVE_KEYS else 2**64 + 1 ) with self.assertRaises(TypeError): t[k1] = self.coerce_to_value(o1) t = self._makeOne() with self.assertRaises(TypeError): t[LARGEST_NEGATIVE_65_BITS] = self.coerce_to_value(o1) class TestLongIntValues(TestLongIntSupport): SUPPORTS_NEGATIVE_VALUES = True def testLongIntValuesWork(self): self._skip_if_not_64bit() t = self._makeOne() keys = sorted(self.getTwoKeys()) k1, k2 = keys assert k1 != k2 # This is the smallest positive integer that requires 33 bits: v1 = SMALLEST_POSITIVE_33_BITS v2 = v1 + 1 t[k1] = self.coerce_to_value(v1) t[k2] = self.coerce_to_value(v2) self.assertEqual(t[k1], v1) self.assertEqual(t[k2], v2) self.assertEqual(list(t.values()), [v1, v2]) self.assertEqual(list(t.values(None, None)), [v1, v2]) def testLongIntValuesOutOfRange(self): self._skip_if_not_64bit() k1, k2 = self.getTwoKeys() t = self._makeOne() v1 = ( SMALLEST_POSITIVE_65_BITS if self.SUPPORTS_NEGATIVE_VALUES else 2**64 + 1 ) with self.assertRaises(TypeError): t[k1] = self.coerce_to_value(v1) t = self._makeOne() with self.assertRaises(TypeError): t[k1] = self.coerce_to_value(LARGEST_NEGATIVE_65_BITS) def makeMapBuilder(self, mapbuilder): # Given a mapping builder (IIBTree, OOBucket, etc), return a function # that builds an object of that type given only a list of keys. 
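# --------------------------------------------------------------------------
# Sketch of the 64-bit key bounds the TestLongIntKeys cases probe: keys up
# to 2**63 - 1 are accepted by the family64 integer-keyed trees, and the
# first value outside the signed 64-bit range is reported as a TypeError.
import BTrees

_t = BTrees.family64.IO.BTree()     # 64-bit integer keys, object values
_t[2 ** 40] = 'fits'
_t[-(2 ** 40)] = 'fits too'
try:
    _t[2 ** 63] = 'too big'         # outside the signed 64-bit range
except TypeError:
    pass
else:
    raise AssertionError('expected TypeError for an out-of-range key')
# --------------------------------------------------------------------------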
def result(keys=(), mapbuilder=mapbuilder, self=self): return mapbuilder(list(zip( (self.KEYS[k] for k in keys), (self.VALUES[k] for k in keys) ))) return result def makeSetBuilder(self, setbuilder): def result(keys=(), setbuilder=setbuilder, self=self): return setbuilder(self.KEYS[k] for k in keys) return result class SetResult: # Subclasses have to set up: # builders() - function returning functions to build inputs, # each returned callable takes an optional keys arg # intersection, union, difference - set to the type-correct versions def setUp(self): super().setUp() _skip_if_pure_py_and_py_test(self) rawAkeys = [1, 3, 5, 6] rawBkeys = [ 2, 3, 4, 6, 7] # noqa #201 self.Akeys = [self.KEYS[k] for k in rawAkeys] self.Bkeys = [self.KEYS[k] for k in rawBkeys] self.As = [makeset(rawAkeys) for makeset in self.builders()] self.Bs = [makeset(rawBkeys) for makeset in self.builders()] self.emptys = [makeset() for makeset in self.builders()] # Slow but obviously correct Python implementations of basic ops. def _union(self, x, y): result = list(x) for e in y: if e not in result: result.append(e) return sorted(result) def _intersection(self, x, y): result = [] for e in x: if e in y: result.append(e) return result def _difference(self, x, y): result = list(x) for e in y: if e in result: result.remove(e) # Difference preserves LHS values. if hasattr(x, "values"): result = [(k, x[k]) for k in result] return result def testNone(self): for op in self.union, self.intersection, self.difference: C = op(None, None) self.assertIsNone(C) for op in self.union, self.intersection, self.difference: for A in self.As: C = op(A, None) self.assertIs(C, A) C = op(None, A) if op == self.difference: self.assertIsNone(C, None) else: self.assertIs(C, A) def testEmptyUnion(self): for A in self.As: for E in self.emptys: C = self.union(A, E) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), self.Akeys) C = self.union(E, A) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), self.Akeys) def testEmptyIntersection(self): for A in self.As: for E in self.emptys: C = self.intersection(A, E) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), []) C = self.intersection(E, A) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), []) def testEmptyDifference(self): for A in self.As: for E in self.emptys: C = self.difference(A, E) # Difference preserves LHS values. self.assertEqual(hasattr(C, "values"), hasattr(A, "values")) if hasattr(A, "values"): self.assertEqual(list(C.items()), list(A.items())) else: self.assertEqual(list(C), self.Akeys) C = self.difference(E, A) self.assertEqual(hasattr(C, "values"), hasattr(E, "values")) self.assertEqual(list(C), []) def _reversed(self, x): x = list(x) x.reverse() return x def testUnion(self): inputs = self.As + self.Bs for A in inputs: for B in inputs: for convert in lambda x: x, self._reversed, list, tuple, set: # For all of these tests, we need to be sure we have at # least one value that is *not* sorted relative to the # other. 
See # https://github.com/zopefoundation/BTrees/issues/171 a = convert(A) b = convert(B) if hasattr(b, 'reverse'): b.reverse() __traceback_info__ = a, b C = self.union(a, b) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), self._union(a, b)) self.assertEqual(set(A) | set(B), set(A | B)) def testIntersection(self): inputs = self.As + self.Bs for A in inputs: for B in inputs: for convert in lambda x: x, self._reversed, list, tuple, set: a = convert(A) b = convert(B) if hasattr(b, 'reverse'): b.reverse() __traceback_info__ = a, b C = self.intersection(a, b) self.assertTrue(not hasattr(C, "values")) self.assertEqual(list(C), self._intersection(A, B)) self.assertEqual(set(A) & set(B), set(A & B)) def testDifference(self): inputs = self.As + self.Bs for A in inputs: for B in inputs: for convert in lambda x: x, self._reversed, list, tuple, set: # Difference is unlike the others: The first argument # must be a BTree type, in both C and Python. C = self.difference(A, convert(B)) # Difference preserves LHS values. self.assertEqual( hasattr(C, "values"), hasattr(A, "values") ) want = self._difference(A, B) if hasattr(A, "values"): self.assertEqual(list(C.items()), want) else: self.assertEqual(list(C), want) self.assertEqual(set(A) - set(B), set(A - B)) def testLargerInputs(self): from random import randint from BTrees.IIBTree import IISet MAXSIZE = 200 MAXVAL = 400 K = self.KEYS for i in range(3): n = randint(0, MAXSIZE) Akeys = [randint(1, MAXVAL) for j in range(n)] As = [makeset(Akeys) for makeset in self.builders()] Akeys = IISet(Akeys) n = randint(0, MAXSIZE) Bkeys = [randint(1, MAXVAL) for j in range(n)] Bs = [makeset(Bkeys) for makeset in self.builders()] Bkeys = IISet(Bkeys) Akeys = [K[k] for k in Akeys] Bkeys = [K[k] for k in Bkeys] for op, simulator in ((self.union, self._union), (self.intersection, self._intersection), (self.difference, self._difference)): for A in As: for B in Bs: got = op(A, B) want = simulator(Akeys, Bkeys) self.assertEqual( list(got), want, (A, B, Akeys, Bkeys, list(got), want) ) class Weighted(SignedMixin): # Subclasses must set up (as class variables): # weightedUnion, weightedIntersection # builders -- sequence of constructors, taking items # union, intersection -- the module routines of those names # mkbucket -- the module bucket builder def setUp(self): self.Aitems = [(1, 10), (3, 30), (5, 50), (6, 60)] self.Bitems = [(2, 21), (3, 31), (4, 41), (6, 61), (7, 71)] self.As = [make(self.Aitems) for make in self.builders()] self.Bs = [make(self.Bitems) for make in self.builders()] self.emptys = [make([]) for make in self.builders()] weights = [] for w1 in -3, -1, 0, 1, 7: for w2 in -3, -1, 0, 1, 7: weights.append((w1, w2)) self.weights = weights def testBothNone(self): for op in self.weightedUnion(), self.weightedIntersection(): w, C = op(None, None) self.assertTrue(C is None) self.assertEqual(w, 0) w, C = op(None, None, 42, 666) self.assertTrue(C is None) self.assertEqual(w, 0) def testLeftNone(self): for op in self.weightedUnion(), self.weightedIntersection(): for A in self.As + self.emptys: w, C = op(None, A) self.assertTrue(C is A) self.assertEqual(w, 1) w, C = op(None, A, 42, 666) self.assertTrue(C is A) self.assertEqual(w, 666) def testRightNone(self): for op in self.weightedUnion(), self.weightedIntersection(): for A in self.As + self.emptys: w, C = op(A, None) self.assertTrue(C is A) self.assertEqual(w, 1) w, C = op(A, None, 42, 666) self.assertTrue(C is A) self.assertEqual(w, 42) # If obj is a set, return a bucket with values all 1; else 
return obj. def _normalize(self, obj): if isaset(obj): obj = self.mkbucket(list(zip(obj, [1] * len(obj)))) return obj # Python simulation of weightedUnion. def _wunion(self, A, B, w1=1, w2=1): if isaset(A) and isaset(B): return 1, self.union()(A, B).keys() A = self._normalize(A) B = self._normalize(B) result = [] for key in self.union()(A, B): v1 = A.get(key, 0) v2 = B.get(key, 0) result.append((key, v1*w1 + v2*w2)) return 1, result def testUnion(self): inputs = self.As + self.Bs + self.emptys for A in inputs: for B in inputs: want_w, want_s = self._wunion(A, B) got_w, got_s = self.weightedUnion()(A, B) self.assertEqual(got_w, want_w) if isaset(got_s): self.assertEqual(got_s.keys(), want_s) else: self.assertEqual(got_s.items(), want_s) for w1, w2 in self.weights: if ( (w1 < 0 or w2 < 0) and not self.SUPPORTS_NEGATIVE_VALUES ): continue want_w, want_s = self._wunion(A, B, w1, w2) got_w, got_s = self.weightedUnion()(A, B, w1, w2) self.assertEqual(got_w, want_w) if isaset(got_s): self.assertEqual(got_s.keys(), want_s) else: self.assertEqual(got_s.items(), want_s) # Python simulation weightedIntersection. def _wintersection(self, A, B, w1=1, w2=1): if isaset(A) and isaset(B): return w1 + w2, self.intersection()(A, B).keys() A = self._normalize(A) B = self._normalize(B) result = [] for key in self.intersection()(A, B): result.append((key, A[key]*w1 + B[key]*w2)) return 1, result def testIntersection(self): inputs = self.As + self.Bs + self.emptys for A in inputs: for B in inputs: want_w, want_s = self._wintersection(A, B) got_w, got_s = self.weightedIntersection()(A, B) self.assertEqual(got_w, want_w) if isaset(got_s): self.assertEqual(got_s.keys(), want_s) else: self.assertEqual(got_s.items(), want_s) for w1, w2 in self.weights: if ( (w1 < 0 or w2 < 0) and not self.SUPPORTS_NEGATIVE_VALUES ): continue want_w, want_s = self._wintersection(A, B, w1, w2) got_w, got_s = self.weightedIntersection()(A, B, w1, w2) self.assertEqual(got_w, want_w) if isaset(got_s): self.assertEqual(got_s.keys(), want_s) else: self.assertEqual(got_s.items(), want_s) # Given a set builder (like OITreeSet or OISet), return a function that # takes a list of (key, value) pairs and builds a set out of the keys. def itemsToSet(setbuilder): def result(items, setbuilder=setbuilder): return setbuilder([key for key, value in items]) return result # 'thing' is a bucket, btree, set or treeset. Return true iff it's one of the # latter two. 
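# --------------------------------------------------------------------------
# Sketch of the weighted set operations the Weighted tests simulate in pure
# Python above: weightedUnion/weightedIntersection return a (weight, result)
# pair, combining mapping values as v1*w1 + v2*w2 (BTrees.IIBTree assumed).
from BTrees.IIBTree import IIBucket, weightedUnion, weightedIntersection

_a = IIBucket({1: 10, 2: 20})
_b = IIBucket({2: 1, 3: 3})

_w, _c = weightedUnion(_a, _b, 1, 2)
assert _w == 1
assert dict(_c.items()) == {1: 10, 2: 22, 3: 6}   # missing keys count as 0

_w, _c = weightedIntersection(_a, _b)
assert dict(_c.items()) == {2: 21}                # only the common key survives
# --------------------------------------------------------------------------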
def isaset(thing): return not hasattr(thing, 'values') class MultiUnion(SignedMixin): # Subclasses must set up (as class variables): # multiunion, union # mkset, mktreeset # mkbucket, mkbtree def setUp(self): super().setUp() _skip_if_pure_py_and_py_test(self) def testEmpty(self): self.assertEqual(len(self.multiunion([])), 0) def _testOne(self, builder): for sequence in ( [3], list(range(20)), list(range(-10, 0, 2)) + list(range(1, 10, 2)), ): if min(sequence) < 0 and not self.SUPPORTS_NEGATIVE_KEYS: continue seq1 = sequence[:] seq2 = list(reversed(sequence[:])) seqsorted = sorted(sequence[:]) for seq in seq1, seq2, seqsorted: input = builder(seq) output = self.multiunion([input]) self.assertEqual(len(seq), len(output)) self.assertEqual(seqsorted, list(output)) def testOneBTSet(self): self._testOne(self.mkset) def testOneBTTreeSet(self): self._testOne(self.mktreeset) def testOneList(self): self._testOne(list) def testOneTuple(self): self._testOne(tuple) def testOneSet(self): self._testOne(set) def testOneGenerator(self): def generator(seq): yield from seq self._testOne(generator) def testValuesIgnored(self): for builder in self.mkbucket, self.mkbtree, dict: input = builder([(1, 2), (3, 4), (5, 6)]) output = self.multiunion([input]) self.assertEqual([1, 3, 5], list(output)) def testValuesIgnoredNonInteger(self): # This only uses a dict because the bucket and tree can't # hold non-integers. i1 = {1: 'a', 2: 'b'} i2 = {1: 'c', 3: 'd'} output = self.multiunion((i1, i2)) self.assertEqual([1, 2, 3], list(output)) def testRangeInputs(self): i1 = range(3) i2 = range(7) output = self.multiunion((i1, i2)) self.assertEqual([0, 1, 2, 3, 4, 5, 6], list(output)) def testNegativeKeys(self): i1 = (-1, -2, -3) i2 = (0, 1, 2) if not self.SUPPORTS_NEGATIVE_KEYS: with self.assertRaises(TypeError): self.multiunion((i2, i1)) else: output = self.multiunion((i2, i1)) self.assertEqual([-3, -2, -1, 0, 1, 2], list(output)) def testOneIterableWithBadKeys(self): i1 = [1, 2, 3, 'a'] for kind in list, tuple: with self.assertRaises(TypeError): self.multiunion((kind(i1),)) def testBadIterable(self): class MyException(Exception): pass def gen(): yield from range(3) raise MyException with self.assertRaises(MyException): self.multiunion((gen(),)) def testBigInput(self): N = 100000 if ( (_c_optimizations_ignored() or 'Py' in type(self).__name__) and not PYPY ): # This is extremely slow in CPython implemented in Python, # taking 20s or more on a 2015-era laptop N = N // 10 input = self.mkset(list(range(N))) output = self.multiunion([input] * 10) self.assertEqual(len(output), N) self.assertEqual(output.minKey(), 0) self.assertEqual(output.maxKey(), N-1) self.assertEqual(list(output), list(range(N))) def testLotsOfLittleOnes(self): from random import shuffle N = 5000 inputs = [] mkset, mktreeset = self.mkset, self.mktreeset for i in range(N): if self.SUPPORTS_NEGATIVE_KEYS: base = i * 4 - N else: base = i * 4 inputs.append(mkset([base, base + 1])) inputs.append(mktreeset([base + 2, base + 3])) shuffle(inputs) output = self.multiunion(inputs) self.assertEqual(len(output), N * 4) if self.SUPPORTS_NEGATIVE_KEYS: self.assertEqual(list(output), list(range(-N, 3 * N))) else: self.assertEqual(list(output), list(range(N * 4))) def testFunkyKeyIteration(self): # The internal set iteration protocol allows "iterating over" a # a single key as if it were a set. 
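# --------------------------------------------------------------------------
# Sketch of multiunion, whose input handling the MultiUnion tests cover:
# each element of the sequence may be a BTrees set, any iterable of keys,
# or, per the comment above, a single key standing in for a singleton set.
from BTrees.IIBTree import IISet, multiunion

_parts = [IISet([1, 2]), IISet([2, 3]), (4, 5), range(6, 8)]
assert list(multiunion(_parts)) == [1, 2, 3, 4, 5, 6, 7]
assert list(multiunion([5, 3, 3])) == [3, 5]   # bare keys act as singletons
# --------------------------------------------------------------------------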
N = 100 union, mkset = self.union, self.mkset slow = mkset() for i in range(N): slow = union(slow, mkset([i])) fast = self.multiunion(list(range(N))) # ~ N distinct singleton sets self.assertEqual(len(slow), N) self.assertEqual(len(fast), N) self.assertEqual(list(slow), list(fast)) self.assertEqual(list(fast), list(range(N))) class ConflictTestBase(SignedMixin): # Tests common to all types: sets, buckets, and BTrees storage = None db = None def setUp(self): super().setUp() _skip_if_pure_py_and_py_test(self) def identity(x): return x self.key_tx = abs if not self.SUPPORTS_NEGATIVE_KEYS else identity self.val_tx = abs if not self.SUPPORTS_NEGATIVE_VALUES else identity def tearDown(self): import transaction transaction.abort() if self.storage is not None: self.storage.close() self.storage.cleanup() def _makeOne(self): return self._getTargetClass()() def openDB(self): import os from ZODB.DB import DB from ZODB.FileStorage import FileStorage n = 'fs_tmp__%s' % os.getpid() self.storage = FileStorage(n) self.db = DB(self.storage) return self.db def _test_merge( self, o1, o2, o3, expect, message='failed to merge', should_fail=False, ): from BTrees.Interfaces import BTreesConflictError s1 = o1.__getstate__() s2 = o2.__getstate__() s3 = o3.__getstate__() expected = expect.__getstate__() if expected is None: expected = ((((),),),) if should_fail: with self.assertRaises(BTreesConflictError): __traceback_info__ = message o1._p_resolveConflict(s1, s2, s3) else: merged = o1._p_resolveConflict(s1, s2, s3) self.assertEqual(merged, expected, message) class MappingConflictTestBase(ConflictTestBase): # Tests common to mappings (buckets, btrees). def _skip_if_only_small_keys(self): try: self.coerce_to_key(99999) except TypeError: assert 'fs' in self._getTargetClass().__name__ self.skipTest("Uses keys too large for fsBTree") def _deletefail(self): t = self._makeOne() del t[self.KEYS[1]] def _setupConflict(self): if self._getTargetClass().__name__.startswith('fs'): # Too many negative numbers, could be done with a little work # though. 
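            # (fsBTree keys are two-byte strings, so the signed integer keys
            # used below have no direct representation there.)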
self.skipTest("Needs ported to fsBTree") key_tx = self.key_tx keys = [ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067 ] keys = [key_tx(v) for v in keys] e1 = [(-1704, 0), (5420, 1), (-239, 2), (4024, 3), (-6984, 4)] e1 = [(key_tx(k), v) for k, v in e1] e2 = [(7745, 0), (4868, 1), (-2548, 2), (-2711, 3), (-3154, 4)] e2 = [(key_tx(k), v) for k, v in e2] base = self._makeOne() base.update([ (self.coerce_to_key(i), self.coerce_to_value(i * i)) for i in keys[:20] ]) b1 = type(base)(base) b2 = type(base)(base) bm = type(base)(base) items = base.items() return base, b1, b2, bm, e1, e2, items def testMergeDelete(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() del b1[items[1][0]] del b2[items[5][0]] del b1[items[-1][0]] del b2[items[-2][0]] del bm[items[1][0]] del bm[items[5][0]] del bm[items[-1][0]] del bm[items[-2][0]] self._test_merge(base, b1, b2, bm, 'merge delete') def testMergeDeleteAndUpdate(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() V = self.VALUES del b1[items[1][0]] b2[items[5][0]] = V[1] del b1[items[-1][0]] b2[items[-2][0]] = V[2] del bm[items[1][0]] bm[items[5][0]] = V[1] del bm[items[-1][0]] bm[items[-2][0]] = V[2] self._test_merge(base, b1, b2, bm, 'merge update and delete') def testMergeUpdate(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() V = self.VALUES b1[items[0][0]] = V[1] b2[items[5][0]] = V[2] b1[items[-1][0]] = V[3] b2[items[-2][0]] = V[4] bm[items[0][0]] = V[1] bm[items[5][0]] = V[2] bm[items[-1][0]] = V[3] bm[items[-2][0]] = V[4] self._test_merge(base, b1, b2, bm, 'merge update') def testFailMergeDelete(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() del b1[items[0][0]] del b2[items[0][0]] self._test_merge(base, b1, b2, bm, 'merge conflicting delete', should_fail=1) def testFailMergeUpdate(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() V = self.VALUES b1[items[0][0]] = V[1] b2[items[0][0]] = V[2] self._test_merge(base, b1, b2, bm, 'merge conflicting update', should_fail=1) def testFailMergeDeleteAndUpdate(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() del b1[items[0][0]] b2[items[0][0]] = self.val_tx(-9) self._test_merge( base, b1, b2, bm, 'merge conflicting update and delete', should_fail=1, ) def testMergeInserts(self): self._skip_if_only_small_keys() base, b1, b2, bm, e1, e2, items = self._setupConflict() b1[self.key_tx(-99999)] = self.val_tx(-99999) b1[e1[0][0]] = e1[0][1] b2[99999] = self.coerce_to_value(99999) b2[e1[2][0]] = e1[2][1] bm[self.key_tx(-99999)] = self.val_tx(-99999) bm[e1[0][0]] = e1[0][1] bm[99999] = self.coerce_to_value(99999) bm[e1[2][0]] = e1[2][1] self._test_merge(base, b1, b2, bm, 'merge insert', should_fail=not self.SUPPORTS_NEGATIVE_KEYS) def testMergeInsertsFromEmpty(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() base.clear() b1.clear() b2.clear() bm.clear() b1.update(e1) bm.update(e1) b2.update(e2) bm.update(e2) self._test_merge(base, b1, b2, bm, 'merge insert from empty') def testFailMergeEmptyAndFill(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.clear() bm.clear() b2.update(e2) bm.update(e2) self._test_merge( base, b1, b2, bm, 'merge insert from empty', should_fail=1, ) def testMergeEmpty(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.clear() bm.clear() self._test_merge( base, b1, b2, bm, 'empty one and not other', should_fail=1, ) def testFailMergeInsert(self): self._skip_if_only_small_keys() base, b1, b2, bm, e1, 
e2, items = self._setupConflict() b1[self.key_tx(-99999)] = self.val_tx(-99999) b1[e1[0][0]] = e1[0][1] b2[99999] = self.coerce_to_value(99999) b2[e1[0][0]] = e1[0][1] self._test_merge(base, b1, b2, bm, 'merge conflicting inserts', should_fail=1) class SetConflictTestBase(ConflictTestBase): "Set (as opposed to TreeSet) specific tests." def _skip_if_only_small_keys(self): try: self.coerce_to_key(99999) except TypeError: assert 'fs' in self._getTargetClass().__name__ self.skipTest("Uses keys too large for fsBTree") def _setupConflict(self): def to_key(x): return self.coerce_to_key(self.key_tx(x)) items = [ to_key(x)for x in [ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067, ] ] e1 = [to_key(x) for x in [-1704, 5420, -239, 4024, -6984]] e2 = [to_key(x) for x in [7745, 4868, -2548, -2711, -3154]] base = self._makeOne() base.update(items) b1 = base.__class__(base) b2 = base.__class__(base) bm = base.__class__(base) items = base.keys() return base, b1, b2, bm, e1, e2, items def testMergeDelete(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.remove(items[1]) b2.remove(items[5]) b1.remove(items[-1]) b2.remove(items[-2]) bm.remove(items[1]) bm.remove(items[5]) bm.remove(items[-1]) bm.remove(items[-2]) self._test_merge(base, b1, b2, bm, 'merge delete') def testFailMergeDelete(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.remove(items[0]) b2.remove(items[0]) self._test_merge(base, b1, b2, bm, 'merge conflicting delete', should_fail=1) def testMergeInserts(self): self._skip_if_only_small_keys() base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.insert(self.key_tx(-99999)) b1.insert(e1[0]) b2.insert(99999) b2.insert(e1[2]) bm.insert(self.key_tx(-99999)) bm.insert(e1[0]) bm.insert(99999) bm.insert(e1[2]) self._test_merge(base, b1, b2, bm, 'merge insert', should_fail=not self.SUPPORTS_NEGATIVE_KEYS) def testMergeInsertsFromEmpty(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() base.clear() b1.clear() b2.clear() bm.clear() b1.update(e1) bm.update(e1) b2.update(e2) bm.update(e2) self._test_merge(base, b1, b2, bm, 'merge insert from empty') def testFailMergeEmptyAndFill(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.clear() bm.clear() b2.update(e2) bm.update(e2) self._test_merge( base, b1, b2, bm, 'merge insert from empty', should_fail=1, ) def testMergeEmpty(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.clear() bm.clear() self._test_merge( base, b1, b2, bm, 'empty one and not other', should_fail=1, ) def testFailMergeInsert(self): self._skip_if_only_small_keys() base, b1, b2, bm, e1, e2, items = self._setupConflict() b1.insert(self.coerce_to_key(self.key_tx(-99999))) b1.insert(e1[0]) b2.insert(99999) b2.insert(e1[0]) self._test_merge(base, b1, b2, bm, 'merge conflicting inserts', should_fail=1) # utility functions def lsubtract(l1, l2): l1 = list(l1) l2 = list(l2) return (list(filter(lambda x, l1=l1: x not in l1, l2)) + list(filter(lambda x, l2=l2: x not in l2, l1))) def realseq(itemsob): return list(itemsob) def permutations(x): # Return a list of all permutations of list x. n = len(x) if n <= 1: return [x] result = [] x0 = x[0] for i in range(n): # Build the (n-1)! permutations with x[i] in the first position. 
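        # (The swap below moves x[0] into slot i, so xcopy[1:] holds the
        # remaining n-1 elements; prepending first == x[i] to each of their
        # permutations covers every ordering -- e.g. permutations([1, 2, 3])
        # returns all six of them.)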
xcopy = x[:] first, xcopy[i] = xcopy[i], x0 result.extend([[first] + p for p in permutations(xcopy[1:])]) return result ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/testBTrees.py0000644000076500000240000004267314626022106017213 0ustar00jensstaff############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import unittest from BTrees.tests.common import permutations class DegenerateBTree(unittest.TestCase): # Build a degenerate tree (set). Boxes are BTree nodes. There are # 5 leaf buckets, each containing a single int. Keys in the BTree # nodes don't appear in the buckets. Seven BTree nodes are purely # indirection nodes (no keys). Buckets aren't all at the same depth: # # +------------------------+ # | 4 | # +------------------------+ # | | # | v # | +-+ # | | | # | +-+ # | | # v v # +-------+ +-------------+ # | 2 | | 6 10 | # +-------+ +-------------+ # | | | | | # v v v v v # +-+ +-+ +-+ +-+ +-+ # | | | | | | | | | | # +-+ +-+ +-+ +-+ +-+ # | | | | | # v v v v v # 1 3 +-+ 7 11 # | | # +-+ # | # v # 5 # # This is nasty for many algorithms. Consider a high-end range search # for 4. The BTree nodes direct it to the 5 bucket, but the correct # answer is the 3 bucket, which requires going in a different direction # at the very top node already. Consider a low-end range search for # 9. The BTree nodes direct it to the 7 bucket, but the correct answer # is the 11 bucket. This is also a nasty-case tree for deletions. def _build_degenerate_tree(self): # Build the buckets and chain them together. from BTrees.check import check from BTrees.IIBTree import IISet from BTrees.IIBTree import IITreeSet bucket11 = IISet([11]) bucket7 = IISet() bucket7.__setstate__(((7,), bucket11)) bucket5 = IISet() bucket5.__setstate__(((5,), bucket7)) bucket3 = IISet() bucket3.__setstate__(((3,), bucket5)) bucket1 = IISet() bucket1.__setstate__(((1,), bucket3)) # Build the deepest layers of indirection nodes. ts = IITreeSet tree1 = ts() tree1.__setstate__(((bucket1,), bucket1)) tree3 = ts() tree3.__setstate__(((bucket3,), bucket3)) tree5lower = ts() tree5lower.__setstate__(((bucket5,), bucket5)) tree5 = ts() tree5.__setstate__(((tree5lower,), bucket5)) tree7 = ts() tree7.__setstate__(((bucket7,), bucket7)) tree11 = ts() tree11.__setstate__(((bucket11,), bucket11)) # Paste together the middle layers. tree13 = ts() tree13.__setstate__(((tree1, 2, tree3), bucket1)) tree5711lower = ts() tree5711lower.__setstate__(((tree5, 6, tree7, 10, tree11), bucket5)) tree5711 = ts() tree5711.__setstate__(((tree5711lower,), bucket5)) # One more. 
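        # (This is the root shown at the top of the diagram above: the key 4
        # separating the tree13 and tree5711 subtrees, with bucket1 as the
        # firstbucket.)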
t = ts() t.__setstate__(((tree13, 4, tree5711), bucket1)) t._check() check(t) return t, [1, 3, 5, 7, 11] def testBasicOps(self): t, keys = self._build_degenerate_tree() self.assertEqual(len(t), len(keys)) self.assertEqual(list(t.keys()), keys) self.assertTrue(t.has_key(1)) self.assertTrue(t.has_key(3)) self.assertTrue(t.has_key(5)) self.assertTrue(t.has_key(7)) self.assertTrue(t.has_key(11)) for i in 0, 2, 4, 6, 8, 9, 10, 12: self.assertNotIn(i, t) def _checkRanges(self, tree, keys): self.assertEqual(len(tree), len(keys)) sorted_keys = keys[:] sorted_keys.sort() self.assertEqual(list(tree.keys()), sorted_keys) for k in keys: self.assertTrue(k in tree) if keys: lokey = sorted_keys[0] hikey = sorted_keys[-1] self.assertEqual(lokey, tree.minKey()) self.assertEqual(hikey, tree.maxKey()) else: lokey = hikey = 42 # Try all range searches. for lo in range(lokey - 1, hikey + 2): for hi in range(lo - 1, hikey + 2): for skipmin in False, True: for skipmax in False, True: wantlo, wanthi = lo, hi if skipmin: wantlo += 1 if skipmax: wanthi -= 1 want = [k for k in keys if wantlo <= k <= wanthi] got = list(tree.keys(lo, hi, skipmin, skipmax)) self.assertEqual(want, got) def testRanges(self): t, keys = self._build_degenerate_tree() self._checkRanges(t, keys) def testDeletes(self): # Delete keys in all possible orders, checking each tree along # the way. # This is a tough test. Previous failure modes included: # 1. A variety of assertion failures in _checkRanges. # 2. Assorted "Invalid firstbucket pointer" failures at # seemingly random times, coming out of the BTree destructor. # 3. Under Python 2.3 CVS, some baffling # RuntimeWarning: tp_compare didn't return -1 or -2 for exception # warnings, possibly due to memory corruption after a BTree # goes insane. # On CPython in PURE_PYTHON mode, this is a *slow* test, taking 15+s # on a 2015 laptop. from BTrees.check import check t, keys = self._build_degenerate_tree() for oneperm in permutations(keys): t, keys = self._build_degenerate_tree() for key in oneperm: t.remove(key) keys.remove(key) t._check() check(t) self._checkRanges(t, keys) # We removed all the keys, so the tree should be empty now. self.assertEqual(t.__getstate__(), None) # A damaged tree may trigger an "invalid firstbucket pointer" # failure at the time its destructor is invoked. Try to force # that to happen now, so it doesn't look like a baffling failure # at some unrelated line. del t # trigger destructor LP294788_ids = {} class ToBeDeleted: def __init__(self, id): assert isinstance(id, int) # don't want to store any object ref here self.id = id global LP294788_ids LP294788_ids[id] = 1 def __del__(self): global LP294788_ids LP294788_ids.pop(self.id, None) def __le__(self, other): return self.id <= other.id def __lt__(self, other): return self.id < other.id def __eq__(self, other): return self.id == other.id def __ne__(self, other): return self.id != other.id def __gt__(self, other): return self.id > other.id def __ge__(self, other): return self.id >= other.id def __hash__(self): return hash(self.id) class TestBugFixes(unittest.TestCase): # Collector 1843. Error returns were effectively ignored in # Bucket_rangeSearch(), leading to "delayed" errors, or worse. def testFixed1843(self): from BTrees.IIBTree import IISet t = IISet() t.insert(1) # This one used to fail to raise the TypeError when it occurred. self.assertRaises(TypeError, t.keys, "") # This one used to segfault. 
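        # (Here the low bound is a valid int but the high bound is not; the
        # range search must propagate the TypeError rather than continue with
        # an undefined bound.)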
self.assertRaises(TypeError, t.keys, 0, "") def test_LP294788(self): # https://bugs.launchpad.net/bugs/294788 # BTree keeps some deleted objects referenced # The logic here together with the ToBeDeleted class is that # a separate reference dict is populated on object creation # and removed in __del__ # That means what's left in the reference dict is never GC'ed # therefore referenced somewhere # To simulate real life, some random data is used to exercise the tree import gc import random from BTrees.OOBTree import OOBTree t = OOBTree() trandom = random.Random('OOBTree') global LP294788_ids # /// BTree keys are integers, value is an object LP294788_ids = {} ids = {} for i in range(1024): if trandom.random() > 0.1 or not ids: # add id = None while id is None or id in ids: id = trandom.randint(0, 1000000) ids[id] = 1 t[id] = ToBeDeleted(id) else: # del keys = list(ids.keys()) if keys: id = trandom.choice(list(ids.keys())) del t[id] del ids[id] ids = ids.keys() trandom.shuffle(list(ids)) for id in ids: del t[id] ids = None # to be on the safe side run a full GC gc.collect() # print LP294788_ids self.assertEqual(len(t), 0) self.assertEqual(len(LP294788_ids), 0) # \\\ # /// BTree keys are integers, value is a tuple having an object LP294788_ids = {} ids = {} for i in range(1024): if trandom.random() > 0.1 or not ids: # add id = None while id is None or id in ids: id = trandom.randint(0, 1000000) ids[id] = 1 t[id] = (id, ToBeDeleted(id), 'somename') else: # del keys = list(ids.keys()) if keys: id = trandom.choice(keys) del t[id] del ids[id] ids = ids.keys() trandom.shuffle(list(ids)) for id in ids: del t[id] ids = None # to be on the safe side run a full GC gc.collect() # print LP294788_ids self.assertEqual(len(t), 0) self.assertEqual(len(LP294788_ids), 0) # \\\ # /// BTree keys are objects, value is an int t = OOBTree() LP294788_ids = {} ids = {} for i in range(1024): if trandom.random() > 0.1 or not ids: # add id = None while id is None or id in ids: id = ToBeDeleted(trandom.randint(0, 1000000)) ids[id] = 1 t[id] = 1 else: # del id = trandom.choice(list(ids.keys())) del ids[id] del t[id] ids = ids.keys() trandom.shuffle(list(ids)) for id in ids: del t[id] # release all refs ids = id = None # to be on the safe side run a full GC gc.collect() # print LP294788_ids self.assertEqual(len(t), 0) self.assertEqual(len(LP294788_ids), 0) # /// BTree keys are tuples having objects, value is an int t = OOBTree() LP294788_ids = {} ids = {} for i in range(1024): if trandom.random() > 0.1 or not ids: # add id = None while id is None or id in ids: id = trandom.randint(0, 1000000) id = (id, ToBeDeleted(id), 'somename') ids[id] = 1 t[id] = 1 else: # del id = trandom.choice(list(ids.keys())) del ids[id] del t[id] ids = ids.keys() trandom.shuffle(list(ids)) for id in ids: del t[id] # release all refs ids = id = None # to be on the safe side run a full GC gc.collect() # print LP294788_ids self.assertEqual(len(t), 0) self.assertEqual(len(LP294788_ids), 0) # comparison error propagation tests class DoesntLikeBeingCompared: def _cmp(self, other): raise ValueError('incomparable') __lt__ = __le__ = __eq__ = __ne__ = __ge__ = __gt__ = _cmp class TestCmpError(unittest.TestCase): def testFoo(self): from BTrees.OOBTree import OOBTree t = OOBTree() t['hello world'] = None try: t[DoesntLikeBeingCompared()] = None except ValueError as e: self.assertEqual(str(e), 'incomparable') else: self.fail('incomarable objects should not be allowed into ' 'the tree') class FamilyTest(unittest.TestCase): def test32(self): from 
zope.interface.verify import verifyObject import BTrees from BTrees.IOBTree import IOTreeSet verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family32) self.assertEqual( BTrees.family32.IO, BTrees.IOBTree) self.assertEqual( BTrees.family32.OI, BTrees.OIBTree) self.assertEqual( BTrees.family32.II, BTrees.IIBTree) self.assertEqual( BTrees.family32.IF, BTrees.IFBTree) self.assertEqual( BTrees.family32.UO, BTrees.UOBTree) self.assertEqual( BTrees.family32.OU, BTrees.OUBTree) self.assertEqual( BTrees.family32.UU, BTrees.UUBTree) self.assertEqual( BTrees.family32.UF, BTrees.UFBTree) self.assertEqual( BTrees.family32.OO, BTrees.OOBTree) self.assertEqual( BTrees.family32.OU, BTrees.OUBTree) s = IOTreeSet() s.insert(BTrees.family32.maxint) self.assertTrue(BTrees.family32.maxint in s) s = IOTreeSet() s.insert(BTrees.family32.minint) self.assertTrue(BTrees.family32.minint in s) s = IOTreeSet() # this next bit illustrates an, um, "interesting feature". If # the characteristics change to match the 64 bit version, please # feel free to change. with self.assertRaises((TypeError, OverflowError)): s.insert(BTrees.family32.maxint + 1) with self.assertRaises((TypeError, OverflowError)): s.insert(BTrees.family32.minint - 1) self.check_pickling(BTrees.family32) def test64(self): from zope.interface.verify import verifyObject import BTrees from BTrees.LOBTree import LOTreeSet verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family64) self.assertEqual( BTrees.family64.IO, BTrees.LOBTree) self.assertEqual( BTrees.family64.OI, BTrees.OLBTree) self.assertEqual( BTrees.family64.II, BTrees.LLBTree) self.assertEqual( BTrees.family64.IF, BTrees.LFBTree) self.assertEqual( BTrees.family64.UO, BTrees.QOBTree) self.assertEqual( BTrees.family64.OU, BTrees.OQBTree) self.assertEqual( BTrees.family64.UU, BTrees.QQBTree) self.assertEqual( BTrees.family64.UF, BTrees.QFBTree) self.assertEqual( BTrees.family64.OO, BTrees.OOBTree) self.assertEqual( BTrees.family64.OU, BTrees.OQBTree) s = LOTreeSet() s.insert(BTrees.family64.maxint) self.assertTrue(BTrees.family64.maxint in s) s = LOTreeSet() s.insert(BTrees.family64.minint) self.assertTrue(BTrees.family64.minint in s) s = LOTreeSet() # XXX why oh why do we expect ValueError here, but TypeError in test32? with self.assertRaises((TypeError, OverflowError)): s.insert(BTrees.family64.maxint + 1) with self.assertRaises((TypeError, OverflowError)): s.insert(BTrees.family64.minint - 1) self.check_pickling(BTrees.family64) def check_pickling(self, family): # The "family" objects are singletons; they can be pickled and # unpickled, and the same instances will always be returned on # unpickling, whether from the same unpickler or different # unpicklers. 
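        # (Under the hood the family objects reduce to a module-level factory
        # function, so every unpickle hands back the same singleton rather
        # than constructing a new instance.)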
import pickle from io import BytesIO s = pickle.dumps((family, family)) (f1, f2) = pickle.loads(s) self.assertIs(f1, family) self.assertIs(f2, family) # Using a single memo across multiple pickles: sio = BytesIO() p = pickle.Pickler(sio) p.dump(family) p.dump([family]) u = pickle.Unpickler(BytesIO(sio.getvalue())) f1 = u.load() f2, = u.load() self.assertTrue(f1 is family) self.assertTrue(f2 is family) # Using separate memos for each pickle: sio = BytesIO() p = pickle.Pickler(sio) p.dump(family) p.clear_memo() p.dump([family]) u = pickle.Unpickler(BytesIO(sio.getvalue())) f1 = u.load() f2, = u.load() self.assertTrue(f1 is family) self.assertTrue(f2 is family) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/testConflict.py0000644000076500000240000005137314626022106017565 0ustar00jensstaff############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import unittest from .common import ConflictTestBase from .common import _skip_wo_ZODB class NastyConfictFunctionalTests(ConflictTestBase, unittest.TestCase): # FUNCTESTS: Provoke various conflict scenarios using ZODB + transaction def _getTargetClass(self): from BTrees.OOBTree import OOBTree return OOBTree def openDB(self): # The conflict tests tend to open two or more connections # and then try to commit them. A standard FileStorage # is not MVCC aware, and so each connection would have the same # instance of the storage, leading to the error # "Duplicate tpc_begin calls for same transaction" on commit; # thus we use a MVCCMappingStorage for these tests, ensuring each # connection has its own storage. # # Unfortunately, it wants to acquire the identically same # non-recursive lock in each of its *its* tpc_* methods, which # deadlocks. # # The solution is to give each instance its own lock, and trust in the # serialization (ordering) of the datamanager, and the fact that these # tests are single-threaded. import threading from ZODB.DB import DB from ZODB.tests.MVCCMappingStorage import MVCCMappingStorage class _MVCCMappingStorage(MVCCMappingStorage): def new_instance(self): inst = MVCCMappingStorage.new_instance(self) inst._commit_lock = threading.Lock() return inst self.storage = _MVCCMappingStorage() self.db = DB(self.storage) return self.db @_skip_wo_ZODB def testSimpleConflict(self): # Invoke conflict resolution by committing a transaction and # catching a conflict in the storage. import transaction self.openDB() r1 = self.db.open().root() r1["t"] = t = self._makeOne() transaction.commit() r2 = self.db.open().root() copy = r2["t"] list(copy) # unghostify self.assertEqual(t._p_serial, copy._p_serial) t.update({1: 2, 2: 3}) transaction.commit() copy.update({3: 4}) transaction.commit() # This tests a problem that cropped up while trying to write # testBucketSplitConflict (below): conflict resolution wasn't # working at all in non-trivial cases. 
Symptoms varied from # strange complaints about pickling (despite that the test isn't # doing any *directly*), thru SystemErrors from Python and # AssertionErrors inside the BTree code. @_skip_wo_ZODB def testResolutionBlowsUp(self): import transaction b = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Invoke conflict resolution by committing a transaction. self.openDB() r1 = self.db.open().root() r1["t"] = b transaction.commit() r2 = self.db.open().root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(b._p_serial, copy._p_serial) b.update({1: 2, 2: 3}) transaction.commit() copy.update({3: 4}) transaction.commit() # if this doesn't blow up list(copy.values()) # and this doesn't either, then fine @_skip_wo_ZODB def testBucketSplitConflict(self): # Tests that a bucket split is viewed as a conflict. # It's (almost necessarily) a white-box test, and sensitive to # implementation details. import transaction from ZODB.POSException import ConflictError b = orig = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Invoke conflict resolution by committing a transaction. self.openDB() tm1 = transaction.TransactionManager() r1 = self.db.open(transaction_manager=tm1).root() r1["t"] = b tm1.commit() tm2 = transaction.TransactionManager() r2 = self.db.open(transaction_manager=tm2).root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(orig._p_serial, copy._p_serial) # In one transaction, add 16 new keys to bucket1, to force a bucket # split. b = orig numtoadd = 16 candidate = 60 while numtoadd: if candidate not in b: b[candidate] = candidate numtoadd -= 1 candidate += 1 # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 61 .. 74 # bucket 2 has 16 values: [75, 76 .. 81] + [84, 88 ..116] # bucket 3 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((b0, 60, b1, 75, b2, 120, b3), firstbucket) # The next block is still verifying preconditions. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 7) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 75) self.assertEqual(state[0][5], 120) tm1.commit() # In the other transaction, add 3 values near the tail end of bucket1. # This doesn't cause a split. b = copy for i in range(112, 116): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 18 values: 60, 64 .. 112, 113, 114, 115, 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # The next block is still verifying preconditions. 
self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.assertRaises(ConflictError, tm2.commit) @_skip_wo_ZODB def testEmptyBucketConflict(self): # Tests that an emptied bucket *created by* conflict resolution is # viewed as a conflict: conflict resolution doesn't have enough # info to unlink the empty bucket from the BTree correctly. import transaction from ZODB.POSException import ConflictError b = orig = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Invoke conflict resolution by committing a transaction. self.openDB() tm1 = transaction.TransactionManager() r1 = self.db.open(transaction_manager=tm1).root() r1["t"] = b tm1.commit() tm2 = transaction.TransactionManager() r2 = self.db.open(transaction_manager=tm2).root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(orig._p_serial, copy._p_serial) # In one transaction, delete half of bucket 1. b = orig for k in 60, 64, 68, 72, 76, 80, 84, 88: del b[k] # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 7 values: 92, 96, 100, 104, 108, 112, 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # The next block is still verifying preconditions. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 92) self.assertEqual(state[0][3], 120) tm1.commit() # In the other transaction, delete the other half of bucket 1. b = copy for k in 92, 96, 100, 104, 108, 112, 116: del b[k] # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 8 values: 60, 64, 68, 72, 76, 80, 84, 88 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # The next block is still verifying preconditions. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Conflict resolution empties bucket1 entirely. This used to # create an "insane" BTree (a legit BTree cannot contain an empty # bucket -- it contains NULL pointers the BTree code doesn't # expect, and segfaults result). self.assertRaises(ConflictError, tm2.commit) @_skip_wo_ZODB def testEmptyBucketNoConflict(self): # Tests that a plain empty bucket (on input) is not viewed as a # conflict. import transaction b = orig = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Invoke conflict resolution by committing a transaction. 
self.openDB() r1 = self.db.open().root() r1["t"] = orig transaction.commit() r2 = self.db.open().root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(orig._p_serial, copy._p_serial) # In one transaction, just add a key. b = orig b[1] = 1 # bucket 0 has 16 values: [0, 1] + [4, 8 .. 56] # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # The next block is still verifying preconditions. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) transaction.commit() # In the other transaction, delete bucket 2. b = copy for k in range(120, 200, 4): del b[k] # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1), firstbucket) # The next block is still verifying preconditions. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 3) self.assertEqual(state[0][1], 60) # This shouldn't create a ConflictError. transaction.commit() # And the resulting BTree shouldn't have internal damage. b._check() # The snaky control flow in _bucket__p_resolveConflict ended up trying # to decref a NULL pointer if conflict resolution was fed 3 empty # buckets. http://collector.zope.org/Zope/553 def testThreeEmptyBucketsNoSegfault(self): # Note that the conflict is raised by our C extension, rather than # indirectly via the storage, and hence is a more specialized type. # This test therefore does not require ZODB. from BTrees.Interfaces import BTreesConflictError t = self._makeOne() t[1] = 1 bucket = t._firstbucket del t[1] state1 = bucket.__getstate__() state2 = bucket.__getstate__() state3 = bucket.__getstate__() self.assertTrue(state2 is not state1 and state2 is not state3 and state3 is not state1) self.assertTrue(state2 == state1 and state3 == state1) self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, state1, state2, state3) # When an empty BTree resolves conflicts, it computes the # bucket state as None, so... self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, None, None, None) @_skip_wo_ZODB def testCantResolveBTreeConflict(self): # Test that a conflict involving two different changes to # an internal BTree node is unresolvable. An internal node # only changes when there are enough additions or deletions # to a child bucket that the bucket is split or removed. # It's (almost necessarily) a white-box test, and sensitive to # implementation details. import transaction from ZODB.POSException import ConflictError b = orig = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Set up database connections to provoke conflict. self.openDB() tm1 = transaction.TransactionManager() r1 = self.db.open(transaction_manager=tm1).root() r1["t"] = orig tm1.commit() tm2 = transaction.TransactionManager() r2 = self.db.open(transaction_manager=tm2).root() copy = r2["t"] # Make sure all of copy is loaded. 
list(copy.values()) self.assertEqual(orig._p_serial, copy._p_serial) # Now one transaction should add enough keys to cause a split, # and another should remove all the keys in one bucket. for k in range(200, 300, 4): orig[k] = k tm1.commit() for k in range(0, 60, 4): del copy[k] self.assertRaises(ConflictError, tm2.commit) @_skip_wo_ZODB def testConflictWithOneEmptyBucket(self): # If one transaction empties a bucket, while another adds an item # to the bucket, all the changes "look resolvable": bucket conflict # resolution returns a bucket containing (only) the item added by # the latter transaction, but changes from the former transaction # removing the bucket are uncontested: the bucket is removed from # the BTree despite that resolution thinks it's non-empty! This # was first reported by Dieter Maurer, to zodb-dev on 22 Mar 2005. import transaction from ZODB.POSException import ConflictError b = orig = self._makeOne() for i in range(0, 200, 4): b[i] = i # bucket 0 has 15 values: 0, 4 .. 56 # bucket 1 has 15 values: 60, 64 .. 116 # bucket 2 has 20 values: 120, 124 .. 196 state = b.__getstate__() # Looks like: ((bucket0, 60, bucket1, 120, bucket2), firstbucket) # If these fail, the *preconditions* for running the test aren't # satisfied -- the test itself hasn't been run yet. self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) # Set up database connections to provoke conflict. self.openDB() tm1 = transaction.TransactionManager() r1 = self.db.open(transaction_manager=tm1).root() r1["t"] = orig tm1.commit() tm2 = transaction.TransactionManager() r2 = self.db.open(transaction_manager=tm2).root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(orig._p_serial, copy._p_serial) # Now one transaction empties the first bucket, and another adds a # key to the first bucket. for k in range(0, 60, 4): del orig[k] tm1.commit() copy[1] = 1 self.assertRaises(ConflictError, tm2.commit) # Same thing, except commit the transactions in the opposite order. b = self._makeOne() for i in range(0, 200, 4): b[i] = i tm1 = transaction.TransactionManager() r1 = self.db.open(transaction_manager=tm1).root() r1["t"] = b tm1.commit() tm2 = transaction.TransactionManager() r2 = self.db.open(transaction_manager=tm2).root() copy = r2["t"] # Make sure all of copy is loaded. list(copy.values()) self.assertEqual(b._p_serial, copy._p_serial) # Now one transaction empties the first bucket, and another adds a # key to the first bucket. b[1] = 1 tm1.commit() for k in range(0, 60, 4): del copy[k] self.assertRaises(ConflictError, tm2.commit) @_skip_wo_ZODB def testConflictOfInsertAndDeleteOfFirstBucketItem(self): # Recently, BTrees became careful about removing internal keys # (keys in internal aka BTree nodes) when they were deleted from # buckets. This poses a problem for conflict resolution. # We want to guard against a case in which the first key in a # bucket is removed in one transaction while a key is added # after that key but before the next key in another transaction # with the result that the added key is unreachable. # original: # Bucket(...), k1, Bucket((k1, v1), (k3, v3), ...) # tran1 # Bucket(...), k3, Bucket(k3, v3), ...) # tran2 # Bucket(...), k1, Bucket((k1, v1), (k2, v2), (k3, v3), ...) # where k1 < k2 < k3 # We don't want: # Bucket(...), k3, Bucket((k2, v2), (k3, v3), ...) # as k2 would be unfindable, so we want a conflict. 
import transaction from ZODB.POSException import ConflictError mytype = self._getTargetClass() db = self.openDB() tm1 = transaction.TransactionManager() conn1 = db.open(tm1) conn1.root.t = t = mytype() for i in range(0, 200, 2): t[i] = i tm1.commit() k = t.__getstate__()[0][1] assert t.__getstate__()[0][2].keys()[0] == k tm2 = transaction.TransactionManager() conn2 = db.open(tm2) t[k+1] = k+1 del conn2.root.t[k] for i in range(200, 300): conn2.root.t[i] = i tm1.commit() self.assertRaises(ConflictError, tm2.commit) tm2.abort() k = t.__getstate__()[0][1] t[k+1] = k+1 del conn2.root.t[k] tm2.commit() self.assertRaises(ConflictError, tm1.commit) tm1.abort() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1715872922.0 BTrees-6.0/src/BTrees/tests/testPersistency.py0000644000076500000240000000316014621422232020322 0ustar00jensstaff############################################################################## # # Copyright (c) 2020 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## from unittest import TestCase from ..OOBTree import OOBTree from .common import ZODBAccess from .common import _skip_wo_ZODB BUCKET_SIZE = OOBTree.max_leaf_size class TestPersistency(ZODBAccess, TestCase): @_skip_wo_ZODB def test_empty_bucket_persistency(self): from transaction import commit root = self._getRoot() try: # tree with 3 buckets (internal implementation details) tree = OOBTree( {i: i for i in range(3 * BUCKET_SIZE // 2 + 2)}) root["tree"] = tree commit() # almost clear the second bucket keeping the last element for i in range(BUCKET_SIZE // 2, BUCKET_SIZE - 1): del tree[i] commit() del tree[BUCKET_SIZE - 1] # remove the last element commit() tree._check() tree._p_deactivate() tree._check() # fails in case of bad persistency finally: self._closeRoot(root) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_Length.py0000644000076500000240000000463714626022106017405 0ustar00jensstaff############################################################################## # # Copyright (c) 2008 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import unittest _marker = object() class TestLength(unittest.TestCase): def _getTargetClass(self): from BTrees.Length import Length return Length def _makeOne(self, value=_marker): if value is _marker: return self._getTargetClass()() return self._getTargetClass()(value) def test_ctor_default(self): length = self._makeOne() self.assertEqual(length.value, 0) def test_ctor_explict(self): length = self._makeOne(42) self.assertEqual(length.value, 42) def test___getstate___(self): length = self._makeOne(42) self.assertEqual(length.__getstate__(), 42) def test___setstate__(self): length = self._makeOne() length.__setstate__(42) self.assertEqual(length.value, 42) def test_set(self): length = self._makeOne() length.set(42) self.assertEqual(length.value, 42) def test__p_resolveConflict(self): length = self._makeOne() self.assertEqual(length._p_resolveConflict(5, 7, 9), 11) def test_change_w_positive_delta(self): length = self._makeOne() length.change(3) self.assertEqual(length.value, 3) def test_change_w_negative_delta(self): length = self._makeOne() length.change(-3) self.assertEqual(length.value, -3) def test___call___no_args(self): length = self._makeOne(42) self.assertEqual(length(), 42) def test___call___w_args(self): length = self._makeOne(42) self.assertEqual(length(0, None, (), [], {}), 42) def test_lp_516653(self): # Test for https://bugs.launchpad.net/zodb/+bug/516653 import copy length = self._makeOne() other = copy.copy(length) self.assertEqual(other(), 0) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_OOBTree.py0000644000076500000240000001301214626022106017406 0ustar00jensstaff############################################################################## # # Copyright (c) 2001-2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## from BTrees import OOBTree from ._test_builder import update_module from .common import BTreeTests class OOBTreeTest(BTreeTests): def test_byValue(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.byValue(22)), [(y, x) for x, y in reversed(ITEMS[22:])]) def testRejectDefaultComparisonOnSet(self): # Check that passing in keys w default comparison fails. Only # applies to new-style class instances if we're using the C # extensions; old-style instances are too hard to introspect # in C. # This is white box because we know that the check is being # used in a function that's used in lots of places. # Otherwise, there are many permutations that would have to be # checked. 
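        # (Rationale: a key type that falls back to default comparison has no
        # meaningful, stable ordering, so admitting such keys would corrupt
        # the BTree's ordering invariant.)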
t = self._makeOne() class OldStyle: pass if self._getTargetClass() is OOBTree.OOBTreePy: with self.assertRaises(TypeError): t[OldStyle()] = 1 class C: pass with self.assertRaises(TypeError) as raising: t[C()] = 1 self.assertEqual( raising.exception.args[0], "Object of class C has default comparison") class With___lt__: def __lt__(*args): return 1 c = With___lt__() t[c] = 1 t.clear() class With___lt__Old: def __lt__(*args): return 1 c = With___lt__Old() t[c] = 1 t.clear() def testAcceptDefaultComparisonOnGet(self): # Issue #42 t = self._makeOne() class C: pass self.assertEqual(t.get(C(), 42), 42) self.assertRaises(KeyError, t.__getitem__, C()) self.assertFalse(C() in t) def testNewStyleClassWithCustomMetaClassAllowed(self): class Meta(type): def __lt__(cls, other): return 1 cls = Meta('Class', (object,), {}) m = self._makeOne() m[cls] = self.getTwoValues()[0] def test_None_is_smallest(self): t = self._makeOne() for i in range(999): # Make sure we multiple buckets t[i] = i*i t[None] = -1 for i in range(-99, 0): # Make sure we multiple buckets t[i] = i*i self.assertEqual(list(t), [None] + list(range(-99, 999))) self.assertEqual(list(t.values()), [-1] + [i*i for i in range(-99, 999)]) self.assertEqual(t[2], 4) self.assertEqual(t[-2], 4) self.assertEqual(t[None], -1) t[None] = -2 self.assertEqual(t[None], -2) t2 = t.__class__(t) del t[None] self.assertEqual(list(t), list(range(-99, 999))) if 'Py' in self.__class__.__name__: return from BTrees.OOBTree import difference from BTrees.OOBTree import intersection from BTrees.OOBTree import union self.assertEqual(list(difference(t2, t).items()), [(None, -2)]) self.assertEqual(list(union(t, t2)), list(t2)) self.assertEqual(list(intersection(t, t2)), list(t)) def testDeleteNoneKey(self): # Check that a None key can be deleted in Python 2. # This doesn't work on Python 3 because None is unorderable, # so the tree can't be searched. But None also can't be inserted, # and we don't support migrating Python 2 databases to Python 3. t = self._makeOne() bucket_state = ((None, 42),) tree_state = ((bucket_state,),) t.__setstate__(tree_state) self.assertEqual(t[None], 42) del t[None] def testUnpickleNoneKey(self): # All versions (py2 and py3, C and Python) can unpickle # data that looks like this: {None: 42}, even though None # is unorderable.. # This pickle was captured in BTree/ZODB3 3.10.7 import pickle data = ( b'ccopy_reg\n__newobj__\np0\n(' b'cBTrees.OOBTree\nOOBTree\np1\ntp2\nRp3\n(' b'(((NI42\ntp4\ntp5\ntp6\ntp7\nb.' ) t = pickle.loads(data) keys = list(t) self.assertEqual([None], keys) def testIdentityTrumpsBrokenComparison(self): # Identical keys always match, even if their comparison is # broken. See https://github.com/zopefoundation/BTrees/issues/50 from functools import total_ordering @total_ordering class Bad: def __eq__(self, other): return False __lt__ = __cmp__ = __eq__ t = self._makeOne() bad_key = Bad() t[bad_key] = 42 self.assertIn(bad_key, t) self.assertEqual(list(t), [bad_key]) del t[bad_key] self.assertNotIn(bad_key, t) self.assertEqual(list(t), []) update_module(globals(), OOBTree, btree_tests_base=OOBTreeTest) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test__base.py0000644000076500000240000033024614626022106017233 0ustar00jensstaff############################################################################## # # Copyright 2012 Zope Foundation and Contributors. # All Rights Reserved. 
# # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import unittest def _assertRaises(self, e_type, checked, *args, **kw): try: checked(*args, **kw) except e_type as e: return e self.fail("Didn't raise: %s" % e_type.__name__) class Test_Base(unittest.TestCase): def _getTargetClass(self): from .._base import _Base return _Base def _makeOne(self, items=None): class _Test(self._getTargetClass()): max_leaf_size = 10 max_internal_size = 15 def clear(self): self._data = {} def update(self, d): self._data.update(d) return _Test(items) def test_ctor_wo_items(self): base = self._makeOne() self.assertEqual(base._data, {}) def test_ctor_w_items(self): base = self._makeOne({'a': 'b'}) self.assertEqual(base._data, {'a': 'b'}) class Test_BucketBase(unittest.TestCase): def _getTargetClass(self): from .._base import _BucketBase return _BucketBase def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor_defaults(self): bucket = self._makeOne() self.assertEqual(bucket._keys, []) self.assertEqual(bucket._next, None) self.assertEqual(len(bucket), 0) self.assertEqual(bucket.size, 0) def test__deleteNextBucket_none(self): bucket = self._makeOne() bucket._deleteNextBucket() # no raise self.assertTrue(bucket._next is None) def test__deleteNextBucket_one(self): bucket1 = self._makeOne() bucket1._next = self._makeOne() bucket1._deleteNextBucket() # no raise self.assertTrue(bucket1._next is None) def test__deleteNextBucket_two(self): bucket1 = self._makeOne() bucket2 = bucket1._next = self._makeOne() bucket3 = bucket2._next = self._makeOne() bucket1._deleteNextBucket() # no raise self.assertTrue(bucket1._next is bucket3) def test__search_empty(self): bucket = self._makeOne() self.assertEqual(bucket._search('nonesuch'), -1) def test__search_nonempty_miss(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._search('candy'), -3) def test__search_nonempty_hit(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._search('charlie'), 2) def test_minKey_empty(self): bucket = self._makeOne() self.assertRaises(IndexError, bucket.minKey) def test_minKey_no_bound(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.minKey(), 'alpha') def test_minKey_w_bound_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.minKey('bravo'), 'bravo') def test_minKey_w_bound_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.minKey('candy'), 'charlie') def test_minKey_w_bound_fail(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertRaises(ValueError, bucket.minKey, 'foxtrot') def test_maxKey_empty(self): bucket = self._makeOne() self.assertRaises(IndexError, bucket.maxKey) def test_maxKey_no_bound(self): bucket = self._makeOne() 
bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.maxKey(), 'echo') def test_maxKey_w_bound_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.maxKey('bravo'), 'bravo') def test_maxKey_w_bound_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.maxKey('candy'), 'bravo') def test_maxKey_w_bound_fail(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertRaises(ValueError, bucket.maxKey, 'abacus') def test__range_defaults_empty(self): bucket = self._makeOne() self.assertEqual(bucket._range(), (0, 0)) def test__range_defaults_filled(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(), (0, 5)) def test__range_defaults_exclude_min(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(excludemin=True), (1, 5)) def test__range_defaults_exclude_max(self): bucket = self._makeOne() bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(excludemax=True), (0, 4)) def test__range_w_min_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(min='bravo'), (1, 5)) def test__range_w_min_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(min='candy'), (2, 5)) def test__range_w_min_hit_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(min='bravo', excludemin=True), (2, 5)) def test__range_w_min_miss_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemin' doesn't fire on miss self.assertEqual(bucket._range(min='candy', excludemin=True), (2, 5)) def test__range_w_max_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(max='delta'), (0, 4)) def test__range_w_max_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(max='dandy'), (0, 3)) def test__range_w_max_hit_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket._range(max='delta', excludemax=True), (0, 3)) def test__range_w_max_miss_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemax' doesn't fire on miss self.assertEqual(bucket._range(max='dandy', excludemax=True), (0, 3)) def test_keys_defaults_empty(self): bucket = self._makeOne() self.assertEqual(bucket.keys(), []) def test_keys_defaults_filled(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(), KEYS[0: 5]) def test_keys_defaults_exclude_min(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 
'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(excludemin=True), KEYS[1: 5]) def test_keys_defaults_exclude_max(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(excludemax=True), KEYS[0: 4]) def test_keys_w_min_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(min='bravo'), KEYS[1: 5]) def test_keys_w_min_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(min='candy'), KEYS[2: 5]) def test_keys_w_min_hit_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(min='bravo', excludemin=True), KEYS[2: 5]) def test_keys_w_min_miss_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemin' doesn't fire on miss self.assertEqual(bucket.keys(min='candy', excludemin=True), KEYS[2: 5]) def test_keys_w_max_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(max='delta'), KEYS[0: 4]) def test_keys_w_max_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(max='dandy'), KEYS[0: 3]) def test_keys_w_max_hit_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(bucket.keys(max='delta', excludemax=True), KEYS[0: 3]) def test_keys_w_max_miss_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemax' doesn't fire on miss self.assertEqual(bucket.keys(max='dandy', excludemax=True), KEYS[0: 3]) def test_iterkeys_defaults_empty(self): bucket = self._makeOne() self.assertEqual(list(bucket.iterkeys()), []) def test_iterkeys_defaults_filled(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys()), KEYS[0: 5]) def test_iterkeys_defaults_exclude_min(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(excludemin=True)), KEYS[1: 5]) def test_iterkeys_defaults_exclude_max(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(excludemax=True)), KEYS[0: 4]) def test_iterkeys_w_min_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(min='bravo')), KEYS[1: 5]) def test_iterkeys_w_min_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(min='candy')), KEYS[2: 5]) def test_iterkeys_w_min_hit_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(min='bravo', 
excludemin=True)), KEYS[2: 5]) def test_iterkeys_w_min_miss_w_exclude_min(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemin' doesn't fire on miss self.assertEqual(list(bucket.iterkeys(min='candy', excludemin=True)), KEYS[2: 5]) def test_iterkeys_w_max_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(max='delta')), KEYS[0: 4]) def test_iterkeys_w_max_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(max='dandy')), KEYS[0: 3]) def test_iterkeys_w_max_hit_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual(list(bucket.iterkeys(max='delta', excludemax=True)), KEYS[0: 3]) def test_iterkeys_w_max_miss_w_exclude_max(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] # 'excludemax' doesn't fire on miss self.assertEqual(list(bucket.iterkeys(max='dandy', excludemax=True)), KEYS[0: 3]) def test___iter___empty(self): bucket = self._makeOne() self.assertEqual([x for x in bucket], []) def test___iter___filled(self): bucket = self._makeOne() KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertEqual([x for x in bucket], KEYS[0: 5]) def test___contains___empty(self): bucket = self._makeOne() bucket._to_key = lambda x: x self.assertFalse('nonesuch' in bucket) def test___contains___filled_miss(self): bucket = self._makeOne() bucket._to_key = lambda x: x bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] self.assertFalse('nonesuch' in bucket) def test___contains___filled_hit(self): bucket = self._makeOne() bucket._to_key = lambda x: x KEYS = bucket._keys = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: self.assertTrue(key in bucket) class Test_SetIteration(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import _SetIteration return _SetIteration def _makeOne(self, to_iterate, useValues=False, default=None): return self._getTargetClass()(to_iterate, useValues, default) def test_ctor_w_None(self): from .._base import _marker si = self._makeOne(None) self.assertEqual(si.useValues, False) self.assertTrue(si.key is _marker) self.assertEqual(si.value, None) self.assertEqual(si.active, False) self.assertEqual(si.position, -1) def test_ctor_w_non_empty_list(self): si = self._makeOne(['a', 'b', 'c']) self.assertEqual(si.useValues, False) self.assertEqual(si.key, 'a') self.assertEqual(si.value, None) self.assertEqual(si.active, True) self.assertEqual(si.position, 1) class BucketTests(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import Bucket return Bucket def _makeOne(self): from .._datatypes import O class _Bucket(self._getTargetClass()): _to_key = O() def _to_value(self, x): return x return _Bucket() def test_ctor_defaults(self): bucket = self._makeOne() self.assertEqual(bucket._keys, []) self.assertEqual(bucket._values, []) def test_setdefault_miss(self): bucket = self._makeOne() self.assertEqual(bucket.setdefault('a', 'b'), 'b') self.assertEqual(bucket._keys, ['a']) self.assertEqual(bucket._values, ['b']) def test_setdefault_hit(self): bucket = self._makeOne()
bucket._keys.append('a') bucket._values.append('b') self.assertEqual(bucket.setdefault('a', 'b'), 'b') self.assertEqual(bucket._keys, ['a']) self.assertEqual(bucket._values, ['b']) def test_pop_miss_no_default(self): bucket = self._makeOne() self.assertRaises(KeyError, bucket.pop, 'nonesuch') def test_pop_miss_w_default(self): bucket = self._makeOne() self.assertEqual(bucket.pop('nonesuch', 'b'), 'b') def test_pop_hit(self): bucket = self._makeOne() bucket._keys.append('a') bucket._values.append('b') self.assertEqual(bucket.pop('a'), 'b') self.assertEqual(bucket._keys, []) self.assertEqual(bucket._values, []) def test_update_value_w_iteritems(self): bucket = self._makeOne() bucket.update({'a': 'b'}) self.assertEqual(bucket._keys, ['a']) self.assertEqual(bucket._values, ['b']) def test_update_value_w_items(self): bucket = self._makeOne() class Foo: def items(self): return [('a', 'b')] bucket.update(Foo()) self.assertEqual(bucket._keys, ['a']) self.assertEqual(bucket._values, ['b']) def test_update_value_w_invalid_items(self): bucket = self._makeOne() class Foo: def items(self): return ('a', 'b', 'c') self.assertRaises(TypeError, bucket.update, Foo()) def test_update_sequence(self): bucket = self._makeOne() bucket.update([('a', 'b')]) self.assertEqual(bucket._keys, ['a']) self.assertEqual(bucket._values, ['b']) def test_update_replacing(self): bucket = self._makeOne() bucket['a'] = 'b' bucket.update([('a', 'c')]) self.assertEqual(bucket['a'], 'c') def test___setitem___incomparable(self): bucket = self._makeOne() def _should_error(): bucket[object()] = 'b' self.assertRaises(TypeError, _should_error) def test___setitem___comparable(self): bucket = self._makeOne() bucket['a'] = 'b' self.assertEqual(bucket['a'], 'b') def test___setitem___replace(self): bucket = self._makeOne() bucket['a'] = 'b' bucket['a'] = 'c' self.assertEqual(bucket['a'], 'c') def test___delitem___miss(self): bucket = self._makeOne() def _should_error(): del bucket['nonesuch'] self.assertRaises(KeyError, _should_error) def test___delitem___hit(self): bucket = self._makeOne() bucket._keys.append('a') bucket._values.append('b') del bucket['a'] self.assertEqual(bucket._keys, []) self.assertEqual(bucket._values, []) def test_clear_filled(self): bucket = self._makeOne() bucket['a'] = 'b' bucket['c'] = 'd' bucket.clear() self.assertEqual(len(bucket._keys), 0) self.assertEqual(len(bucket._values), 0) def test_clear_empty(self): bucket = self._makeOne() bucket.clear() self.assertEqual(len(bucket._keys), 0) self.assertEqual(len(bucket._values), 0) def test_get_miss_no_default(self): bucket = self._makeOne() self.assertEqual(bucket.get('nonesuch'), None) def test_get_miss_w_default(self): bucket = self._makeOne() self.assertEqual(bucket.get('nonesuch', 'b'), 'b') def test_get_hit(self): bucket = self._makeOne() bucket._keys.append('a') bucket._values.append('b') self.assertEqual(bucket.get('a'), 'b') def test___getitem___miss(self): bucket = self._makeOne() def _should_error(): return bucket['nonesuch'] self.assertRaises(KeyError, _should_error) def test___getitem___hit(self): bucket = self._makeOne() bucket._keys.append('a') bucket._values.append('b') self.assertEqual(bucket['a'], 'b') def test__split_empty(self): bucket = self._makeOne() next_b = bucket._next = self._makeOne() new_b = bucket._split() self.assertEqual(len(bucket._keys), 0) self.assertEqual(len(bucket._values), 0) self.assertEqual(len(new_b._keys), 0) self.assertEqual(len(new_b._values), 0) self.assertTrue(bucket._next is new_b) self.assertTrue(new_b._next is next_b) 
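# As the _split tests below suggest, Bucket._split(index) moves the keys/values from ``index`` onward into a new bucket (defaulting to the midpoint when no index is given) and splices that new bucket into the ``_next`` chain.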
def test__split_filled_default_index(self): bucket = self._makeOne() next_b = bucket._next = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i new_b = bucket._split() self.assertEqual(list(bucket._keys), ['a', 'b', 'c']) self.assertEqual(list(bucket._values), [0, 1, 2]) self.assertEqual(list(new_b._keys), ['d', 'e', 'f']) self.assertEqual(list(new_b._values), [3, 4, 5]) self.assertTrue(bucket._next is new_b) self.assertTrue(new_b._next is next_b) def test__split_filled_explicit_index(self): bucket = self._makeOne() next_b = bucket._next = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i new_b = bucket._split(2) self.assertEqual(list(bucket._keys), ['a', 'b']) self.assertEqual(list(bucket._values), [0, 1]) self.assertEqual(list(new_b._keys), ['c', 'd', 'e', 'f']) self.assertEqual(list(new_b._values), [2, 3, 4, 5]) self.assertTrue(bucket._next is new_b) self.assertTrue(new_b._next is next_b) def test_keys_empty_no_args(self): bucket = self._makeOne() self.assertEqual(bucket.keys(), []) def test_keys_filled_no_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual( bucket.keys(), ['a', 'b', 'c', 'd', 'e', 'f'], ) def test_keys_filled_w_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual( bucket.keys(min='b', excludemin=True, max='f', excludemax=True), ['c', 'd', 'e'], ) def test_iterkeys_empty_no_args(self): bucket = self._makeOne() self.assertEqual(list(bucket.iterkeys()), []) def test_iterkeys_filled_no_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(list(bucket.iterkeys()), ['a', 'b', 'c', 'd', 'e', 'f']) def test_iterkeys_filled_w_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(list(bucket.iterkeys( min='b', excludemin=True, max='f', excludemax=True)), ['c', 'd', 'e']) def test_values_empty_no_args(self): bucket = self._makeOne() self.assertEqual(bucket.values(), []) def test_values_filled_no_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(bucket.values(), list(range(6))) def test_values_filled_w_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(bucket.values(min='b', excludemin=True, max='f', excludemax=True), [2, 3, 4]) def test_itervalues_empty_no_args(self): bucket = self._makeOne() self.assertEqual(list(bucket.itervalues()), []) def test_itervalues_filled_no_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(list(bucket.itervalues()), list(range(6))) def test_itervalues_filled_w_args(self): bucket = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i self.assertEqual(list(bucket.itervalues( min='b', excludemin=True, max='f', excludemax=True)), [2, 3, 4]) def test_items_empty_no_args(self): bucket = self._makeOne() self.assertEqual(bucket.items(), []) def test_items_filled_no_args(self): bucket = self._makeOne() EXPECTED = [] for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED.append((c, i)) self.assertEqual(bucket.items(), EXPECTED) def test_items_filled_w_args(self): bucket = self._makeOne() EXPECTED = [] for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED.append((c, i)) self.assertEqual( bucket.items(min='b', excludemin=True, max='f', excludemax=True), EXPECTED[2:5], ) def test_iteritems_empty_no_args(self): bucket = self._makeOne() self.assertEqual(list(bucket.iteritems()), []) def 
test_iteritems_filled_no_args(self): bucket = self._makeOne() EXPECTED = [] for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED.append((c, i)) self.assertEqual(list(bucket.iteritems()), EXPECTED) def test_iteritems_filled_w_args(self): bucket = self._makeOne() EXPECTED = [] for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED.append((c, i)) self.assertEqual( list( bucket.iteritems( min='b', excludemin=True, max='f', excludemax=True, ) ), EXPECTED[2:5] ) def test___getstate___empty_no_next(self): bucket = self._makeOne() self.assertEqual(bucket.__getstate__(), ((),)) def test___getstate___empty_w_next(self): bucket = self._makeOne() bucket._next = next_b = self._makeOne() self.assertEqual(bucket.__getstate__(), ((), next_b)) def test___getstate___non_empty_no_next(self): bucket = self._makeOne() EXPECTED = () for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED += (c, i) self.assertEqual(bucket.__getstate__(), (EXPECTED,)) def test___getstate___non_empty_w_next(self): bucket = self._makeOne() bucket._next = next_b = self._makeOne() EXPECTED = () for i, c in enumerate('abcdef'): bucket[c] = i EXPECTED += (c, i) self.assertEqual(bucket.__getstate__(), (EXPECTED, next_b)) def test___setstate___w_non_tuple(self): bucket = self._makeOne() self.assertRaises(TypeError, bucket.__setstate__, (None,)) def test___setstate___w_empty_no_next(self): bucket = self._makeOne() bucket._next = self._makeOne() for i, c in enumerate('abcdef'): bucket[c] = i bucket.__setstate__(((),)) self.assertEqual(len(bucket.keys()), 0) self.assertTrue(bucket._next is None) def test___setstate___w_non_empty_w_next(self): bucket = self._makeOne() next_b = self._makeOne() ITEMS = () EXPECTED = [] for i, c in enumerate('abcdef'): ITEMS += (c, i) EXPECTED.append((c, i)) bucket.__setstate__((ITEMS, next_b)) self.assertEqual(bucket.items(), EXPECTED) self.assertTrue(bucket._next is next_b) def test__p_resolveConflict_x_on_com_next_old_new_None(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() N_NEW = object() s_old = None s_com = ((), N_NEW) s_new = None e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_com_next(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() N_NEW = object() s_old = ((), None) s_com = ((), N_NEW) s_new = ((), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_new_next_old_com_None(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() N_NEW = object() s_old = None s_com = None s_new = ((), N_NEW) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_new_next(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() N_NEW = object() s_old = ((), None) s_com = ((), None) s_new = ((), N_NEW) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_com_empty(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 'b', 'c', 'd'), None) s_com = ((), None) s_new = (('a', 'b'), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 12) def test__p_resolveConflict_x_on_new_empty(self): from ..Interfaces import 
BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1), None) s_com = (('a', 0), None) s_new = ((), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 12) def test__p_resolveConflict_x_both_update_same_key(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0), None) s_com = (('a', 5, 'b', 1, 'c', 2), None) s_new = (('a', 6, 'd', 3), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 1) def test__p_resolveConflict_x_on_del_first_com_x(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('b', 1), None) s_new = (('a', 0, 'b', 1), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 13) def test__p_resolveConflict_x_on_del_first_new_x(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, 'b', 1), None) s_new = (('b', 1), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 13) def test__p_resolveConflict_x_on_del_first_new(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1), None) s_com = (('a', 1, 'b', 2, 'c', 3), None) s_new = (('b', 4), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 2) def test__p_resolveConflict_x_on_del_first_com(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1), None) s_com = (('b', 4), None) s_new = (('a', 1, 'b', 2, 'c', 3), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 3) def test__p_resolveConflict_x_on_ins_same_after_del(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1), None) s_com = (('a', 0, 'c', 2), None) s_new = (('a', 0, 'c', 2, 'd', 3), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 4) def test__p_resolveConflict_x_on_del_same(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, 'c', 2), None) s_new = (('a', 0, 'd', 3, 'e', 4), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 5) def test__p_resolveConflict_x_on_append_same(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, ), None) s_com = (('a', 0, 'b', 1), None) s_new = (('a', 0, 'b', 1, 'c', 2), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 6) def test__p_resolveConflict_x_on_new_deletes_all_com_adds(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, 'd', 3, 'e', 4, 'f', 5), None) s_new = (('a', 0, ), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 7) def test__p_resolveConflict_x_on_com_deletes_all_new_adds(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 
0, 'b', 1, 'c', 2), None) s_com = (('a', 0, ), None) s_new = (('a', 0, 'd', 3, 'e', 4, 'f', 5), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 8) def test__p_resolveConflict_x_on_com_deletes_all_new_deletes(self): from ..Interfaces import BTreesConflictError bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, ), None) s_new = (('a', 0, 'b', 1), None) e = self.assertRaises(BTreesConflictError, bucket._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 9) def test__p_resolveConflict_ok_both_add_new_max(self): bucket = self._makeOne() s_old = (('a', 0), None) s_com = (('a', 0, 'b', 1), None) s_new = (('a', 0, 'c', 2), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'c', 2),)) def test__p_resolveConflict_ok_com_updates(self): bucket = self._makeOne() s_old = (('a', 0), None) s_com = (('a', 5), None) s_new = (('a', 0, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 5, 'd', 3),)) def test__p_resolveConflict_ok_new_updates(self): bucket = self._makeOne() s_old = (('a', 0), None) s_com = (('a', 0, 'd', 3), None) s_new = (('a', 5), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 5, 'd', 3),)) def test__p_resolveConflict_ok_com_inserts_new_adds(self): bucket = self._makeOne() s_old = (('a', 0, 'c', 2), None) s_com = (('a', 0, 'b', 1, 'c', 2), None) s_new = (('a', 0, 'c', 2, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'c', 2, 'd', 3),)) def test__p_resolveConflict_ok_com_adds_new_inserts(self): bucket = self._makeOne() s_old = (('a', 0, 'c', 2), None) s_com = (('a', 0, 'c', 2, 'd', 3), None) s_new = (('a', 0, 'b', 1, 'c', 2), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'c', 2, 'd', 3),)) def test__p_resolveConflict_ok_com_adds_new_deletes(self): bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, 'b', 1, 'c', 2, 'd', 3), None) s_new = (('a', 0, 'e', 4), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'd', 3, 'e', 4),)) def test__p_resolveConflict_ok_com_deletes_new_adds(self): bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'c', 2), None) s_com = (('a', 0, 'e', 4), None) s_new = (('a', 0, 'b', 1, 'c', 2, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'd', 3, 'e', 4),)) def test__p_resolveConflict_ok_both_insert_new_lt_com(self): bucket = self._makeOne() s_old = (('a', 0, 'd', 3), None) s_com = (('a', 0, 'c', 2, 'd', 3), None) s_new = (('a', 0, 'b', 1, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'c', 2, 'd', 3),)) def test__p_resolveConflict_ok_both_insert_new_gt_com(self): bucket = self._makeOne() s_old = (('a', 0, 'd', 3), None) s_com = (('a', 0, 'b', 1, 'd', 3), None) s_new = (('a', 0, 'c', 2, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'c', 2, 'd', 3),)) def test__p_resolveConflict_ok_new_insert_then_com_append(self): bucket = self._makeOne() s_old = (('a', 0, 'd', 3), None) s_com = (('a', 0, 'e', 4), None) s_new = (('a', 0, 'b', 1, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) 
self.assertEqual(result, (('a', 0, 'b', 1, 'e', 4),)) def test__p_resolveConflict_ok_com_insert_then_new_append(self): bucket = self._makeOne() s_old = (('a', 0, 'd', 3), None) s_com = (('a', 0, 'b', 1, 'd', 3), None) s_new = (('a', 0, 'e', 4), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'b', 1, 'e', 4),)) def test__p_resolveConflict_ok_new_deletes_tail_com_inserts(self): bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'd', 3), None) s_com = (('a', 0, 'b', 1, 'c', 2, 'd', 3), None) s_new = (('a', 0), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'c', 2),)) def test__p_resolveConflict_ok_com_deletes_tail_new_inserts(self): bucket = self._makeOne() s_old = (('a', 0, 'b', 1, 'd', 3), None) s_com = (('a', 0), None) s_new = (('a', 0, 'b', 1, 'c', 2, 'd', 3), None) result = bucket._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 0, 'c', 2),)) class SetTests(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import Set return Set def _makeOne(self): class _Set(self._getTargetClass()): def _to_key(self, x): return x return _Set() def test_add_not_extant(self): _set = self._makeOne() _set.add('not_extant') self.assertEqual(list(_set), ['not_extant']) def test_add_extant(self): _set = self._makeOne() _set.add('extant') _set.add('extant') self.assertEqual(list(_set), ['extant']) def test_insert(self): _set = self._makeOne() _set.insert('inserted') self.assertEqual(list(_set), ['inserted']) def test_remove_miss(self): _set = self._makeOne() self.assertRaises(KeyError, _set.remove, 'not_extant') def test_remove_extant(self): _set = self._makeOne() _set.add('one') _set.add('another') _set.remove('one') self.assertEqual(list(_set), ['another']) def test_update(self): _set = self._makeOne() _set.update(['one', 'after', 'another']) self.assertEqual(sorted(_set), ['after', 'another', 'one']) def test___getstate___empty_no_next(self): _set = self._makeOne() self.assertEqual(_set.__getstate__(), ((),)) def test___getstate___empty_w_next(self): _set = self._makeOne() _set._next = next_s = self._makeOne() self.assertEqual(_set.__getstate__(), ((), next_s)) def test___getstate___non_empty_no_next(self): _set = self._makeOne() EXPECTED = () for c in 'abcdef': _set.add(c) EXPECTED += (c,) self.assertEqual(_set.__getstate__(), (EXPECTED,)) def test___getstate___non_empty_w_next(self): _set = self._makeOne() _set._next = next_s = self._makeOne() EXPECTED = () for c in 'abcdef': _set.add(c) EXPECTED += (c,) self.assertEqual(_set.__getstate__(), (EXPECTED, next_s)) def test___setstate___w_non_tuple(self): _set = self._makeOne() self.assertRaises(TypeError, _set.__setstate__, (None,)) def test___setstate___w_empty_no_next(self): _set = self._makeOne() _set._next = self._makeOne() for c in 'abcdef': _set.add(c) _set.__setstate__(((),)) self.assertEqual(len(_set), 0) self.assertTrue(_set._next is None) def test___setstate___w_non_empty_w_next(self): _set = self._makeOne() next_s = self._makeOne() ITEMS = () EXPECTED = [] for c in 'abcdef': ITEMS += (c,) EXPECTED.append(c) _set.__setstate__((ITEMS, next_s)) self.assertEqual(sorted(_set), EXPECTED) self.assertTrue(_set._next is next_s) def test___getitem___out_of_bounds(self): _set = self._makeOne() self.assertRaises(IndexError, _set.__getitem__, 1) def test___getitem___hit_bounds(self): _set = self._makeOne() _set.add('b') _set.add('a') _set.add('c') self.assertEqual(_set[0], 'a') self.assertEqual(_set[1], 
'b') self.assertEqual(_set[2], 'c') def test__split_empty(self): _set = self._makeOne() next_b = _set._next = self._makeOne() new_b = _set._split() self.assertEqual(len(_set._keys), 0) self.assertEqual(len(new_b._keys), 0) self.assertTrue(_set._next is new_b) self.assertTrue(new_b._next is next_b) def test__split_filled_default_index(self): _set = self._makeOne() next_b = _set._next = self._makeOne() for c in 'abcdef': _set.add(c) new_b = _set._split() self.assertEqual(list(_set._keys), ['a', 'b', 'c']) self.assertEqual(list(new_b._keys), ['d', 'e', 'f']) self.assertTrue(_set._next is new_b) self.assertTrue(new_b._next is next_b) def test__split_filled_explicit_index(self): _set = self._makeOne() next_b = _set._next = self._makeOne() for c in 'abcdef': _set.add(c) new_b = _set._split(2) self.assertEqual(list(_set._keys), ['a', 'b']) self.assertEqual(list(new_b._keys), ['c', 'd', 'e', 'f']) self.assertTrue(_set._next is new_b) self.assertTrue(new_b._next is next_b) def test__p_resolveConflict_x_on_com_next_old_new_None(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() N_NEW = object() s_old = None s_com = ((), N_NEW) s_new = None e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_com_next(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() N_NEW = object() s_old = ((), None) s_com = ((), N_NEW) s_new = ((), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_new_next_old_com_None(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() N_NEW = object() s_old = None s_com = None s_new = ((), N_NEW) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_new_next(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() N_NEW = object() s_old = ((), None) s_com = ((), None) s_new = ((), N_NEW) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 0) def test__p_resolveConflict_x_on_com_empty(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b'), None) s_com = ((), None) s_new = (('a',), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 12) def test__p_resolveConflict_x_on_new_empty(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b'), None) s_com = (('a',), None) s_new = ((), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 12) def test__p_resolveConflict_x_on_del_first_com(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b'), None) s_com = (('b',), None) s_new = (('a', 'b', 'c'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 13) def test__p_resolveConflict_x_on_del_first_new(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b'), None) s_com = (('a', 'b', 'c'), None) s_new = (('b',), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 13) def test__p_resolveConflict_x_on_ins_same_after_del(self): from 
..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b'), None) s_com = (('a', 'c'), None) s_new = (('a', 'c', 'd'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 4) def test__p_resolveConflict_x_on_del_same(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a', 'c'), None) s_new = (('a', 'd', 'e'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 5) def test__p_resolveConflict_x_on_append_same(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a',), None) s_com = (('a', 'b'), None) s_new = (('a', 'b', 'c'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 6) def test__p_resolveConflict_x_on_new_deletes_all_com_adds(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a', 'd', 'e', 'f'), None) s_new = (('a',), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 7) def test__p_resolveConflict_x_on_com_deletes_all_new_adds(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a',), None) s_new = (('a', 'd', 'e', 'f'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 8) def test__p_resolveConflict_x_on_com_deletes_all_new_deletes(self): from ..Interfaces import BTreesConflictError _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a',), None) s_new = (('a', 'b'), None) e = self.assertRaises(BTreesConflictError, _set._p_resolveConflict, s_old, s_com, s_new) self.assertEqual(e.reason, 9) def test__p_resolveConflict_ok_insert_in_new_add_in_com(self): _set = self._makeOne() s_old = (('a', 'c'), None) s_com = (('a', 'c', 'd'), None) s_new = (('a', 'b', 'c'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) # Note that _SetBase uses default __getstate__ self.assertEqual(result, (('a', 'b', 'c', 'd'),)) def test__p_resolveConflict_ok_insert_in_com_add_in_new(self): _set = self._makeOne() s_old = (('a', 'c'), None) s_com = (('a', 'b', 'c'), None) s_new = (('a', 'c', 'd'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'c', 'd'),)) def test__p_resolveConflict_ok_delete_in_new_add_in_com(self): _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a', 'b', 'c', 'd'), None) s_new = (('a', 'c'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'c', 'd'),)) def test__p_resolveConflict_ok_delete_in_com_add_in_new(self): _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a', 'c'), None) s_new = (('a', 'b', 'c', 'd'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'c', 'd'),)) def test__p_resolveConflict_ok_add_new_lt_add_com(self): _set = self._makeOne() s_old = (('a',), None) s_com = (('a', 'd'), None) s_new = (('a', 'b', 'c'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'c', 'd'),)) def test__p_resolveConflict_ok_add_com_lt_add_new(self): _set = self._makeOne() s_old = (('a',), None) s_com = (('a', 'b', 'c'), None) s_new = (('a', 'd'), None) result = 
_set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'c', 'd'),)) def test__p_resolveConflict_ok_ins_in_com_del_add_in_new(self): _set = self._makeOne() s_old = (('a', 'c'), None) s_com = (('a', 'b', 'c'), None) s_new = (('a', 'd', 'e'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'd', 'e'),)) def test__p_resolveConflict_ok_ins_in_new_del_add_in_com(self): _set = self._makeOne() s_old = (('a', 'c'), None) s_com = (('a', 'd', 'e'), None) s_new = (('a', 'b', 'c'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'd', 'e'),)) def test__p_resolveConflict_ok_ins_both_new_lt_com(self): _set = self._makeOne() s_old = (('a', 'e'), None) s_com = (('a', 'c', 'd', 'e'), None) s_new = (('a', 'b', 'e'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'c', 'd', 'e'),)) def test__p_resolveConflict_ok_del_new_add_com(self): _set = self._makeOne() s_old = (('a', 'e'), None) s_com = (('a', 'c', 'd', 'e'), None) s_new = (('a',), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'c', 'd'),)) def test__p_resolveConflict_ok_del_com_add_new(self): _set = self._makeOne() s_old = (('a', 'e'), None) s_com = (('a',), None) s_new = (('a', 'c', 'd', 'e'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'c', 'd'),)) def test__p_resolveConflict_add_new_gt_old_com_lt_old(self): _set = self._makeOne() s_old = (('a', 'b', 'c'), None) s_com = (('a', 'b', 'bb', 'c'), None) s_new = (('a', 'b', 'c', 'd'), None) result = _set._p_resolveConflict(s_old, s_com, s_new) self.assertEqual(result, (('a', 'b', 'bb', 'c', 'd'),)) class Test_TreeItem(unittest.TestCase): def _getTargetClass(self): from .._base import _TreeItem return _TreeItem def _makeOne(self, key, child): return self._getTargetClass()(key, child) def test_ctor(self): child = object() item = self._makeOne('key', child) self.assertEqual(item.key, 'key') self.assertTrue(item.child is child) class Test_Tree(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import _Tree return _Tree def _makeOne(self, items=None, bucket_type=None): from .._base import Bucket from .._datatypes import Any from .._datatypes import O if bucket_type is None: class _Bucket(Bucket): _to_key = O() bucket_type = _Bucket class _Test(self._getTargetClass()): _to_key = O() _to_value = Any() _bucket_type = bucket_type max_leaf_size = 10 max_internal_size = 15 return _Test(items) def test_setdefault_miss(self): tree = self._makeOne() value = object() self.assertTrue(tree.setdefault('non_extant', value) is value) self.assertTrue('non_extant' in tree) self.assertTrue(tree._findbucket('non_extant')['non_extant'] is value) def test_setdefault_hit(self): tree = self._makeOne() value1 = object() value2 = object() tree['extant'] = value1 self.assertTrue(tree.setdefault('extant', value2) is value1) self.assertTrue('extant' in tree) self.assertTrue(tree._findbucket('extant')['extant'] is value1) def test_pop_miss_no_default(self): tree = self._makeOne() self.assertRaises(KeyError, tree.pop, 'nonesuch') def test_pop_miss_w_default(self): default = object() tree = self._makeOne() self.assertTrue(tree.pop('nonesuch', default) is default) def test_pop_hit(self): tree = self._makeOne() value = object() tree['extant'] = value self.assertTrue(tree.pop('extant', value) is value) self.assertFalse('extant' in tree) def 
test_update_value_w_iteritems(self): tree = self._makeOne() tree.update({'a': 'b'}) self.assertEqual(tree._findbucket('a')['a'], 'b') def test_update_value_w_items(self): tree = self._makeOne() class Foo: def items(self): return [('a', 'b')] tree.update(Foo()) self.assertEqual(tree._findbucket('a')['a'], 'b') def test_update_value_w_invalid_items(self): tree = self._makeOne() class Foo: def items(self): return ('a', 'b', 'c') self.assertRaises(TypeError, tree.update, Foo()) def test_update_sequence(self): tree = self._makeOne() tree.update([('a', 'b')]) self.assertEqual(tree._findbucket('a')['a'], 'b') def test_update_replacing(self): tree = self._makeOne() tree['a'] = 'b' tree.update([('a', 'c')]) self.assertEqual(tree._findbucket('a')['a'], 'c') def test___setitem___incomparable(self): tree = self._makeOne() def _should_error(): tree[object()] = 'b' self.assertRaises(TypeError, _should_error) def test___delitem___miss(self): tree = self._makeOne() def _should_error(): del tree['a'] self.assertRaises(KeyError, _should_error) def test___delitem___hit(self): tree = self._makeOne() tree['a'] = 'b' del tree['a'] self.assertFalse('a' in tree) def test_clear(self): tree = self._makeOne() tree['a'] = 'b' tree.clear() self.assertFalse('a' in tree) self.assertEqual(tree._firstbucket, None) def test___nonzero___empty(self): tree = self._makeOne() self.assertFalse(tree) def test___nonzero___nonempty(self): tree = self._makeOne() tree['a'] = 'b' self.assertTrue(tree) def test___len__empty(self): tree = self._makeOne() self.assertEqual(len(tree), 0) def test___len__nonempty(self): tree = self._makeOne() tree['a'] = 'b' self.assertEqual(len(tree), 1) def test___len__nonempty_multiple_buckets(self): tree = self._makeOne() for i in range(100): tree[str(i)] = i self.assertEqual(len(tree), 100) def test_size_empty(self): tree = self._makeOne() self.assertEqual(tree.size, 0) def test_size_nonempty(self): tree = self._makeOne() tree['a'] = 'b' self.assertEqual(tree.size, 1) def test_size_nonempty_multiple_buckets(self): tree = self._makeOne() for i in range(100): tree[str(i)] = i b_count = 0 bucket = tree._firstbucket while bucket is not None: b_count += 1 bucket = bucket._next self.assertEqual(tree.size, b_count) def test__search_empty(self): tree = self._makeOne() self.assertEqual(tree._search('nonesuch'), -1) def test__search_miss_high(self): tree = self._makeOne() for i in range(100): tree[float(i)] = i b_count = 0 bucket = tree._firstbucket while bucket is not None: b_count += 1 bucket = bucket._next self.assertEqual(tree.size, b_count) self.assertEqual(tree._search(99.5), b_count - 1) def test__search_miss_low(self): tree = self._makeOne() for i in range(100): tree[float(i)] = i self.assertEqual(tree._search(0.1), 0) def test__search_miss_between(self): tree = self._makeOne() for i in range(100): tree[float(i)] = i self.assertEqual(tree._search(1.5), 0) def test__search_hit(self): tree = self._makeOne() for i in range(100): tree[float(i)] = i key = tree._data[1].key self.assertEqual(tree._search(key), 1) def test__find_bucket_low(self): tree = self._makeOne() for i in range(1000): tree[float(i)] = i self.assertTrue(tree._findbucket(0.1) is tree._firstbucket) def test__find_bucket_high(self): tree = self._makeOne() for i in range(1000): tree[float(i)] = i bucket = tree._firstbucket while bucket._next is not None: bucket = bucket._next self.assertTrue(tree._findbucket(999.5) is bucket) def test___contains___empty(self): tree = self._makeOne() self.assertFalse('nonesuch' in tree) def 
test___contains___miss(self): tree = self._makeOne() for i in range(1000): tree[float(i)] = i self.assertFalse(1000.0 in tree) def test___contains___hit(self): tree = self._makeOne() keys = [] for i in range(1000): key = float(i) tree[key] = i keys.append(key) for key in keys: self.assertTrue(key in tree) def test_has_key_empty(self): tree = self._makeOne() self.assertFalse(tree.has_key('nonesuch')) def test_has_key_miss(self): tree = self._makeOne() for i in range(1000): tree[float(i)] = i self.assertFalse(tree.has_key(1000.0)) def test_has_key_hit(self): tree = self._makeOne() KEYS = [] for i in range(1000): key = float(i) tree[key] = i KEYS.append(key) for key in KEYS: # XXX should we be testing for the 'depth' value? self.assertTrue(tree.has_key(key)) def test_keys_defaults_empty(self): tree = self._makeOne() self.assertEqual(list(tree.keys()), []) def test_keys_defaults_filled(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys()), KEYS[:]) def test_keys_defaults_exclude_min(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(excludemin=True)), KEYS[1: 5]) def test_keys_defaults_exclude_max(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(excludemax=True)), KEYS[0: 4]) def test_keys_w_min_hit(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(min='bravo')), KEYS[1: 5]) def test_keys_w_min_miss(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(min='candy')), KEYS[2: 5]) def test_keys_w_min_hit_w_exclude_min(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(min='bravo', excludemin=True)), KEYS[2: 5]) def test_keys_w_min_miss_w_exclude_min(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() # 'excludemin' doesn't fire on miss self.assertEqual(list(tree.keys(min='candy', excludemin=True)), KEYS[2: 5]) def test_keys_w_max_hit(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(max='delta')), KEYS[0: 4]) def test_keys_w_max_miss(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(max='dandy')), KEYS[0: 3]) def test_keys_w_max_hit_w_exclude_max(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree.keys(max='delta', excludemax=True)), KEYS[0: 3]) def test_keys_w_max_miss_w_exclude_max(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() # 'excludemax' doesn't fire on miss self.assertEqual(list(tree.keys(max='dandy', excludemax=True)), KEYS[0: 3]) def test_iterkeys(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() 
self.assertEqual(list(tree.iterkeys()), KEYS) def test___iter__(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(list(tree), KEYS) def test_minKey_empty(self): tree = self._makeOne() self.assertRaises(ValueError, tree.minKey) def test_minKey_filled_default(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.minKey(), KEYS[0]) def test_minKey_filled_explicit_hit(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.minKey(min='bravo'), 'bravo') def test_minKey_filled_explicit_miss(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.minKey(min='basso'), 'bravo') def test_maxKey_empty(self): tree = self._makeOne() self.assertRaises(ValueError, tree.maxKey) def test_maxKey_filled_default(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.maxKey(), 'echo') def test_maxKey_filled_explicit_hit(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.maxKey('bravo'), 'bravo') def test_maxKey_filled_explicit_miss(self): tree = self._makeOne() KEYS = ['alpha', 'bravo', 'charlie', 'delta', 'echo'] for key in KEYS: tree[key] = key.upper() self.assertEqual(tree.maxKey('candy'), 'bravo') def test__set_calls_readCurrent_on_jar(self): tree = self._makeOne() tree._p_oid = b'OID' tree._p_serial = b'01234567' tree._p_jar = jar = _Jar() tree._set('a', 'b') self.assertTrue(tree in jar._current) def test__split_empty(self): tree = self._makeOne() self.assertRaises(IndexError, tree._split) def test__split_filled_empties_original(self): tree = self._makeOne() for i, c in enumerate('abcdef'): tree[c] = i fb = tree._firstbucket new_t = tree._split() self.assertEqual(list(tree), []) self.assertTrue(tree._firstbucket is None) self.assertEqual(list(new_t), ['a', 'b', 'c', 'd', 'e', 'f']) self.assertTrue(new_t._firstbucket is fb) def test__split_filled_divides_original(self): tree = self._makeOne() LETTERS = 'abcdefghijklmnopqrstuvwxyz' for i, c in enumerate(LETTERS): tree[c] = i fb = tree._firstbucket new_t = tree._split() # Note that original tree still links to split buckets self.assertEqual(''.join(list(tree)), LETTERS) self.assertTrue(tree._firstbucket is fb) self.assertEqual(''.join(list(new_t)), LETTERS[10:]) self.assertFalse(new_t._firstbucket is fb) def test__split_filled_divides_deeper(self): tree = self._makeOne() KEYS = [] FMT = '%05d' for i in range(1000): key = FMT % i tree[key] = i KEYS.append(key) fb = tree._firstbucket new_t = tree._split(tree.max_internal_size - 2) # Note that original tree still links to split buckets self.assertEqual(list(tree), KEYS) self.assertTrue(tree._firstbucket is fb) new_min = new_t.minKey() self.assertEqual(list(new_t), KEYS[int(new_min):]) self.assertFalse(new_t._firstbucket is fb) def test__del_calls_readCurrent_on_jar(self): tree = self._makeOne({'a': 'b'}) tree._p_oid = b'OID' tree._p_serial = b'01234567' tree._p_jar = jar = _Jar() tree._del('a') self.assertTrue(tree in jar._current) def test__del_miss(self): tree = self._makeOne({'a': 'b'}) self.assertRaises(KeyError, tree._del, 'nonesuch') def 
test__del_fixes_up_node_key(self): SOURCE = {'%05d' % i: i for i in range(1000)} tree = self._makeOne(SOURCE) before = tree._data[1].key del tree[before] after = tree._data[1].key self.assertTrue(after > before) def test__del_empties_first_bucket_not_zeroth_item(self): SOURCE = {'%05d' % i: i for i in range(1000)} tree = self._makeOne(SOURCE) bucket = tree._data[1].child._firstbucket next_b = bucket._next for key in list(bucket): # don't del while iterating del tree[key] self.assertTrue(tree._data[1].child._firstbucket is next_b) def test__del_empties_first_bucket_zeroth_item(self): SOURCE = {'%05d' % i: i for i in range(1000)} tree = self._makeOne(SOURCE) bucket = tree._data[0].child._firstbucket next_b = bucket._next for key in list(bucket): # don't del while iterating del tree[key] self.assertTrue(tree._data[0].child._firstbucket is next_b) self.assertTrue(tree._firstbucket is next_b) def test__del_empties_other_bucket_not_zeroth_item(self): SOURCE = {'%05d' % i: i for i in range(1000)} tree = self._makeOne(SOURCE) bucket = tree._data[1].child._firstbucket._next next_b = bucket._next for key in list(bucket): # don't del while iterating del tree[key] self.assertTrue(tree._data[1].child._firstbucket._next is next_b) def test___getstate___empty(self): tree = self._makeOne() self.assertEqual(tree.__getstate__(), None) def test___getstate___single_bucket_wo_oid(self): tree = self._makeOne({'a': 'b'}) self.assertEqual(tree.__getstate__(), (((('a', 'b'),),),)) def test___getstate___single_bucket_w_oid(self): tree = self._makeOne({'a': 'b'}) bucket = tree._firstbucket jar = _Jar() bucket._p_jar = jar bucket._p_oid = b'OID' self.assertEqual(tree.__getstate__(), ((bucket,), bucket)) def test___getstate___multiple_buckets(self): tree = self._makeOne() FMT = '%05d' for i in range(1000): key = FMT % i tree[key] = i bucket = tree._firstbucket EXPECTED = (tree._data[0].child,) for item in tree._data[1:]: EXPECTED += (item.key, item.child) self.assertEqual(tree.__getstate__(), (EXPECTED, bucket)) def test___setstate___invalid(self): tree = self._makeOne() self.assertRaises(TypeError, tree.__setstate__, ('a', 'b')) def test___setstate___to_empty(self): tree = self._makeOne({'a': 'b'}) tree.__setstate__(None) self.assertEqual(len(tree), 0) def test___setstate___to_single_bucket_wo_oid(self): tree = self._makeOne() tree.__setstate__((((('a', 'b'),),),)) self.assertEqual(list(tree.keys()), ['a']) self.assertEqual(tree._findbucket('a')['a'], 'b') self.assertEqual(len(tree._data), 1) self.assertTrue(tree._data[0].child is tree._firstbucket) self.assertTrue(tree._firstbucket._p_oid is None) def test___setstate___to_multiple_buckets(self): from .._base import Bucket class _Bucket(Bucket): def _to_key(self, x): return x tree = self._makeOne(bucket_type=_Bucket) b1 = _Bucket({'a': 0, 'b': 1}) b2 = _Bucket({'c': 2, 'd': 3}) b1._next = b2 tree.__setstate__(((b1, 'c', b2), b1)) self.assertEqual(list(tree.keys()), ['a', 'b', 'c', 'd']) self.assertEqual(len(tree._data), 2) self.assertEqual(tree._data[0].key, None) self.assertEqual(tree._data[0].child, b1) self.assertEqual(tree._data[1].key, 'c') self.assertEqual(tree._data[1].child, b2) self.assertTrue(tree._firstbucket is b1) def test__check_empty_wo_firstbucket(self): tree = self._makeOne() tree._check() # no raise def test__check_empty_w_firstbucket(self): tree = self._makeOne() tree._firstbucket = object() e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "Empty BTree has non-NULL firstbucket") def test__check_nonempty_wo_firstbucket(self):
tree = self._makeOne({'a': 'b'}) tree._firstbucket = None e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "Non-empty BTree has NULL firstbucket") def test__check_nonempty_w_null_child(self): tree = self._makeOne({'a': 'b'}) tree._data.append(tree._data[0].__class__('c', None)) e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "BTree has NULL child") def test__check_nonempty_w_heterogenous_child(self): class Other: pass tree = self._makeOne({'a': 'b'}) tree._data.append(tree._data[0].__class__('c', Other())) e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "BTree children have different types") def test__check_nonempty_w_empty_child(self): tree = self._makeOne({'a': 'b'}) first = tree._data[0] tree._data.append(first.__class__('c', first.child.__class__())) e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "Bucket length < 1") def test__check_branch_w_mismatched_firstbucket(self): tree = self._makeOne() c_tree = tree.__class__({'a': 'b'}) c_first = c_tree._data[0] tree._data.append(c_first.__class__('a', c_tree)) tree._firstbucket = object() e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "BTree has firstbucket different than " "its first child's firstbucket") def test__check_nonempty_w_invalid_child(self): class Invalid: size = 2 tree = self._makeOne({'a': 'b'}) tree._data[0].child = Invalid() e = self.assertRaises(AssertionError, tree._check) self.assertEqual(str(e), "Incorrect child type") def test__check_branch_traverse_bucket_pointers(self): tree = self._makeOne() t_first = tree.__class__({'a': 'b'}) c_first = t_first._data[0] b_first = c_first.child t_second = tree.__class__({'c': 'd'}) b_first._next = t_second._firstbucket tree._data.append(c_first.__class__('a', t_first)) tree._data.append(c_first.__class__('c', t_second)) tree._firstbucket = t_first._firstbucket tree._check() # no raise def test__check_nonempty_leaf_traverse_bucket_pointers(self): tree = self._makeOne({'a': 'b'}) first = tree._data[0] first.child._next = b2 = first.child.__class__({'c': 'd'}) tree._data.append(first.__class__('c', b2)) tree._check() # no raise def test__p_resolveConflict_invalid_state_non_tuple(self): tree = self._makeOne() INVALID = [] EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, DEGEN, INVALID) def test__p_resolveConflict_non_degenerate_state(self): from ..Interfaces import BTreesConflictError tree = self._makeOne() FIRST = object() NON_DEGEN = ((FIRST, 'a', object(), 'b', object()), FIRST) EMPTY = None DEGEN = (((('a', 'b'),),),) e = self.assertRaises(BTreesConflictError, tree._p_resolveConflict, NON_DEGEN, EMPTY, DEGEN) self.assertEqual(e.reason, 11) e = self.assertRaises(BTreesConflictError, tree._p_resolveConflict, EMPTY, NON_DEGEN, DEGEN) self.assertEqual(e.reason, 11) e = self.assertRaises(BTreesConflictError, tree._p_resolveConflict, EMPTY, DEGEN, NON_DEGEN) self.assertEqual(e.reason, 11) def test__p_resolveConflict_invalid_state_non_1_tuple(self): tree = self._makeOne() INVALID = ('a', 'b', 'c') EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, 
DEGEN, INVALID) def test__p_resolveConflict_invalid_state_nested_non_tuple(self): tree = self._makeOne() INVALID = ([],) EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, DEGEN, INVALID) def test__p_resolveConflict_invalid_state_nested_non_1_tuple(self): tree = self._makeOne() INVALID = (('a', 'b', 'c'),) EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, DEGEN, INVALID) def test__p_resolveConflict_invalid_state_nested2_non_tuple(self): tree = self._makeOne() INVALID = (([],),) EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, DEGEN, INVALID) def test__p_resolveConflict_invalid_state_nested2_non_1_tuple(self): tree = self._makeOne() INVALID = ((('a', 'b', 'c'),)) EMPTY = None DEGEN = (((('a', 'b'),),),) self.assertRaises(TypeError, tree._p_resolveConflict, INVALID, EMPTY, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, INVALID, DEGEN) self.assertRaises(TypeError, tree._p_resolveConflict, EMPTY, DEGEN, INVALID) def test__p_resolveConflict_w_degenerate_state(self): tree = self._makeOne() OLD = (((('a', 'b', 'c', 'd'),),),) COM = (((('a', 'b', 'c', 'd', 'e', 'f'),),),) NEW = (((('a', 'b'),),),) resolved = tree._p_resolveConflict(OLD, COM, NEW) self.assertEqual(resolved, (((('a', 'b', 'e', 'f'),),),)) class Test_TreeItems(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import _TreeItems return _TreeItems def _makeOne(self, firstbucket, itertype, iterargs): return self._getTargetClass()(firstbucket, itertype, iterargs) def _makeBucket(self, items=None): from .._base import Bucket class _Bucket(Bucket): def _to_key(self, k): return k return _Bucket(items) def test___getitem___w_slice(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) self.assertEqual(list(ti[0:3]), ['a', 'b', 'c']) def test___getitem___w_negative_index_le_minus_length(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) def _should_error(): return ti[-27] self.assertRaises(IndexError, _should_error) def test___getitem___w_index_gt_length(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) def _should_error(): return ti[27] self.assertRaises(IndexError, _should_error) def test___getitem___w_index_smaller_than_cursor(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) ti[12] self.assertEqual(ti[1], 'b') def test___len__(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) self.assertEqual(len(ti), 26) # short-circuit on second pass self.assertEqual(len(ti), 26) def 
test___iter___w_iterkeys(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iterkeys', ()) self.assertEqual(list(ti), [x[0] for x in ITEMS]) def test___iter___w_iteritems(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'iteritems', ()) self.assertEqual(list(ti), ITEMS) def test___iter___w_itervalues(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket = self._makeBucket(ITEMS) ti = self._makeOne(bucket, 'itervalues', ()) self.assertEqual(list(ti), [x[1] for x in ITEMS]) def test___iter___w_empty_last_bucket(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] bucket1 = self._makeBucket(ITEMS) ti = self._makeOne(bucket1, 'iterkeys', ()) self.assertEqual(list(ti), [x[0] for x in ITEMS]) class TreeTests(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import Tree return Tree def _makeOne(self, items=None): from .._base import Bucket class _Bucket(Bucket): def _to_key(self, k): return k class _Test(self._getTargetClass()): _to_key = _to_value = lambda self, x: x _bucket_type = _Bucket max_leaf_size = 10 max_internal_size = 15 return _Test(items) def test_get_empty_miss(self): tree = self._makeOne() self.assertEqual(tree.get('nonesuch'), None) def test_get_empty_miss_w_default(self): DEFAULT = object() tree = self._makeOne() self.assertTrue(tree.get('nonesuch', DEFAULT) is DEFAULT) def test_get_filled_miss(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(tree.get('nonesuch'), None) def test_get_filled_miss_w_default(self): DEFAULT = object() ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertTrue(tree.get('nonesuch', DEFAULT) is DEFAULT) def test_get_filled_hit(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(tree.get('a'), 0) def test___getitem___empty_miss(self): tree = self._makeOne() def _should_error(): return tree['nonesuch'] self.assertRaises(KeyError, _should_error) def test___getitem___filled_miss(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) def _should_error(): return tree['nonesuch'] self.assertRaises(KeyError, _should_error) def test___getitem___filled_hit(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(tree['a'], 0) def test_values_empty_no_args(self): tree = self._makeOne() self.assertEqual(list(tree.values()), []) def test_values_filled_no_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.values()), list(range(26))) def test_values_filled_w_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.values(min='b', excludemin=True, max='f', excludemax=True)), [2, 3, 4]) def test_itervalues_empty_no_args(self): tree = self._makeOne() self.assertEqual(list(tree.itervalues()), []) def test_itervalues_filled_no_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.itervalues()), list(range(26))) def test_itervalues_filled_w_args(self): ITEMS = [(y, x) for 
x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual( list( tree.itervalues( min='b', excludemin=True, max='f', excludemax=True, ) ), [2, 3, 4], ) def test_items_empty_no_args(self): tree = self._makeOne() self.assertEqual(list(tree.items()), []) def test_items_filled_no_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.items()), ITEMS) def test_items_filled_w_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual( list( tree.items( min='b', excludemin=True, max='f', excludemax=True, ) ), ITEMS[2:5] ) def test_iteritems_empty_no_args(self): tree = self._makeOne() self.assertEqual(list(tree.iteritems()), []) def test_iteritems_filled_no_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.iteritems()), ITEMS) def test_iteritems_filled_w_args(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.iteritems(min='b', excludemin=True, max='f', excludemax=True)), ITEMS[2:5]) def test_byValue(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertEqual(list(tree.byValue(min=22)), [(y, x) for x, y in reversed(ITEMS[22:])]) def test_insert_new_key(self): tree = self._makeOne() self.assertTrue(tree.insert('a', 0)) self.assertEqual(tree['a'], 0) def test_insert_would_change_key(self): ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')] tree = self._makeOne(ITEMS) self.assertFalse(tree.insert('a', 1)) self.assertEqual(tree['a'], 0) class TreeSetTests(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import TreeSet return TreeSet def _makeOne(self, items=None): from .._base import Bucket class _Bucket(Bucket): def _to_key(self, k): return k class _Test(self._getTargetClass()): _to_key = _to_value = lambda self, x: x _bucket_type = _Bucket max_leaf_size = 10 max_internal_size = 15 return _Test(items) def test_add_new_key(self): _set = self._makeOne() self.assertTrue(_set.add('a')) self.assertTrue('a' in _set) def test_add_existing_key(self): _set = self._makeOne() _set.add('a') self.assertFalse(_set.add('a')) def test_remove_miss(self): _set = self._makeOne() self.assertRaises(KeyError, _set.remove, 'a') def test_remove_hit(self): _set = self._makeOne() _set.add('a') self.assertEqual(_set.remove('a'), None) self.assertFalse('a' in _set) def test_update_empty_sequence(self): _set = self._makeOne() _set.update(()) self.assertEqual(len(_set), 0) def test_update_simple_sequence(self): _set = self._makeOne() LETTERS = 'abcdefghijklmnopqrstuvwxyz' _set.update(LETTERS) self.assertEqual(len(_set), len(LETTERS)) for letter in LETTERS: self.assertTrue(letter in _set) def test_update_mppaing(self): _set = self._makeOne() LETTERS = 'abcdefghijklmnopqrstuvwxyz' a_dict = {y: x for x, y in enumerate(LETTERS)} _set.update(a_dict) self.assertEqual(len(_set), len(LETTERS)) for letter in LETTERS: self.assertTrue(letter in _set) class Test_set_operation(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from .._base import set_operation return set_operation def _makeOne(self, func, set_type): return self._getTargetClass()(func, set_type) def test_it(self): class _SetType: pass _called_with = [] def _func(*args, **kw): 
_called_with.append((args, kw)) set_op = self._makeOne(_func, _SetType) set_op('a', b=1) self.assertEqual(_called_with, [((_SetType, 'a',), {'b': 1})]) class _SetObBase: def _makeSet(self, *args): return _Set(*args) def _makeMapping(self, *args, **kw): return _Mapping(*args, **kw) class Test_difference(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import difference return difference(*args, **kw) def test_lhs_none(self): rhs = self._makeSet('a', 'b', 'c') self.assertEqual(self._callFUT(rhs.__class__, None, rhs), None) def test_rhs_none(self): lhs = self._makeSet('a', 'b', 'c') self.assertEqual(self._callFUT(lhs.__class__, lhs, None), lhs) def test_both_sets_rhs_empty(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeSet() result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), list(lhs)) def test_both_sets_lhs_empty(self): lhs = self._makeSet() rhs = self._makeSet('a', 'b', 'c') result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), list(lhs)) def test_lhs_set_rhs_mapping(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeMapping({'a': 13, 'b': 12}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['c']) def test_lhs_mapping_rhs_set(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeSet('a', 'b') result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['c']) self.assertEqual(result['c'], 11) def test_both_mappings_rhs_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeMapping({}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['a', 'b', 'c']) self.assertEqual(result['a'], 13) self.assertEqual(result['b'], 12) self.assertEqual(result['c'], 11) def test_both_mappings_rhs_non_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11, 'f': 10}) rhs = self._makeMapping({'b': 22, 'e': 37}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['a', 'c', 'f']) self.assertEqual(result['a'], 13) self.assertEqual(result['c'], 11) self.assertEqual(result['f'], 10) class Test_union(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import union return union(*args, **kw) def test_lhs_none(self): rhs = self._makeSet('a', 'b', 'c') self.assertEqual(self._callFUT(rhs.__class__, None, rhs), rhs) def test_rhs_none(self): lhs = self._makeSet('a', 'b', 'c') self.assertEqual(self._callFUT(lhs.__class__, lhs, None), lhs) def test_both_sets_rhs_empty(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeSet() result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), list(lhs)) def test_both_sets_lhs_empty(self): lhs = self._makeSet() rhs = self._makeSet('a', 'b', 'c') result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), list(rhs)) def test_lhs_set_rhs_mapping(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeMapping({'a': 13, 'd': 12}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['a', 'b', 'c', 'd']) def test_lhs_mapping_rhs_set(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeSet('a', 'd') result = self._callFUT(lhs._set_type, lhs, rhs) self.assertIsInstance(result, _Set) self.assertEqual(list(result), ['a', 'b', 'c', 'd']) def test_both_mappings_rhs_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeMapping({}) result = self._callFUT(lhs.__class__, lhs, rhs) 
self.assertEqual(list(result), ['a', 'b', 'c']) def test_both_mappings_rhs_non_empty(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) rhs = self._makeMapping({'b': 22, 'd': 33}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['a', 'b', 'c', 'd', 'e']) class Test_intersection(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import intersection return intersection(*args, **kw) def test_lhs_none(self): rhs = self._makeSet(('a', 'b', 'c')) self.assertEqual(self._callFUT(rhs.__class__, None, rhs), rhs) def test_rhs_none(self): lhs = self._makeSet(('a', 'b', 'c')) self.assertEqual(self._callFUT(lhs.__class__, lhs, None), lhs) def test_both_sets_rhs_empty(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeSet() result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), []) def test_both_sets_lhs_empty(self): lhs = self._makeSet() rhs = self._makeSet('a', 'b', 'c') result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), []) def test_lhs_set_rhs_mapping(self): lhs = self._makeSet('a', 'b', 'c') rhs = self._makeMapping({'a': 13, 'd': 12}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['a']) def test_lhs_mapping_rhs_set(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeSet('a', 'd') result = self._callFUT(lhs._set_type, lhs, rhs) self.assertIsInstance(result, _Set) self.assertEqual(list(result), ['a']) def test_both_mappings_rhs_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeMapping({}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), []) def test_both_mappings_rhs_non_empty(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) rhs = self._makeMapping({'b': 22, 'c': 44, 'd': 33}) result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(list(result), ['c']) class Test_weightedUnion(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import weightedUnion return weightedUnion(*args, **kw) def test_both_none(self): self.assertEqual(self._callFUT(_Mapping, None, None), (0, None)) def test_lhs_none(self): rhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) self.assertEqual(self._callFUT(rhs.__class__, None, rhs), (1, rhs)) def test_rhs_none(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) self.assertEqual(self._callFUT(lhs.__class__, lhs, None), (1, lhs)) def test_both_mappings_but_no_merge(self): lhs = {'a': 13, 'b': 12, 'c': 11} rhs = {'b': 22, 'd': 14} self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_set_wo_MERGE_DEFAULT_rhs_set(self): lhs = self._makeSet('a', 'd') lhs.MERGE = lambda v1, w1, v2, w2: (v1 * w1) + (v2 * w2) lhs.MERGE_WEIGHT = lambda v, w: v lhs._mapping_type = _Mapping rhs = self._makeSet('a', 'b', 'c') self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_mapping_wo_MERGE_DEFAULT_rhs_set(self): class _MappingWoDefault(dict): def MERGE(self, v1, w1, v2, w2): return (v1 * w1) + (v2 * w2) def MERGE_WEIGHT(self, v, w): return v lhs = _MappingWoDefault({'a': 13, 'b': 12, 'c': 11}) lhs._mapping_type = _MappingWoDefault rhs = self._makeSet('a', 'b', 'c') self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_mapping_wo_MERGE_rhs_mapping(self): class _MappingWoMerge(dict): def MERGE_DEFAULT(self): return 1 def MERGE_WEIGHT(self, v, w): return v lhs = _MappingWoMerge({'a': 13, 'b': 12, 'c': 11}) lhs._mapping_type = 
_MappingWoMerge rhs = self._makeMapping({'a': 1, 'b': 2, 'c': 3}) self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_set_wo_MERGE_DEFAULT_rhs_mapping(self): lhs = self._makeSet('a', 'd') lhs.MERGE = lambda v1, w1, v2, w2: (v1 * w1) + (v2 * w2) lhs.MERGE_WEIGHT = lambda v, w: v lhs._mapping_type = _Mapping rhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_mergeable_set_rhs_mapping(self): lhs = self._makeSet('a', 'd') lhs.MERGE = lambda v1, w1, v2, w2: (v1 * w1) + (v2 * w2) lhs.MERGE_WEIGHT = lambda v, w: v lhs.MERGE_DEFAULT = 1 lhs._mapping_type = _Mapping rhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertTrue(isinstance(result, _Mapping)) self.assertEqual(list(result), ['a', 'b', 'c', 'd']) self.assertEqual(result['a'], 14) self.assertEqual(result['b'], 12) self.assertEqual(result['c'], 11) self.assertEqual(result['d'], 1) def test_lhs_mapping_rhs_set(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeSet('a', 'd') weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertTrue(isinstance(result, _Mapping)) self.assertEqual(list(result), ['a', 'b', 'c', 'd']) self.assertEqual(result['a'], 55) self.assertEqual(result['b'], 12) self.assertEqual(result['c'], 11) self.assertEqual(result['d'], 42) def test_both_mappings_rhs_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeMapping({}) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertEqual(list(result), ['a', 'b', 'c']) self.assertEqual(result['a'], 13) self.assertEqual(result['b'], 12) self.assertEqual(result['c'], 11) def test_both_mappings_rhs_non_empty(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) rhs = self._makeMapping({'a': 10, 'b': 22, 'd': 33}) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertEqual(list(result), ['a', 'b', 'c', 'd', 'e']) self.assertEqual(result['a'], 23) self.assertEqual(result['b'], 22) self.assertEqual(result['c'], 12) self.assertEqual(result['d'], 33) self.assertEqual(result['e'], 11) def test_w_lhs_Set_rhs_Set(self): from BTrees.IIBTree import IISetPy lhs = IISetPy([1, 2, 3]) rhs = IISetPy([1, 4]) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertEqual(list(result), [1, 2, 3, 4]) # TODO: test non-default weights class Test_weightedIntersection(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import weightedIntersection return weightedIntersection(*args, **kw) def test_both_none(self): self.assertEqual(self._callFUT(_Mapping, None, None), (0, None)) def test_lhs_none(self): rhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) self.assertEqual(self._callFUT(rhs.__class__, None, rhs), (1, rhs)) def test_rhs_none(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) self.assertEqual(self._callFUT(lhs.__class__, lhs, None), (1, lhs)) def test_both_mappings_but_no_merge(self): lhs = {'a': 13, 'b': 12, 'c': 11} rhs = {'b': 22, 'd': 14} self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_mapping_wo_MERGE_rhs_mapping(self): class _MappingWoMerge(dict): def MERGE_DEFAULT(self): return 1 def MERGE_WEIGHT(self, v, w): return v lhs = _MappingWoMerge({'a': 13, 'b': 12, 'c': 11}) lhs._mapping_type = _MappingWoMerge 
rhs = self._makeMapping({'a': 1, 'b': 2, 'c': 3}) self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_set_wo_MERGE_DEFAULT_rhs_set(self): lhs = self._makeSet('a', 'd') lhs.MERGE = lambda v1, w1, v2, w2: (v1 * w1) + (v2 * w2) lhs.MERGE_WEIGHT = lambda v, w: v lhs._mapping_type = _Mapping rhs = self._makeSet('a', 'b', 'c') self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_set_wo_MERGE_DEFAULT_rhs_mapping(self): lhs = self._makeSet('a', 'd') lhs.MERGE = lambda v1, w1, v2, w2: (v1 * w1) + (v2 * w2) lhs.MERGE_WEIGHT = lambda v, w: v lhs._mapping_type = _Mapping rhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) self.assertRaises(TypeError, self._callFUT, lhs.__class__, lhs, rhs) def test_lhs_mapping_rhs_set(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeSet('a', 'd') weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertTrue(isinstance(result, _Mapping)) self.assertEqual(list(result), ['a']) self.assertEqual(result['a'], 55) def test_both_mappings_rhs_empty(self): lhs = self._makeMapping({'a': 13, 'b': 12, 'c': 11}) rhs = self._makeMapping({}) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertEqual(list(result), []) def test_both_mappings_rhs_non_empty(self): lhs = self._makeMapping({'a': 13, 'c': 12, 'e': 11}) rhs = self._makeMapping({'a': 10, 'b': 22, 'd': 33}) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 1) self.assertEqual(list(result), ['a']) self.assertEqual(result['a'], 23) def test_w_lhs_Set_rhs_Set(self): from BTrees.IIBTree import IISetPy lhs = IISetPy([1, 2, 3]) rhs = IISetPy([1, 4]) weight, result = self._callFUT(lhs.__class__, lhs, rhs) self.assertEqual(weight, 2) self.assertEqual(list(result), [1]) # TODO: test non-default weights class Test_multiunion(unittest.TestCase, _SetObBase): def _callFUT(self, *args, **kw): from .._base import multiunion return multiunion(*args, **kw) def test_no_seqs(self): result = self._callFUT(_Set, ()) self.assertEqual(list(result), []) def test_w_non_iterable_seq(self): result = self._callFUT(_Set, (1, 2)) self.assertEqual(list(result), [1, 2]) def test_w_iterable_seqs(self): result = self._callFUT(_Set, [(1,), (2,)]) self.assertEqual(list(result), [1, 2]) def test_w_mix(self): result = self._callFUT(_Set, [1, (2,)]) self.assertEqual(list(result), [1, 2]) class Test_helpers(unittest.TestCase): def test_MERGE(self): from BTrees._base import MERGE faux_self = object() self.assertEqual(MERGE(faux_self, 1, 1, 1, 1), 2) self.assertEqual(MERGE(faux_self, 1, 2, 1, 3), 5) def test_MERGE_WEIGHT_default(self): from BTrees._base import MERGE_WEIGHT_default faux_self = object() self.assertEqual(MERGE_WEIGHT_default(faux_self, 1, 17), 1) self.assertEqual(MERGE_WEIGHT_default(faux_self, 7, 1), 7) def test_MERGE_WEIGHT_numeric(self): from BTrees._base import MERGE_WEIGHT_numeric faux_self = object() self.assertEqual(MERGE_WEIGHT_numeric(faux_self, 1, 17), 17) self.assertEqual(MERGE_WEIGHT_numeric(faux_self, 7, 1), 7) class _Cache: def __init__(self): self._mru = [] def mru(self, oid): self._mru.append(oid) class _Jar: def __init__(self): self._current = set() self._cache = _Cache() def readCurrent(self, obj): self._current.add(obj) def register(self, obj): pass class _Set: def __init__(self, *args, **kw): if len(args) == 1 and isinstance(args[0], tuple): keys = args[0] else: keys = set(args) self._keys = sorted(keys) def keys(self): return self._keys 
def __iter__(self): return iter(self._keys) def update(self, items): self._keys = sorted(self._keys + list(items)) _Set._set_type = _Set class _Mapping(dict): def __init__(self, source=None): if source is None: source = {} self._keys = [] self._values = [] for k, v in sorted(source.items()): self._keys.append(k) self._values.append(v) MERGE_DEFAULT = 42 def MERGE_WEIGHT(self, v, w): return v def MERGE(self, v1, w1, v2, w2): return v1 * w1 + v2 * w2 def iteritems(self): yield from zip(self._keys, self._values) def __iter__(self): return iter(self._keys) def __getitem__(self, key): search = dict(zip(self._keys, self._values)) return search[key] def __repr__(self): return repr(dict(zip(self._keys, self._values))) _Mapping._set_type = _Set _Mapping._mapping_type = _Mapping ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test__datatypes.py0000644000076500000240000000537314626022106020317 0ustar00jensstaff############################################################################## # # Copyright 2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import unittest from BTrees import _datatypes to_ob = _datatypes.Any() to_int = _datatypes.I() to_float = _datatypes.F() to_long = _datatypes.L() to_2_bytes = _datatypes.f() to_6_bytes = _datatypes.s() class TestDatatypes(unittest.TestCase): def test_to_ob(self): for thing in "abc", 0, 1.3, (), frozenset((1, 2)), object(): self.assertTrue(to_ob(thing) is thing) def test_to_int_w_int(self): self.assertEqual(to_int(3), 3) def test_to_int_w_overflow(self): self.assertRaises(TypeError, to_int, 2**64) def test_to_int_w_invalid(self): self.assertRaises(TypeError, to_int, ()) def test_to_float_w_float(self): self.assertEqual(to_float(3.14159), 3.14159) def test_to_float_w_int(self): self.assertEqual(to_float(3), 3.0) def test_to_float_w_invalid(self): self.assertRaises(TypeError, to_float, ()) def test_to_long_w_int(self): self.assertEqual(to_long(3), 3) def test_to_long_w_overflow(self): self.assertRaises(TypeError, to_long, 2**64) def test_to_long_w_invalid(self): self.assertRaises(TypeError, to_long, ()) def test_to_2_bytes_w_ok(self): self.assertEqual(to_2_bytes(b'ab'), b'ab') def test_to_2_bytes_w_invalid_length(self): self.assertRaises(TypeError, to_2_bytes, b'a') self.assertRaises(TypeError, to_2_bytes, b'abcd') def test_to_6_bytes_w_ok(self): self.assertEqual(to_6_bytes(b'abcdef'), b'abcdef') def test_to_6_bytes_w_invalid_length(self): self.assertRaises(TypeError, to_6_bytes, b'a') self.assertRaises(TypeError, to_6_bytes, b'abcd') def test_coerce_to_6_bytes(self): # correct input is passed through self.assertEqual(to_6_bytes.coerce(b'abcdef'), b'abcdef') # small positive integers are converted self.assertEqual(to_6_bytes.coerce(1), b'\x00\x00\x00\x00\x00\x01') # negative values are disallowed self.assertRaises(TypeError, to_6_bytes.coerce, -1) # values outside the bigger than 64-bits are disallowed self.assertRaises(TypeError, to_6_bytes.coerce, 2 ** 64 + 1) 
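# ---------------------------------------------------------------------------
# Editor's illustration (not part of the original test suite): the
# ``Test_difference`` / ``Test_union`` / ``Test_weightedUnion`` /
# ``Test_multiunion`` classes earlier in this test package exercise the
# private ``BTrees._base`` helpers.  The sketch below shows, under the
# assumption that only the documented module-level functions of
# ``BTrees.IIBTree`` are used, roughly how the corresponding public API
# behaves.  It is a usage sketch, not an authoritative example.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    from BTrees.IIBTree import IIBucket
    from BTrees.IIBTree import IISet
    from BTrees.IIBTree import difference
    from BTrees.IIBTree import intersection
    from BTrees.IIBTree import multiunion
    from BTrees.IIBTree import union
    from BTrees.IIBTree import weightedUnion

    lhs = IISet([1, 2, 3])
    rhs = IIBucket({2: 20, 4: 40})

    # union and intersection return key sets regardless of mapping-ness.
    print(list(union(lhs, rhs)))         # [1, 2, 3, 4]
    print(list(intersection(lhs, rhs)))  # [2]
    # difference keeps the shape of its first argument; here a set of keys.
    print(list(difference(lhs, rhs)))    # [1, 3]

    # weightedUnion returns (weight, result); mapping values merge as
    # v1 * w1 + v2 * w2, with both weights defaulting to 1.
    weight, merged = weightedUnion(IIBucket({1: 1}), IIBucket({1: 2}))
    print(weight, list(merged.items()))  # 1 [(1, 3)]

    # multiunion folds a sequence of sets into a single IISet of keys.
    print(list(multiunion([IISet([1, 2]), IISet([2, 3])])))  # [1, 2, 3]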
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_btreesubclass.py0000644000076500000240000000503214626022106021013 0ustar00jensstaff############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import unittest from BTrees.OOBTree import OOBTree from BTrees.OOBTree import OOBucket class B(OOBucket): pass class T(OOBTree): _bucket_type = B max_leaf_size = 2 max_internal_size = 3 class S(T): pass class SubclassTest(unittest.TestCase): def testSubclass(self): # test that a subclass that defines _bucket_type gets buckets # of that type t = T() t[0] = 0 self.assertTrue(t._firstbucket.__class__ is B) def testCustomNodeSizes(self, TreeKind=S, BucketKind=B): # We override btree and bucket split sizes in BTree subclasses. t = TreeKind() for i in range(8): t[i] = i state = t.__getstate__()[0] self.assertEqual(len(state), 5) sub = state[0] # __class__ is a property in the Python implementation, and # if the C extension is available it returns the C version. self.assertIsInstance(sub, TreeKind) sub = sub.__getstate__()[0] self.assertEqual(len(sub), 5) sub = sub[0] self.assertIsInstance(sub, BucketKind) self.assertEqual(len(sub), 1) def _checkReplaceNodeSizes(self, TreeKind, BucketKind): # We can also change the node sizes globally. orig_leaf = TreeKind.max_leaf_size orig_internal = TreeKind.max_internal_size TreeKind.max_leaf_size = T.max_leaf_size TreeKind.max_internal_size = T.max_internal_size try: self.testCustomNodeSizes(TreeKind, BucketKind) finally: TreeKind.max_leaf_size = orig_leaf TreeKind.max_internal_size = orig_internal def testReplaceNodeSizesNative(self): self._checkReplaceNodeSizes(OOBTree, OOBucket) def testReplaceNodeSizesPython(self): from BTrees.OOBTree import OOBTreePy from BTrees.OOBTree import OOBucketPy self._checkReplaceNodeSizes(OOBTreePy, OOBucketPy) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_check.py0000644000076500000240000002536414626022106017241 0ustar00jensstaff############################################################################## # # Copyright (c) 2003 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################## import unittest def _assertRaises(self, e_type, checked, *args, **kw): try: checked(*args, **kw) except e_type as e: return e self.fail("Didn't raise: %s" % e_type.__name__) class Test_classify(unittest.TestCase): def _callFUT(self, obj): from BTrees.check import classify return classify(obj) def test_classify_w_unknown(self): class NotClassified: pass self.assertRaises(KeyError, self._callFUT, NotClassified()) def test_classify_w_bucket(self): from BTrees.check import TYPE_BUCKET from BTrees.OOBTree import OOBucketPy kind, is_mapping = self._callFUT(OOBucketPy()) self.assertEqual(kind, TYPE_BUCKET) self.assertTrue(is_mapping) def test_classify_w_set(self): from BTrees.check import TYPE_BUCKET from BTrees.OOBTree import OOSetPy kind, is_mapping = self._callFUT(OOSetPy()) self.assertEqual(kind, TYPE_BUCKET) self.assertFalse(is_mapping) def test_classify_w_tree(self): from BTrees.check import TYPE_BTREE from BTrees.OOBTree import OOBTreePy kind, is_mapping = self._callFUT(OOBTreePy()) self.assertEqual(kind, TYPE_BTREE) self.assertTrue(is_mapping) def test_classify_w_treeset(self): from BTrees.check import TYPE_BTREE from BTrees.OOBTree import OOTreeSetPy kind, is_mapping = self._callFUT(OOTreeSetPy()) self.assertEqual(kind, TYPE_BTREE) self.assertFalse(is_mapping) class Test_crack_btree(unittest.TestCase): def _callFUT(self, obj, is_mapping): from BTrees.check import crack_btree return crack_btree(obj, is_mapping) def test_w_empty_tree(self): from BTrees.check import BTREE_EMPTY class Empty: def __getstate__(self): return None kind, keys, kids = self._callFUT(Empty(), True) self.assertEqual(kind, BTREE_EMPTY) self.assertEqual(keys, []) self.assertEqual(kids, []) def test_w_degenerate_tree(self): from BTrees.check import BTREE_ONE class Degenerate: def __getstate__(self): return ((('a', 1, 'b', 2),),) kind, keys, kids = self._callFUT(Degenerate(), True) self.assertEqual(kind, BTREE_ONE) self.assertEqual(keys, ('a', 1, 'b', 2)) self.assertEqual(kids, None) def test_w_normal_tree(self): from BTrees.check import BTREE_NORMAL first_bucket = [object()] * 8 second_bucket = [object()] * 8 class Normal: def __getstate__(self): return ((first_bucket, 'b', second_bucket), first_bucket) kind, keys, kids = self._callFUT(Normal(), True) self.assertEqual(kind, BTREE_NORMAL) self.assertEqual(keys, ['b']) self.assertEqual(kids, [first_bucket, second_bucket]) class Test_crack_bucket(unittest.TestCase): def _callFUT(self, obj, is_mapping): from BTrees.check import crack_bucket return crack_bucket(obj, is_mapping) def test_w_empty_set(self): class EmptySet: def __getstate__(self): return ([],) keys, values = self._callFUT(EmptySet(), False) self.assertEqual(keys, []) self.assertEqual(values, []) def test_w_non_empty_set(self): class NonEmptySet: def __getstate__(self): return (['a', 'b', 'c'],) keys, values = self._callFUT(NonEmptySet(), False) self.assertEqual(keys, ['a', 'b', 'c']) self.assertEqual(values, []) def test_w_empty_mapping(self): class EmptyMapping: def __getstate__(self): return ([], object()) keys, values = self._callFUT(EmptyMapping(), True) self.assertEqual(keys, []) self.assertEqual(values, []) def test_w_non_empty_mapping(self): class NonEmptyMapping: def __getstate__(self): return (['a', 1, 'b', 2, 'c', 3], object()) keys, values = self._callFUT(NonEmptyMapping(), True) self.assertEqual(keys, ['a', 'b', 'c']) self.assertEqual(values, [1, 2, 3]) class Test_type_and_adr(unittest.TestCase): def _callFUT(self, 
obj): from BTrees.check import type_and_adr return type_and_adr(obj) def test_type_and_adr_w_oid(self): from BTrees.utils import oid_repr class WithOid: _p_oid = b'DEADBEEF' t_and_a = self._callFUT(WithOid()) self.assertTrue(t_and_a.startswith('WithOid (0x')) self.assertTrue(t_and_a.endswith('oid=%s)' % oid_repr(b'DEADBEEF'))) def test_type_and_adr_wo_oid(self): class WithoutOid: pass t_and_a = self._callFUT(WithoutOid()) self.assertTrue(t_and_a.startswith('WithoutOid (0x')) self.assertTrue(t_and_a.endswith('oid=None)')) class WalkerTests(unittest.TestCase): def _getTargetClass(self): from BTrees.check import Walker return Walker def _makeOne(self, obj): return self._getTargetClass()(obj) def test_visit_btree_abstract(self): walker = self._makeOne(object()) obj = object() path = '/' parent = object() is_mapping = True keys = [] kids = [] lo = 0 hi = None self.assertRaises(NotImplementedError, walker.visit_btree, obj, path, parent, is_mapping, keys, kids, lo, hi) def test_visit_bucket_abstract(self): walker = self._makeOne(object()) obj = object() path = '/' parent = object() is_mapping = True keys = [] kids = [] lo = 0 hi = None self.assertRaises(NotImplementedError, walker.visit_bucket, obj, path, parent, is_mapping, keys, kids, lo, hi) def test_walk_w_empty_bucket(self): from BTrees.OOBTree import OOBucket obj = OOBucket() walker = self._makeOne(obj) self.assertRaises(NotImplementedError, walker.walk) def test_walk_w_empty_btree(self): from BTrees.OOBTree import OOBTree obj = OOBTree() walker = self._makeOne(obj) self.assertRaises(NotImplementedError, walker.walk) def test_walk_w_degenerate_btree(self): from BTrees.OOBTree import OOBTree obj = OOBTree() obj['a'] = 1 walker = self._makeOne(obj) self.assertRaises(NotImplementedError, walker.walk) def test_walk_w_normal_btree(self): from BTrees.IIBTree import IIBTree obj = IIBTree() for i in range(1000): obj[i] = i walker = self._makeOne(obj) self.assertRaises(NotImplementedError, walker.walk) class CheckerTests(unittest.TestCase): assertRaises = _assertRaises def _getTargetClass(self): from BTrees.check import Checker return Checker def _makeOne(self, obj): return self._getTargetClass()(obj) def test_walk_w_empty_bucket(self): from BTrees.OOBTree import OOBucket obj = OOBucket() checker = self._makeOne(obj) checker.check() # noraise def test_walk_w_empty_btree(self): obj = _makeTree(False) checker = self._makeOne(obj) checker.check() # noraise def test_walk_w_degenerate_btree(self): obj = _makeTree(False) obj['a'] = 1 checker = self._makeOne(obj) checker.check() # noraise def test_walk_w_normal_btree(self): obj = _makeTree(False) checker = self._makeOne(obj) checker.check() # noraise def test_walk_w_key_too_large(self): obj = _makeTree(True) state = obj.__getstate__() # Damage an invariant by dropping the BTree key to 14. new_state = (state[0][0], 14, state[0][2]), state[1] obj.__setstate__(new_state) checker = self._makeOne(obj) e = self.assertRaises(AssertionError, checker.check) self.assertTrue(">= upper bound" in str(e)) def test_walk_w_key_too_small(self): obj = _makeTree(True) state = obj.__getstate__() # Damage an invariant by bumping the BTree key to 16. new_state = (state[0][0], 16, state[0][2]), state[1] obj.__setstate__(new_state) checker = self._makeOne(obj) e = self.assertRaises(AssertionError, checker.check) self.assertTrue("< lower bound" in str(e)) def test_walk_w_keys_swapped(self): obj = _makeTree(True) state = obj.__getstate__() # Damage an invariant by bumping the BTree key to 16. 
(b0, num, b1), firstbucket = state self.assertEqual(b0[4], 8) self.assertEqual(b0[5], 10) b0state = b0.__getstate__() self.assertEqual(len(b0state), 2) # b0state looks like # ((k0, v0, k1, v1, ...), nextbucket) pairs, nextbucket = b0state self.assertEqual(pairs[8], 4) self.assertEqual(pairs[9], 8) self.assertEqual(pairs[10], 5) self.assertEqual(pairs[11], 10) newpairs = pairs[:8] + (5, 10, 4, 8) + pairs[12:] b0.__setstate__((newpairs, nextbucket)) checker = self._makeOne(obj) e = self.assertRaises(AssertionError, checker.check) self.assertTrue("key 5 at index 4 >= key 4 at index 5" in str(e)) class Test_check(unittest.TestCase): def _callFUT(self, tree): from BTrees.check import check return check(tree) def _makeOne(self): from BTrees.OOBTree import OOBTree tree = OOBTree() for i in range(31): tree[i] = 2*i return tree def test_normal(self): from BTrees.OOBTree import OOBTree tree = OOBTree() for i in range(31): tree[i] = 2*i state = tree.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 3) self.assertEqual(state[0][1], 15) self._callFUT(tree) # noraise def _makeTree(fill): from BTrees.OOBTree import OOBTree from BTrees.OOBTree import OOBTreePy tree = OOBTree() if fill: for i in range(OOBTreePy.max_leaf_size + 1): tree[i] = 2*i return tree ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484530.0 BTrees-6.0/src/BTrees/tests/test_compile_flags.py0000644000076500000240000000241714330745562020774 0ustar00jensstaff############################################################################## # # Copyright (c) 2022 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import struct import unittest from BTrees import OOBTree # noqa: try to load a C module for side effects class TestFloatingPoint(unittest.TestCase): def test_no_fast_math_optimization(self): # Building with -Ofast enables -ffast-math, which sets certain FPU # flags that can cause breakage elsewhere. A library such as BTrees # has no business changing global FPU flags for the entire process. zero_bits = struct.unpack("!Q", struct.pack("!d", 0.0))[0] next_up = zero_bits + 1 smallest_subnormal = struct.unpack("!d", struct.pack("!Q", next_up))[0] self.assertNotEqual(smallest_subnormal, 0.0) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_dynamic_btrees.py0000644000076500000240000000331314626022106021142 0ustar00jensstaff############################################################################## # # Copyright (c) 2020 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## # Dynamically creates test modules and suites for expected BTree families # that do not have their own test file on disk. import importlib import sys import types import unittest from BTrees import _FAMILIES from ._test_builder import update_module # If there is no .py file on disk, create the module in memory. # This is helpful during early development. However, it # doesn't work with zope-testrunner's ``-m`` filter. _suite = unittest.TestSuite() for family in _FAMILIES: mod_qname = "BTrees.tests.test_" + family + 'BTree' try: importlib.import_module(mod_qname) except ImportError: btree = importlib.import_module("BTrees." + family + 'BTree') mod = types.ModuleType(mod_qname) update_module(vars(mod), btree) sys.modules[mod_qname] = mod globals()[mod_qname.split('.', 1)[1]] = mod _suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(mod)) def test_suite(): # zope.testrunner protocol return _suite def load_tests(loader, standard_tests, pattern): # Pure unittest protocol. return test_suite() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/tests/test_fsBTree.py0000644000076500000240000000451014626022106017504 0ustar00jensstaff############################################################################## # # Copyright (c) 2010 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. 
# ############################################################################## import unittest from BTrees import fsBTree from ._test_builder import update_module class fsBucketTests(unittest.TestCase): def _getTargetClass(self): return fsBTree.fsBucket def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def _makeBytesItems(self): from .._compat import _ascii return [(_ascii(c*2), _ascii(c*6)) for c in 'abcdef'] def test_toString(self): bucket = self._makeOne(self._makeBytesItems()) self.assertEqual(bucket.toString(), b'aabbccddeeffaaaaaabbbbbbccccccddddddeeeeeeffffff') def test_fromString(self): before = self._makeOne(self._makeBytesItems()) after = before.fromString(before.toString()) self.assertEqual(before.__getstate__(), after.__getstate__()) def test_fromString_empty(self): before = self._makeOne(self._makeBytesItems()) after = before.fromString(b'') self.assertEqual(after.__getstate__(), ((),)) def test_fromString_invalid_length(self): bucket = self._makeOne(self._makeBytesItems()) self.assertRaises(ValueError, bucket.fromString, b'xxx') class fsBucketPyTests(fsBucketTests): def _getTargetClass(self): return fsBTree.fsBucketPy class fsTreeTests(unittest.TestCase): def _check_sizes(self, cls): self.assertEqual(cls.max_leaf_size, 500) self.assertEqual(cls.max_internal_size, 500) def test_BTree_sizes(self): self._check_sizes(fsBTree.BTree) self._check_sizes(fsBTree.BTreePy) def test_TreeSet_sizes(self): self._check_sizes(fsBTree.TreeSet) self._check_sizes(fsBTree.TreeSetPy) update_module(globals(), fsBTree) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1672749518.0 BTrees-6.0/src/BTrees/tests/test_utils.py0000644000076500000240000000445414355020716017325 0ustar00jensstaff############################################################################## # # Copyright (c) 2001-2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## import unittest class Test_non_negative(unittest.TestCase): def _callFUT(self, int_val): from BTrees.utils import non_negative return non_negative(int_val) def test_w_big_negative(self): self.assertEqual(self._callFUT(-(2**63 - 1)), 1) def test_w_negative(self): self.assertEqual(self._callFUT(-1), 2**63 - 1) def test_w_zero(self): self.assertEqual(self._callFUT(0), 0) def test_w_positive(self): self.assertEqual(self._callFUT(1), 1) class Test_oid_repr(unittest.TestCase): def _callFUT(self, oid): from BTrees.utils import oid_repr return oid_repr(oid) def test_w_non_strings(self): self.assertEqual(self._callFUT(None), repr(None)) self.assertEqual(self._callFUT(()), repr(())) self.assertEqual(self._callFUT([]), repr([])) self.assertEqual(self._callFUT({}), repr({})) self.assertEqual(self._callFUT(0), repr(0)) def test_w_short_strings(self): for length in range(8): faux = 'x' * length self.assertEqual(self._callFUT(faux), repr(faux)) def test_w_long_strings(self): for length in range(9, 1024): faux = 'x' * length self.assertEqual(self._callFUT(faux), repr(faux)) def test_w_zero(self): self.assertEqual(self._callFUT(b'\0\0\0\0\0\0\0\0'), b'0x00') def test_w_one(self): self.assertEqual(self._callFUT(b'\0\0\0\0\0\0\0\1'), b'0x01') def test_w_even_length(self): self.assertEqual(self._callFUT(b'\0\0\0\0\0\0\xAB\xC4'), b'0xabc4') def test_w_odd_length(self): self.assertEqual(self._callFUT(b'\0\0\0\0\0\0\x0D\xEF'), b'0x0def') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/src/BTrees/utils.py0000644000076500000240000000340714626022106015115 0ustar00jensstaff############################################################################## # # Copyright (c) 2001-2012 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## # Copied from ZODB/utils.py from binascii import hexlify def non_negative(int_val): if int_val < 0: # Coerce to non-negative. int_val &= 0x7FFFFFFFFFFFFFFF return int_val def positive_id(obj): # pragma: no cover """Return id(obj) as a non-negative integer.""" return non_negative(id(obj)) def oid_repr(oid): if isinstance(oid, bytes) and len(oid) == 8: # Convert to hex and strip leading zeroes. as_hex = hexlify(oid).lstrip(b'0') # Ensure two characters per input byte. 
chunks = [b'0x'] if len(as_hex) & 1: chunks.append(b'0') elif as_hex == b'': as_hex = b'00' chunks.append(as_hex) return b''.join(chunks) else: return repr(oid) class Lazy: """ A simple version of ``Lazy`` from ``zope.cachedescriptors`` """ __slots__ = ('func',) def __init__(self, func): self.func = func def __get__(self, inst, class_): if inst is None: return self func = self.func value = func(inst) inst.__dict__[func.__name__] = value return value ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1717060227.4946203 BTrees-6.0/src/BTrees.egg-info/0000755000076500000240000000000014626041203015070 5ustar00jensstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717060227.0 BTrees-6.0/src/BTrees.egg-info/PKG-INFO0000644000076500000240000005106114626041203016170 0ustar00jensstaffMetadata-Version: 2.1 Name: BTrees Version: 6.0 Summary: Scalable persistent object containers Home-page: https://github.com/zopefoundation/BTrees Author: Zope Foundation Author-email: zodb-dev@zope.org License: ZPL 2.1 Project-URL: Documentation, https://btrees.readthedocs.io Project-URL: Issue Tracker, https://github.com/zopefoundation/BTrees/issues Project-URL: Sources, https://github.com/zopefoundation/BTrees Platform: any Classifier: Development Status :: 6 - Mature Classifier: License :: OSI Approved :: Zope Public License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Framework :: ZODB Classifier: Topic :: Database Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: Unix Requires-Python: >=3.8 License-File: LICENSE.txt Requires-Dist: persistent>=4.1.0 Requires-Dist: zope.interface>=5.0.0 Provides-Extra: test Requires-Dist: persistent>=4.4.3; extra == "test" Requires-Dist: transaction; extra == "test" Requires-Dist: zope.testrunner; extra == "test" Provides-Extra: zodb Requires-Dist: ZODB; extra == "zodb" Provides-Extra: docs Requires-Dist: Sphinx; extra == "docs" Requires-Dist: repoze.sphinx.autointerface; extra == "docs" Requires-Dist: sphinx_rtd_theme; extra == "docs" ============================================= ``BTrees``: scalable persistent components ============================================= .. image:: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml/badge.svg :target: https://github.com/zopefoundation/BTrees/actions/workflows/tests.yml .. image:: https://ci.appveyor.com/api/projects/status/github/zopefoundation/BTrees?branch=master&svg=true :target: https://ci.appveyor.com/project/mgedmin/BTrees .. image:: https://coveralls.io/repos/github/zopefoundation/BTrees/badge.svg?branch=master :target: https://coveralls.io/github/zopefoundation/BTrees?branch=master .. image:: https://readthedocs.org/projects/btrees/badge/?version=latest :target: https://btrees.readthedocs.io/en/latest/ :alt: Documentation Status .. 
image:: https://img.shields.io/pypi/v/BTrees.svg :target: https://pypi.org/project/BTrees/ :alt: Current version on PyPI .. image:: https://img.shields.io/pypi/pyversions/BTrees.svg :target: https://pypi.org/project/BTrees/ :alt: Supported Python versions This package contains a set of persistent object containers built around a modified BTree data structure. The trees are optimized for use inside ZODB's "optimistic concurrency" paradigm, and include explicit resolution of conflicts detected by that mechanism. Please see `the Sphinx documentation `_ for further information. ================== BTrees Changelog ================== 6.0 (2024-05-30) ================ - Drop support for Python 3.7. - Build Windows wheels on GHA. 5.2 (2024-02-07) ================ - Add preliminary support for Python 3.13 as of 3.13a3. 5.1 (2023-10-05) ================ - Drop using ``setup_requires`` due to constant problems on GHA. - Add support for Python 3.12. 5.0 (2023-02-10) ================ - Build Linux binary wheels for Python 3.11. - Drop support for Python 2.7, 3.5, 3.6. 4.11.3 (2022-11-17) =================== - point release to rebuild full set of wheels 4.11.2 (2022-11-16) =================== - Add support for building arm64 wheels on macOS. 4.11.1 (2022-11-09) =================== - Fix macOS wheel build issues on GitHub Actions - We no longer provide 32bit wheels for the Windows platform, only x86_64. 4.11.0 (2022-11-03) =================== - Add support for Python 3.11. 4.10.1 (2022-09-12) =================== - Disable unsafe math optimizations in C code. (`#184 `_) 4.10.0 (2022-03-09) =================== - Add support for Python 3.10. 4.9.2 (2021-06-09) ================== - Fix ``fsBTree.TreeSet`` and ``fsBTree.BTree`` raising ``SystemError``. See `issue 170 `_. - Fix all the ``fsBTree`` objects to provide the correct interfaces and be instances of the appropriate collection ABCs. This was done for the other modules in release 4.8.0. - Fix the ``multiunion``, ``union``, ``intersection``, and ``difference`` functions when used with arbitrary iterables. Previously, the iterable had to be pre-sorted, meaning only sequences like ``list`` and ``tuple`` could reliably be used; this was not documented though. If the iterable wasn't sorted, the function would produce garbage output. Now, if the function detects an arbitrary iterable, it automatically sorts a copy. 4.9.1 (2021-05-27) ================== - Fix setting unknown class attributes on subclasses of BTrees when using the C extension. This prevented subclasses from being decorated with ``@component.adapter()``. See `issue 168 `_. 4.9.0 (2021-05-26) ================== - Fix the C implementation to match the Python implementation and allow setting custom node sizes for an entire application directly by changing ``BTree.max_leaf_size`` and ``BTree.max_internal_size`` attributes, without having to create a new subclass. These attributes can now also be read from the classes in the C implementation. See `issue 166 `_. - Add various small performance improvements for storing zope.interface attributes on ``BTree`` and ``TreeSet`` as well as deactivating persistent objects from this package. 4.8.0 (2021-04-14) ================== - Make Python 2 forbid the use of type objects as keys (unless a custom metaclass is used that implements comparison as required by BTrees.) On Python 3, types are not orderable so they were already forbidden, but on Python 2 types can be ordered by memory address, which makes them unsuitable for use as keys. See `issue `_. 
- Make the ``multiunion``, ``union``, ``intersection``, and ``difference`` functions accept arbitrary Python iterables (that iterate across the correct types). Previously, the Python implementation allowed this, but the C implementation only allowed objects (like ``TreeSet`` or ``Bucket``) defined in the same module providing the function. See `issue 24 `_. - Fix persistency bug in the Python version (`#118 `_). - Fix ``Tree.__setstate__`` to no longer accept children besides tree or bucket types to prevent crashes. See `PR 143 `_ for details. - Make BTrees, TreeSet, Set and Buckets implements the ``__and__``, ``__or__`` and ``__sub__`` special methods as shortcuts for ``BTrees.Interfaces.IMerge.intersection``, ``BTrees.Interfaces.IMerge.union`` and ``BTrees.Interfaces.IMerge.difference``. - Add support for Python 3.9. - Build and upload aarch64 wheels. - Make a value of ``0`` in the ``PURE_PYTHON`` environment variable require the C extensions (except on PyPy). Previously, and if this variable is unset, missing or unusable C extensions would be silently ignored. With this variable set to ``0``, an ``ImportError`` will be raised if the C extensions are unavailable. See `issue 156 `_. - Make the BTree objects (``BTree``, ``TreeSet``, ``Set``, ``Bucket``) of each module actually provide the interfaces defined in ``BTrees.Interfaces``. Previously, they provided no interfaces. - Make all the BTree and Bucket objects instances of ``collections.abc.MutableMapping`` (that is, ``isinstance(btree, MutableMapping)`` is now true; no actual inheritance has changed). As part of this, they now provide the ``popitem()`` method. - Make all the TreeSet and Set objects instances of ``collections.abc.MutableSet`` (that is, ``isinstance(tree_set, MutableSet)`` is now true; no actual inheritance has changed). As part of this, they now provide several more methods, including ``isdisjoint``, ``discard``, and ``pop``, and support in-place mutation operators such as ``tree_set |= other``, ``tree_set += other``, ``tree_set -= other`` and ``tree_set ^= other``. See `issue 121 `_. - Update the definitions of ``ISized`` and ``IReadSequence`` to simply be ``zope.interface.common.collections.ISized`` and ``zope.interface.common.sequence.IMinimalSequence`` respectively. - Remove the ``__nonzero__`` interface method from ``ICollection``. No objects actually implemented such a method; instead, the boolean value is typically taken from ``__len__``. - Adjust the definition of ``ISet`` to produce the same resolution order under the C3 and legacy orderings. This means that the legacy order has changed slightly, but that this package emits no warnings when ``ZOPE_INTERFACE_LOG_CHANGED_IRO=1``. Note that the legacy order was not being used for these objects because the C3 ordering was still consistent; it could only be obtained using ``ZOPE_INTERFACE_USE_LEGACY_IRO=1``. See `PR 159 `_ for all the interface updates. - Fix the ``get``, ``setdefault`` and ``pop`` methods, as well as the ``in`` operator, to not suppress ``POSKeyError`` if the object or subobjects are corrupted. Previously, such errors were logged by ZODB, but not propagated. See `issue 161 `_. 4.7.2 (2020-04-07) ================== - Fix more cases of C and Python inconsistency. The C implementation now behaves like the Python implementation when it comes to integer overflow for the integer keys for ``in``, ``get`` and ``has_key``. Now they return False, the default value, and False, respectively in both versions if the tested value would overflow or underflow. 
Previously, the C implementation would raise ``OverflowError`` or ``KeyError``, while the Python implementation functioned as expected. See `issue 140 `_. .. note:: The unspecified true return values of ``has_key`` have changed. 4.7.1 (2020-03-22) ================== - Fix the definitions of ``__all__`` in modules. In 4.7.0, they incorrectly left out names. See `PR 132 `_. - Ensure the interface resolution order of all objects is consistent. See `issue 137 `_. 4.7.0 (2020-03-17) ================== - Add unsigned variants of the trees. These use the initial "U" for 32-bit data and "Q" for 64-bit data (for "quad", which is similar to what the C ``printf`` function uses and the Python struct module uses). - Fix the value for ``BTrees.OIBTree.using64bits`` when using the pure Python implementation (PyPy and when ``PURE_PYTHON`` is in the environment). - Make the errors that are raised when values are out of range more consistent between Python 2 and Python 3 and between 32-bit and 64-bit variants. - Make the Bucket types consistent with the BTree types as updated in versions 4.3.2: Querying for keys with default comparisons or that are not integers no longer raises ``TypeError``. 4.6.1 (2019-11-07) ================== - Add support for Python 3.8. 4.6.0 (2019-07-30) ================== - Drop support for Python 3.4. - Fix tests against persistent 4.4. - Stop accidentally installing the 'terryfy' package in macOS wheels. See `issue 98 `_. - Fix segmentation fault in ``bucket_repr()``. See `issue 106 `_. 4.5.1 (2018-08-09) ================== - Produce binary wheels for Python 3.7. - Use pyproject.toml to specify build dependencies. This requires pip 18 or later to build from source. 4.5.0 (2018-04-23) ================== - Add support for Python 3.6 and 3.7. - Drop support for Python 3.3. - Raise an ``ImportError`` consistently on Python 3 if the C extension for BTrees is used but the ``persistent`` C extension is not available. Previously this could result in an odd ``AttributeError``. See https://github.com/zopefoundation/BTrees/pull/55 - Fix the possibility of a rare crash in the C extension when deallocating items. See https://github.com/zopefoundation/BTrees/issues/75 - Respect the ``PURE_PYTHON`` environment variable at runtime even if the C extensions are available. See https://github.com/zopefoundation/BTrees/issues/78 - Always attempt to build the C extensions, but make their success optional. - Fix a ``DeprecationWarning`` that could come from I and L objects in Python 2 in pure-Python mode. See https://github.com/zopefoundation/BTrees/issues/79 4.4.1 (2017-01-24) ================== Fixed a packaging bug that caused extra files to be included (some of which caused problems in some platforms). 4.4.0 (2017-01-11) ================== - Allow None as a special key (sorted smaller than all others). This is a bit of a return to BTrees 3 behavior in that Nones are allowed as keys again. Other objects with default ordering are still not allowed as keys. 4.3.2 (2017-01-05) ================== - Make the CPython implementation consistent with the pure-Python implementation and only check object keys for default comparison when setting keys. In Python 2 this makes it possible to remove keys that were added using a less restrictive version of BTrees. (In Python 3 keys that are unorderable still cannot be removed.) Likewise, all versions can unpickle trees that already had such keys. 
See: https://github.com/zopefoundation/BTrees/issues/53 and https://github.com/zopefoundation/BTrees/issues/51 - Make the Python implementation consistent with the CPython implementation and check object key identity before checking equality and performing comparisons. This can allow fixing trees that have keys that now have broken comparison functions. See https://github.com/zopefoundation/BTrees/issues/50 - Make the CPython implementation consistent with the pure-Python implementation and no longer raise ``TypeError`` for an object key (in object-keyed trees) with default comparison on ``__getitem__``, ``get`` or ``in`` operations. Instead, the results will be a ``KeyError``, the default value, and ``False``, respectively. Previously, CPython raised a ``TypeError`` in those cases, while the Python implementation behaved as specified. Likewise, non-integer keys in integer-keyed trees will raise ``KeyError``, return the default and return ``False``, respectively, in both implementations. Previously, pure-Python raised a ``KeyError``, returned the default, and raised a ``TypeError``, while CPython raised ``TypeError`` in all three cases. 4.3.1 (2016-05-16) ================== - Packaging: fix password used to automate wheel creation on Travis. 4.3.0 (2016-05-10) ================== - Fix unexpected ``OverflowError`` when passing 64bit values to long keys / values on Win64. See: https://github.com/zopefoundation/BTrees/issues/32 - When testing ``PURE_PYTHON`` environments under ``tox``, avoid poisoning the user's global wheel cache. - Ensure that the pure-Python implementation, used on PyPy and when a C compiler isn't available for CPython, pickles identically to the C version. Unpickling will choose the best available implementation. This change prevents interoperability problems and database corruption if both implementations are in use. While it is no longer possible to pickle a Python implementation and have it unpickle to the Python implementation if the C implementation is available, existing Python pickles will still unpickle to the Python implementation (until pickled again). See: https://github.com/zopefoundation/BTrees/issues/19 - Avoid creating invalid objects when unpickling empty BTrees in a pure-Python environment. - Drop support for Python 2.6 and 3.2. 4.2.0 (2015-11-13) ================== - Add support for Python 3.5. 4.1.4 (2015-06-02) ================== - Ensure that pure-Python Bucket and Set objects have a human readable ``__repr__`` like the C versions. 4.1.3 (2015-05-19) ================== - Fix ``_p_changed`` when removing items from small pure-Python BTrees/TreeSets and when adding items to small pure-Python Sets. See: https://github.com/zopefoundation/BTrees/issues/13 4.1.2 (2015-04-07) ================== - Suppress testing 64-bit values in OLBTrees on 32 bit machines. See: https://github.com/zopefoundation/BTrees/issues/9 - Fix ``_p_changed`` when adding items to small pure-Python BTrees/TreeSets. See: https://github.com/zopefoundation/BTrees/issues/11 4.1.1 (2014-12-27) ================== - Accomodate long values in pure-Python OLBTrees. 4.1.0 (2014-12-26) ================== - Add support for PyPy and PyPy3. - Add support for Python 3.4. - BTree subclasses can define ``max_leaf_size`` or ``max_internal_size`` to control maximum sizes for Bucket/Set and BTree/TreeSet nodes. - Detect integer overflow on 32-bit machines correctly under Python 3. - Update pure-Python and C trees / sets to accept explicit None to indicate max / min value for ``minKey``, ``maxKey``. 
4.1.0 (2014-12-26)
==================

- Add support for PyPy and PyPy3.

- Add support for Python 3.4.

- BTree subclasses can define ``max_leaf_size`` or ``max_internal_size``
  to control the maximum sizes for Bucket/Set and BTree/TreeSet nodes.

- Detect integer overflow on 32-bit machines correctly under Python 3.

- Update pure-Python and C trees / sets to accept an explicit ``None`` to
  indicate the max / min value for ``minKey``, ``maxKey``. (PR #3)

- Update pure-Python trees / sets to accept an explicit ``None`` to indicate
  open ranges for ``keys``, ``values``, ``items``. (PR #3)
  (See the sketch at the end of this changelog.)

4.0.8 (2013-05-25)
==================

- Fix value-based comparison for objects under Py3k: addresses invalid
  merges of ``[OLI]OBTrees/OBuckets``.

- Ensure that the pure-Python implementation of ``OOBTree.byValue`` matches
  the semantics (reversed-sort) of the C implementation.

4.0.7 (2013-05-22)
==================

- Issue #2: fix compilation error in 32-bit mode on OS X.

- Test ``PURE_PYTHON`` environment variable support: if set, the C
  extensions will not be built, imported, or tested.

4.0.6 (2013-05-14)
==================

- Changed the ``ZODB`` extra to require only the real ``ZODB`` package,
  rather than the ``ZODB3`` metapackage: depending on the version used, the
  metapackage could pull in stale versions of **this** package and
  ``persistent``.

- Fixed the Python version check in ``setup.py``.

4.0.5 (2013-01-15)
==================

- Fix the ``repr`` of bucket objects, which could contain garbage characters.

4.0.4 (2013-01-12)
==================

- Emulate the (private) iterators used by the C extension modules from pure
  Python. This change is "cosmetic" only: it prevents the ZCML
  ``zope.app.security:permission.zcml`` from failing. The emulated classes
  are **not** functional, and should be considered implementation details.

- Accommodate buildout to the fact that we no longer bundle a copy of
  'persistent.h'.

- Fix test failures on Windows: no longer rely on overflows from
  ``sys.maxint``.

4.0.3 (2013-01-04)
==================

- Added ``setup_requires==['persistent']``.

4.0.2 (2013-01-03)
==================

- Updated Trove classifiers.

- Added explicit support for Python 3.2, Python 3.3, and PyPy. Note that
  the C extensions are not (yet) available on PyPy.

- Python reference implementations now tested separately from the C versions
  on all platforms.

- 100% unit test coverage.

4.0.1 (2012-10-21)
==================

- Provide a local fallback for the persistent C header inclusion if the
  persistent distribution isn't installed. This makes the winbot happy.

4.0.0 (2012-10-20)
==================

Platform Changes
----------------

- Dropped support for Python < 2.6.

- Factored ``BTrees`` out as a separate distribution.

Testing Changes
---------------

- All covered platforms tested under ``tox``.

- Added support for continuous integration using ``tox`` and ``jenkins``.

- Added a ``setup.py dev`` alias (installs ``nose`` and ``coverage``).

- Dropped the dependency on ``zope.testing`` / ``zope.testrunner``: tests
  now run with ``setup.py test``.

Documentation Changes
---------------------

- Added an API reference, generated via Sphinx's autodoc.

- Added Sphinx documentation based on the ZODB Guide (snippets are exercised
  via ``tox``).

- Added a ``setup.py docs`` alias (installs ``Sphinx`` and
  ``repoze.sphinx.autointerface``).
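For illustration of the range-related 4.1.0 entries above (a minimal sketch,
not part of the original changelog), passing an explicit ``None`` is treated
as "no bound" in recent releases::

    from BTrees.OOBTree import OOBTree

    tree = OOBTree()
    tree.update({'a': 1, 'b': 2, 'c': 3, 'd': 4})

    print(tree.minKey(None))            # 'a'  (no lower bound)
    print(tree.maxKey(None))            # 'd'  (no upper bound)
    print(list(tree.keys(None, 'b')))   # ['a', 'b']
    print(list(tree.keys('c', None)))   # ['c', 'd']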
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717060227.0 BTrees-6.0/src/BTrees.egg-info/SOURCES.txt0000644000076500000240000000551414626041203016761 0ustar00jensstaff.coveragerc .manylinux-install.sh .manylinux.sh .readthedocs.yaml CHANGES.rst CONTRIBUTING.md COPYRIGHT.txt LICENSE.txt MANIFEST.in README.rst buildout.cfg pyproject.toml setup.cfg setup.py tox.ini docs/Makefile docs/api.rst docs/changes.rst docs/conf.py docs/development.rst docs/index.rst docs/make.bat docs/overview.rst docs/requirements.txt docs/_build/doctest/output.txt docs/_build/html/_sources/api.rst.txt docs/_build/html/_sources/changes.rst.txt docs/_build/html/_sources/development.rst.txt docs/_build/html/_sources/index.rst.txt docs/_build/html/_sources/overview.rst.txt docs/_build/html/_static/basic.css docs/_build/html/_static/custom.css docs/_build/html/_static/placeholder.txt docs/_build/html/_static/pygments.css docs/_build/html/_static/css/badge_only.css docs/_build/html/_static/css/theme.css docs/_static/custom.css docs/_static/placeholder.txt docs/_templates/placeholder.txt include/persistent/persistent/_compat.h include/persistent/persistent/cPersistence.h include/persistent/persistent/ring.h src/BTrees/BTreeItemsTemplate.c src/BTrees/BTreeModuleTemplate.c src/BTrees/BTreeTemplate.c src/BTrees/BucketTemplate.c src/BTrees/Interfaces.py src/BTrees/Length.py src/BTrees/MergeTemplate.c src/BTrees/SetOpTemplate.c src/BTrees/SetOpTemplate.h src/BTrees/SetTemplate.c src/BTrees/TreeSetTemplate.c src/BTrees/_IFBTree.c src/BTrees/_IIBTree.c src/BTrees/_IOBTree.c src/BTrees/_IUBTree.c src/BTrees/_LFBTree.c src/BTrees/_LLBTree.c src/BTrees/_LOBTree.c src/BTrees/_LQBTree.c src/BTrees/_OIBTree.c src/BTrees/_OLBTree.c src/BTrees/_OOBTree.c src/BTrees/_OQBTree.c src/BTrees/_OUBTree.c src/BTrees/_QFBTree.c src/BTrees/_QLBTree.c src/BTrees/_QOBTree.c src/BTrees/_QQBTree.c src/BTrees/_UFBTree.c src/BTrees/_UIBTree.c src/BTrees/_UOBTree.c src/BTrees/_UUBTree.c src/BTrees/__init__.py src/BTrees/_base.py src/BTrees/_compat.h src/BTrees/_compat.py src/BTrees/_datatypes.py src/BTrees/_fsBTree.c src/BTrees/_module_builder.py src/BTrees/check.py src/BTrees/floatvaluemacros.h src/BTrees/intkeymacros.h src/BTrees/intvaluemacros.h src/BTrees/objectkeymacros.h src/BTrees/objectvaluemacros.h src/BTrees/sorters.c src/BTrees/utils.py src/BTrees.egg-info/PKG-INFO src/BTrees.egg-info/SOURCES.txt src/BTrees.egg-info/dependency_links.txt src/BTrees.egg-info/not-zip-safe src/BTrees.egg-info/requires.txt src/BTrees.egg-info/top_level.txt src/BTrees/tests/__init__.py src/BTrees/tests/_test_builder.py src/BTrees/tests/common.py src/BTrees/tests/testBTrees.py src/BTrees/tests/testConflict.py src/BTrees/tests/testPersistency.py src/BTrees/tests/test_Length.py src/BTrees/tests/test_OOBTree.py src/BTrees/tests/test__base.py src/BTrees/tests/test__datatypes.py src/BTrees/tests/test_btreesubclass.py src/BTrees/tests/test_check.py src/BTrees/tests/test_compile_flags.py src/BTrees/tests/test_dynamic_btrees.py src/BTrees/tests/test_fsBTree.py src/BTrees/tests/test_utils.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717060227.0 BTrees-6.0/src/BTrees.egg-info/dependency_links.txt0000644000076500000240000000000114626041203021136 0ustar00jensstaff ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667484661.0 BTrees-6.0/src/BTrees.egg-info/not-zip-safe0000644000076500000240000000000114330745765017336 0ustar00jensstaff 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717060227.0 BTrees-6.0/src/BTrees.egg-info/requires.txt0000644000076500000240000000024714626041203017473 0ustar00jensstaffpersistent>=4.1.0
zope.interface>=5.0.0

[ZODB]
ZODB

[docs]
Sphinx
repoze.sphinx.autointerface
sphinx_rtd_theme

[test]
persistent>=4.4.3
transaction
zope.testrunner
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717060227.0 BTrees-6.0/src/BTrees.egg-info/top_level.txt0000644000076500000240000000000714626041203017617 0ustar00jensstaffBTrees
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1717052486.0 BTrees-6.0/tox.ini0000644000076500000240000000424714626022106012746 0ustar00jensstaff# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/c-code
[tox]
minversion = 4.0
envlist =
    lint
    py38,py38-pure
    py39,py39-pure
    py310,py310-pure
    py311,py311-pure
    py312,py312-pure
    py313,py313-pure
    pypy3
    docs
    coverage
    w_zodb
    w_zodb-pure

[testenv]
usedevelop = true
pip_pre = py313: true
deps =
    setuptools < 69
    Sphinx
setenv =
    pure: PURE_PYTHON=1
    !pure-!pypy3: PURE_PYTHON=0
    PYTHONFAULTHANDLER=1
    PYTHONDEVMODE=1
    ZOPE_INTERFACE_STRICT_IRO=1
    ZOPE_INTERFACE_LOG_CHANGED_IRO=1
    py312: VIRTUALENV_PIP=23.1.2
    py312: PIP_REQUIRE_VIRTUALENV=0
commands =
    zope-testrunner --test-path=src {posargs:-vc}
    sphinx-build -b doctest -d {envdir}/.cache/doctrees docs {envdir}/.cache/doctest
extras =
    test
    docs

[testenv:w_zodb]
basepython = python3.11
deps = ZODB

[testenv:w_zodb-pure]
basepython = python3.11
deps = ZODB

[testenv:coverage]
basepython = python3
allowlist_externals =
    mkdir
deps =
    coverage
setenv =
    PURE_PYTHON=1
commands =
    mkdir -p {toxinidir}/parts/htmlcov
    coverage run -m zope.testrunner --test-path=src {posargs:-vc}
    coverage html -i
    coverage report -i -m --fail-under=93

[testenv:release-check]
description = ensure that the distribution is ready to release
basepython = python3
skip_install = true
deps =
    twine
    build
    check-manifest
    check-python-versions >= 0.20.0
    wheel
commands_pre =
commands =
    check-manifest
    check-python-versions --only setup.py,tox.ini,.github/workflows/tests.yml
    python -m build --sdist --no-isolation
    twine check dist/*

[testenv:lint]
basepython = python3
skip_install = true
deps =
    isort
    flake8
commands =
    isort --check-only --diff {toxinidir}/src {toxinidir}/setup.py
    flake8 src setup.py

[testenv:isort-apply]
basepython = python3
skip_install = true
commands_pre =
deps =
    isort
commands =
    isort {toxinidir}/src {toxinidir}/setup.py []

[testenv:docs]
basepython = python3
skip_install = false
commands_pre =
commands =
    sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html
    sphinx-build -b doctest -d docs/_build/doctrees docs docs/_build/doctest
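As a small sketch of what the ``PURE_PYTHON`` settings in the tox
configuration above control (illustrative only, not part of the archive),
one way to check which implementation a given environment ends up using is
to compare the default class with its ``Py``-suffixed alias::

    import BTrees.OOBTree

    print(BTrees.OOBTree.BTree)    # the implementation selected at import time
    print(BTrees.OOBTree.BTreePy)  # always the pure-Python class

    # In the *-pure environments (PURE_PYTHON=1) both names are expected to
    # refer to the same pure-Python class.
    print(BTrees.OOBTree.BTree is BTrees.OOBTree.BTreePy)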