pax_global_header00006660000000000000000000000064147621560130014517gustar00rootroot0000000000000052 comment=778f52f5c91dbfcd44c501575be61bc3a1d74ad4 rtree-1.4.0/000077500000000000000000000000001476215601300126425ustar00rootroot00000000000000rtree-1.4.0/.github/000077500000000000000000000000001476215601300142025ustar00rootroot00000000000000rtree-1.4.0/.github/dependabot.yml000066400000000000000000000003321476215601300170300ustar00rootroot00000000000000version: 2 updates: # Maintain dependencies for GitHub Actions - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates to GitHub Actions every week interval: "weekly" rtree-1.4.0/.github/workflows/000077500000000000000000000000001476215601300162375ustar00rootroot00000000000000rtree-1.4.0/.github/workflows/deploy.yml000066400000000000000000000030261476215601300202570ustar00rootroot00000000000000name: Build and upload to PyPI on: workflow_dispatch: pull_request: push: branches: - master paths: - '.github/workflows/deploy.yml' release: types: - published jobs: build_wheels: name: Build wheel on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - windows-latest - ubuntu-latest - ubuntu-24.04-arm - macos-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 name: Install Python with: python-version: '3.11' - uses: ilammy/msvc-dev-cmd@v1 if: startsWith(matrix.os, 'windows') - name: Build wheels uses: pypa/cibuildwheel@v2.23.0 - uses: actions/upload-artifact@v4 with: name: cibw-wheels-${{ matrix.os }} path: ./wheelhouse/*.whl build_sdist: name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Build sdist run: pipx run build --sdist - uses: actions/upload-artifact@v4 with: name: cibw-sdist path: dist/*.tar.gz upload_pypi: needs: [build_wheels, build_sdist] runs-on: ubuntu-latest environment: pypi permissions: id-token: write if: github.event_name == 'release' && github.event.action == 'published' steps: - uses: 
actions/download-artifact@v4 with: pattern: cibw-* path: dist merge-multiple: true - uses: pypa/gh-action-pypi-publish@release/v1 rtree-1.4.0/.github/workflows/test.yml000066400000000000000000000035761476215601300177540ustar00rootroot00000000000000name: Test on: push: branches: - master pull_request: workflow_dispatch: schedule: - cron: '0 6 * * 1' jobs: conda: name: Conda ${{ matrix.python-version }} - ${{ matrix.os }} defaults: run: shell: bash -l {0} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: ['ubuntu-latest', 'macos-latest', 'windows-latest'] # test oldest and newest versions of python and libspatialindex python-version: ['3.9', '3.13'] sidx-version: ['1.8.5', '2.1.0'] exclude: - os: 'macos-latest' - sidx-version: '1.8.5' steps: - uses: actions/checkout@v4 - uses: conda-incubator/setup-miniconda@v3 with: channels: conda-forge auto-update-conda: true python-version: ${{ matrix.python-version }} - name: Setup run: conda install -c conda-forge numpy pytest libspatialindex=${{ matrix.sidx-version }} -y - name: Install run: pip install -e . - name: Test with pytest run: pytest --import-mode=importlib -Werror -v --doctest-modules rtree tests ubuntu: name: Ubuntu Python ${{ matrix.python-version }} defaults: run: shell: bash -l {0} runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 name: Install Python with: python-version: ${{ matrix.python-version }} allow-prereleases: true - name: Setup run: | sudo apt-get -y install libspatialindex-c6 pip install --upgrade pip pip install numpy pytest - name: Build run: pip install --user . 
- name: Test with pytest run: pytest --import-mode=importlib -Werror -v --doctest-modules rtree tests rtree-1.4.0/.gitignore000066400000000000000000000001541476215601300146320ustar00rootroot00000000000000*.egg-info/ *.pyc docs/build build/ dist/ *.idx *.dat include lib .coverage .tox wheelhouse .vscode/ *venv* rtree-1.4.0/.pre-commit-config.yaml000066400000000000000000000012271476215601300171250ustar00rootroot00000000000000ci: autoupdate_schedule: quarterly repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema rev: 0.31.2 hooks: - id: check-github-workflows args: ["--verbose"] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.9.9 hooks: # Run the linter - id: ruff args: [ --fix ] # Run the formatter - id: ruff-format - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy exclude: 'docs/.' rtree-1.4.0/.readthedocs.yaml000066400000000000000000000012271476215601300160730ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the version of Python and other tools you might need build: apt_packages: - libspatialindex-dev os: ubuntu-lts-latest tools: python: latest # Build documentation in the docs/source directory with Sphinx sphinx: configuration: docs/source/conf.py fail_on_warning: true # Optionally build your docs in additional formats such as PDF formats: - pdf # Declare the Python requirements required to build your docs python: install: - requirements: docs/requirements.txt rtree-1.4.0/CHANGES.rst000066400000000000000000000135301476215601300144460ustar00rootroot000000000000001.4.0: 2025-03-06 ================= - Python 3.9+ is now required (:PR:`321`) - Add support for array-based bulk insert with NumPy (:PR:`340` by :user:`FreddieWitherden`) 
- Upgrade binary wheels with libspatialindex-2.1.0 (:PR:`353`) - Rename project and other build components to "rtree" (:PR:`350`) 1.3.0: 2024-07-10 ================= - Upgrade binary wheels with libspatialindex-2.0.0 (:PR:`316`) - Fix binary wheels for musllinux wheels (:PR:`316`) - Update code style, replace isort and black with ruff, modern numpy rng (:PR:`319`) - Remove libsidx version testing (:PR:`313`) 1.2.0: 2024-01-19 ================= - Fix test failure with built library (:PR:`291` by :user:`sebastic`) - Include spatialindex headers and add :py:meth:`~rtree.finder.get_include` (:PR:`292` by :user:`JDBetteridge`) 1.1.0: 2023-10-17 ================= - Python 3.8+ is now required (:PR:`273`) - Move project metadata to pyproject.toml (:PR:`269`) - Refactor built wheels for PyPI (:PR:`276`) - Fix memory leak when breaking mid-way in _get_objects and _get_ids (:PR:`266`) (thanks :user:`akariv`!) 1.0.1: 2022-10-12 ================= - Fix up type hints :PR:`243` (thanks :user:`oderby`) - Python 3.11 wheels :PR:`250` (thanks :user:`ewouth`) 1.0.0: 2022-04-05 ================= - Python 3.7+ is now required (:PR:`212`) (thanks :user:`adamjstewart`!) - Type hints (:PR:`215` and others) (thanks :user:`adamjstewart`!) - Python 3.10 wheels, including osx-arm64 :PR:`224` - Clean up libspatialindex C API mismatches :PR:`222` (thanks :user:`musicinmybrain`!) - Many doc updates, fixes, and type hints (thanks :user:`adamjstewart`!) :PR:`212` :PR:`221` :PR:`217` :PR:`215` - __len__ method for index :PR:`194` - Prevent get_coordinate_pointers from mutating inputs #205 (thanks :user:`sjones94549`!) - linux-aarch64 wheels :PR:`183` (thanks :user:`odidev`!) 
- black (:PR:`218`) and flake8 (:PR:`145`) linting 0.9.3: 2019-12-10 ================= - find_library and libspatialindex library loading :PR:`131` 0.9.2: 2019-12-09 ================= - Refactored tests to be based on unittest :PR:`129` - Update libspatialindex library loading code to adapt previous behavior :PR:`128` - Empty data streams throw exceptions and do not partially construct indexes :PR:`127` 0.9.0: 2019-11-24 ================= - Add Index.GetResultSetOffset() - Add Index.contains() method for object and id (requires libspatialindex 1.9.3+) :PR:`116` - Add Index.Flush() :PR:`107` - Add TPRTree index support (thanks :user:`sdhiscocks` :PR:`117`) - Return container sizes without returning objects :PR:`90` - Add set_result_limit and set_result_offset for Index paging :commit:`44ad21aecd3f7b49314b9be12f3334d8bae7e827` Bug fixes: - Better exceptions in cases where stream functions throw :PR:`80` - Migrated CI platform to Azure Pipelines https://dev.azure.com/hobuinc/rtree/_build?definitionId=5 - Minor test enhancements and fixups. Both libspatialindex 1.8.5 and libspatialindex 1.9.3 are tested with CI 0.8: 2014-07-17 =============== - Support for Python 3 added. 0.7.0: 2011-12-29 ================= - 0.7.0 relies on libspatialindex 1.7.1+. - int64_t's should be used for IDs instead of uint64_t (requires libspatialindex 1.7.1 C API changes) - Fix __version__ - More documentation at http://toblerity.github.com/rtree/ - Class documentation at http://toblerity.github.com/rtree/class.html - Tweaks for PyPy compatibility. Still not compatible yet, however. - Custom storage support by Mattias (requires libspatialindex 1.7.1) 0.6.0: 2010-04-13 ================= - 0.6.0 relies on libspatialindex 1.5.0+. - :py:meth:`~rtree.index.Index.intersection` and :py:meth:`~rtree.index.Index.nearest` methods return iterators over results instead of lists. - Number of results for :py:meth:`~rtree.index.Index.nearest` defaults to 1. 
- libsidx C library of 0.5.0 removed and included in libspatialindex - objects="raw" in :py:meth:`~rtree.index.Index.intersection` to return the object sent in (for speed). - :py:meth:`~rtree.index.Index.count` method to return the intersection count without the overhead of returning a list (thanks Leonard NorrgÄrd). - Improved bulk loading performance - Supposedly no memory leaks :) - Many other performance tweaks (see docs). - Bulk loader supports interleaved coordinates - Leaf queries. You can return the box and ids of the leaf nodes of the index. Useful for visualization, etc. - Many more docstrings, sphinx docs, etc 0.5.0: 2009-08-06 ================= 0.5.0 was a complete refactoring to use libsidx - a C API for libspatialindex. The code is now ctypes over libsidx, and a number of new features are now available as a result of this refactoring. * ability to store pickles within the index (clustered index) * ability to use custom extension names for disk-based indexes * ability to modify many index parameters at instantiation time * storage of point data reduced by a factor of 4 * bulk loading of indexes at instantiation time * ability to quickly return the bounds of the entire index * ability to return the bounds of index entries * much better windows support * libspatialindex 1.4.0 required. 0.4.3: 2009-06-05 ================= - Fix reference counting leak #181 0.4.2: 2009-05-25 ================= - Windows support 0.4.1: 2008-03-24 ================= - Eliminate uncounted references in add, delete, nearestNeighbor (#157). 0.4: 2008-01-24 =============== - Testing improvements. - Switch dependency to the single consolidated spatialindex library (1.3). 0.3: 26 November 2007 ===================== - Change to Python long integer identifiers (#126). - Allow deletion of objects from indexes. - Reraise index query errors as Python exceptions. - Improved persistence. 0.2: 19 May 2007 ================ - Link spatialindex system library. 
0.1: 13 April 2007 ================== - Add disk storage option for indexes (#320). - Change license to LGPL. - Moved from Pleiades to GIS-Python repo. - Initial release. rtree-1.4.0/CREDITS.txt000066400000000000000000000014541476215601300145040ustar00rootroot00000000000000 Sean Gillies * Initial effort and basic API design based on QGIS' usage of libspatialindex C++ APIs Howard Butler * libspatialindex C API * rewrite to use ctypes and libspatialindex C API * Streaming/bulk loading support * Disk serialization of indexes * Pickle serialization and clustered index support * .count() and .intersection() methods * Windows support * Node fetching * Index property access Brent Pedersen * Pickle protocol support * Documentation, doctests * Variable coordinate ordering * Testing Matthias * Custom storage API (both Rtree and libspatialindex) Adam Stewart * intersection/union support * __len__ method Mike Taves * cibuildwheel configuration * general maintenance rtree-1.4.0/DEPENDENCIES.txt000066400000000000000000000001361476215601300152310ustar00rootroot00000000000000- python 3.9+ - setuptools - libspatialindex C library 1.8.5+: https://libspatialindex.org/ rtree-1.4.0/FAQ.txt000066400000000000000000000000001476215601300140000ustar00rootroot00000000000000rtree-1.4.0/LICENSE.txt000066400000000000000000000021331476215601300144640ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2018: Sean C. 
Gillies, Howard Butler and contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. rtree-1.4.0/MANIFEST.in000066400000000000000000000001771476215601300144050ustar00rootroot00000000000000include *.md *.rst *.txt include MANIFEST.in recursive-include benchmarks * recursive-include tests * recursive-include docs * rtree-1.4.0/README.md000066400000000000000000000017061476215601300141250ustar00rootroot00000000000000# Rtree: Spatial indexing for Python ![Build](https://github.com/Toblerity/rtree/workflows/Build/badge.svg) [![PyPI version](https://badge.fury.io/py/rtree.svg)](https://badge.fury.io/py/rtree) Rtree is a [ctypes](https://docs.python.org/3/library/ctypes.html) Python wrapper of [libspatialindex](https://libspatialindex.org/) that provides a number of advanced spatial indexing features for the spatially curious Python user. 
These features include: * Nearest neighbor search * Intersection search * Multi-dimensional indexes * Clustered indexes (store Python pickles directly with index entries) * Bulk loading * Deletion * Disk serialization * Custom storage implementation (to implement spatial indexing in ZODB, for example) Wheels are available for most major platforms, and `rtree` with bundled `libspatialindex` can be installed via pip: ``` pip install rtree ``` See [changes](https://rtree.readthedocs.io/en/latest/changes.html) for all versions. rtree-1.4.0/benchmarks/000077500000000000000000000000001476215601300147575ustar00rootroot00000000000000rtree-1.4.0/benchmarks/benchmarks.py000066400000000000000000000102731476215601300174510ustar00rootroot00000000000000# hobu's latest results on his 2006-era machine # Stream load: # 293710.04 usec/pass # # One-at-a-time load: # 527883.95 usec/pass # # 30000 points # Query box: (1240000, 1010000, 1400000, 1390000) # # Brute Force: # 46 hits # 13533.60 usec/pass # # Memory-based Rtree Intersection: # 46 hits # 7516.19 usec/pass # # Disk-based Rtree Intersection: # 46 hits # 7543.00 usec/pass # # Disk-based Rtree Intersection without Item() wrapper (objects='raw'): # 46 raw hits # 347.60 usec/pass import random import timeit from pathlib import Path import rtree from rtree import Rtree as _Rtree print(f"Benchmarking Rtree-{rtree.__version__} from {Path(rtree.__file__).parent}") print(f"Using {rtree.core.rt._name} version {rtree.core.rt.SIDX_Version().decode()}") print() TEST_TIMES = 20 class Point: """A very basic Geometry.""" def __init__(self, x, y): self.x = x self.y = y class Rtree(_Rtree): pickle_protocol = -1 # Scatter points randomly in a 1x1 box bounds = (0, 0, 6000000, 6000000) count = 30000 points = [] insert_object = None insert_object = { "a": list(range(100)), "b": 10, "c": object(), "d": dict(x=1), "e": Point(2, 3), } index = Rtree() disk_index = Rtree("test", overwrite=1) coordinates = [] random.seed("Rtree", version=2) for i in 
range(count): x = random.randrange(bounds[0], bounds[2]) + random.random() y = random.randrange(bounds[1], bounds[3]) + random.random() point = Point(x, y) points.append(point) index.add(i, (x, y), insert_object) disk_index.add(i, (x, y), insert_object) coordinates.append((i, (x, y, x, y), insert_object)) s = """ bulk = Rtree(coordinates[:2000]) """ t = timeit.Timer(stmt=s, setup="from __main__ import coordinates, Rtree, insert_object") print("Stream load:") print(f"{1e6 * t.timeit(number=TEST_TIMES) / TEST_TIMES:.2f} usec/pass") print() s = """ idx = Rtree() i = 0 for point in points[:2000]: idx.add(i, (point.x, point.y), insert_object) i+=1 """ t = timeit.Timer(stmt=s, setup="from __main__ import points, Rtree, insert_object") print("One-at-a-time load:") print(f"{1e6 * t.timeit(number=TEST_TIMES) / TEST_TIMES:.2f} usec/pass") print() bbox = (1240000, 1010000, 1400000, 1390000) print(count, "points") print("Query box: ", bbox) print() # Brute force all points within a 0.1x0.1 box s = """ hits = [p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]] """ t = timeit.Timer(stmt=s, setup="from __main__ import points, bbox") print("Brute Force:") print( len( [ p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3] ] ), "hits", ) print(f"{1e6 * t.timeit(number=TEST_TIMES) / TEST_TIMES:.2f} usec/pass") print() # 0.1x0.1 box using intersection if insert_object is None: s = """ hits = [points[id] for id in index.intersection(bbox)] """ else: s = """ hits = [p.object for p in index.intersection(bbox, objects=insert_object)] """ t = timeit.Timer( stmt=s, setup="from __main__ import points, index, bbox, insert_object" ) print("Memory-based Rtree Intersection:") print(len([points[id] for id in index.intersection(bbox)]), "hits") print(f"{1e6 * t.timeit(number=100) / 100:.2f} usec/pass") print() # run same test on disk_index. 
s = s.replace("index.", "disk_index.") t = timeit.Timer( stmt=s, setup="from __main__ import points, disk_index, bbox, insert_object" ) print("Disk-based Rtree Intersection:") hits = list(disk_index.intersection(bbox)) print(len(hits), "hits") print(f"{1e6 * t.timeit(number=TEST_TIMES) / TEST_TIMES:.2f} usec/pass") print() if insert_object: s = """ hits = disk_index.intersection(bbox, objects="raw") """ t = timeit.Timer( stmt=s, setup="from __main__ import points, disk_index, bbox, insert_object" ) print("Disk-based Rtree Intersection without Item() wrapper (objects='raw'):") result = list(disk_index.intersection(bbox, objects="raw")) print(len(result), "raw hits") print(f"{1e6 * t.timeit(number=TEST_TIMES) / TEST_TIMES:.2f} usec/pass") assert "a" in result[0], result[0] # type: ignore Path("test.dat").unlink() Path("test.idx").unlink() rtree-1.4.0/docs/000077500000000000000000000000001476215601300135725ustar00rootroot00000000000000rtree-1.4.0/docs/Makefile000066400000000000000000000072071476215601300152400ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latex_paper_size to make LaTeX files and run them through pdflatex" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf build/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html @echo @echo "Build finished. The HTML pages are in build/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml @echo @echo "Build finished. The HTML pages are in build/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in build/htmlhelp." 
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in build/qthelp, like this:" @echo "# qcollectiongenerator build/qthelp/Rtree.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile build/qthelp/Rtree.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) build/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Rtree" @echo "# ln -s build/devhelp $$HOME/.local/share/devhelp/Rtree" @echo "# devhelp" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex @echo @echo "Build finished; the LaTeX files are in build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." latexpdf: latex $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex @echo "Running LaTeX files through pdflatex..." make -C build/latex all-pdf @echo "pdflatex finished; the PDF files are in build/latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes @echo @echo "The overview file is in build/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in build/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in build/doctest/output.txt." pdf: $(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) build/pdf @echo @echo "Build finished; now you can process the PDF files." rtree-1.4.0/docs/requirements.txt000066400000000000000000000000301476215601300170470ustar00rootroot00000000000000sphinx>=4 sphinx-issues rtree-1.4.0/docs/source/000077500000000000000000000000001476215601300150725ustar00rootroot00000000000000rtree-1.4.0/docs/source/changes.rst000066400000000000000000000002051476215601300172310ustar00rootroot00000000000000.. 
_changes: Changes .............................................................................. .. include:: ../../CHANGES.rst rtree-1.4.0/docs/source/class.rst000066400000000000000000000006101476215601300167260ustar00rootroot00000000000000.. _class: Class Documentation ------------------------------------------------------------------------------ .. autoclass:: rtree.index.Index :members: __init__, insert, intersection, intersection_v, nearest, nearest_v, delete, bounds, count, close, dumps, loads .. autoclass:: rtree.index.Property :members: .. autoclass:: rtree.index.Item :members: __init__, bbox, object rtree-1.4.0/docs/source/conf.py000066400000000000000000000041041476215601300163700ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html import sys sys.path.append("../../") import rtree # noqa: E402 # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "Rtree" copyright = "2019, Sean Gilles, Howard Butler, and contributors" author = "Sean Gilles, Howard Butler, and contributors" version = release = rtree.__version__ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.ifconfig", "sphinx_issues", ] templates_path = ["_templates"] exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = "nature" htmlhelp_basename = "Rtreedoc" # -- Options for LaTeX output -------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [("index", "Rtree.tex", "Rtree Documentation", author, "manual")] pdf_documents = [("index", "Rtree", "Rtree Documentation", "The Rtree Team")] pdf_language = "en_US" pdf_fit_mode = "overflow" # -- Extension configuration ------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # sphinx.ext.autodoc autodoc_typehints = "description" autodoc_typehints_description_target = "documented" # sphinx-issues issues_github_path = "Toblerity/rtree" issues_commit_prefix = "" rtree-1.4.0/docs/source/history.rst000066400000000000000000000044461476215601300173350ustar00rootroot00000000000000.. _history: History of Rtree ------------------------------------------------------------------------------ `Rtree`_ was started by `Sean Gillies`_ as a port of the `libspatialindex`_ linkages that `QGIS`_ maintained to provide on-the-fly indexing support for GUI operations. A notable feature of `R-trees`_ is the ability to insert data into the structure without the need for a global partitioning bounds, and this drove Sean's adoption of this code. `Howard Butler`_ later picked up `Rtree`_ and added a number of features that `libspatialindex`_ provided including disk serialization and bulk loading by writing a C API for `libspatialindex`_ and re-writing `Rtree`_ as a `ctypes`_ wrapper to utilize this C API. `Brent Pedersen`_ came along and added features to support alternative coordinate ordering, augmentation of the pickle storage, and lots of documentation. 
Mattias (http://dr-code.org) added support for custom storage backends to support using `Rtree`_ as an indexing type in `ZODB`_. `Rtree`_ has gone through a number of iterations, and at 0.5.0, it was completely refactored to use a new internal architecture (ctypes + a C API over `libspatialindex`_). This refactoring has resulted in a number of new features and much more flexibility. See :ref:`changes` for more detail. .. note:: A significant bug in the 1.6.1+ `libspatialindex`_ C API was found where it was using unsigned integers for index entry IDs instead of signed integers. Because `Rtree`_ appeared to be the only significant user of the C API at this time, it was corrected immediately. You should update immediately and re-insert data into new indexes if this is an important consideration for your application. Rtree 0.5.0 included a C library that is now the C API for libspatialindex and is part of that source tree. The code bases are independent from each other and can now evolve separately. Rtree is pure Python as of 0.6.0+. .. _`Sean Gillies`: https://sgillies.net .. _`Howard Butler`: https://hobu.co .. _`Brent Pedersen`: https://github.com/brentp .. _`QGIS`: https://qgis.org .. _`ZODB`: https://zodb.org .. _`R-trees`: https://en.wikipedia.org/wiki/R-tree .. _`ctypes`: https://docs.python.org/3/library/ctypes.html .. _`libspatialindex`: https://libspatialindex.org .. _`Rtree`: https://rtree.readthedocs.io rtree-1.4.0/docs/source/index.rst000066400000000000000000000020351476215601300167330ustar00rootroot00000000000000.. _home: Rtree: Spatial indexing for Python ------------------------------------------------------------------------------ `Rtree`_ is a `ctypes`_ Python wrapper of `libspatialindex`_ that provides a number of advanced spatial indexing features for the spatially curious Python user. 
These features include: * Nearest neighbor search * Intersection search * Multi-dimensional indexes * Clustered indexes (store Python pickles directly with index entries) * Bulk loading * Deletion * Disk serialization * Custom storage implementation (to implement spatial indexing in ZODB, for example) Documentation .............................................................................. .. toctree:: :maxdepth: 2 install tutorial class misc changes performance history * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. _`R-trees`: https://en.wikipedia.org/wiki/R-tree .. _`ctypes`: https://docs.python.org/3/library/ctypes.html .. _`libspatialindex`: https://libspatialindex.org .. _`Rtree`: https://rtree.readthedocs.io rtree-1.4.0/docs/source/install.rst000066400000000000000000000021521476215601300172720ustar00rootroot00000000000000.. _installation: Installation ------------------------------------------------------------------------------ \*nix .............................................................................. First, download and install version 1.8.5+ of the `libspatialindex`_ library from: https://libspatialindex.org The library supports CMake builds, so it is a matter of:: $ mkdir build && cd build $ cmake .. $ cmake --build . -j $ cmake --install . You may need to run the ``ldconfig`` command after installing the library to ensure that applications can find it at startup time. Rtree can be easily installed via pip:: $ pip install rtree or by running in a local source directory:: $ pip install -e . You can build and test in place like:: $ pytest Windows .............................................................................. The Windows DLLs of `libspatialindex`_ are pre-compiled in windows installers that are available from `PyPI`_. Installation on Windows is as easy as:: pip install rtree .. _`PyPI`: https://pypi.org/project/rtree/ .. 
_`libspatialindex`: https://libspatialindex.org rtree-1.4.0/docs/source/misc.rst000066400000000000000000000004241476215601300165570ustar00rootroot00000000000000.. _misc: Miscellaneous Documentation ------------------------------------------------------------------------------ Exceptions ========== .. autoexception:: rtree.exceptions.RTreeError :members: Finder module ============= .. automodule:: rtree.finder :members: rtree-1.4.0/docs/source/performance.rst000066400000000000000000000061601476215601300201300ustar00rootroot00000000000000.. _performance: Performance ------------------------------------------------------------------------------ See the `benchmarks.py`_ file for a comparison of various query methods and how much acceleration can be obtained from using Rtree. .. _benchmarks.py: https://github.com/Toblerity/rtree/blob/master/benchmarks/benchmarks.py There are a few simple things that will improve performance. Use stream loading .............................................................................. This will substantially (orders of magnitude in many cases) improve performance over :py:meth:`~rtree.index.Index.insert` by allowing the data to be pre-sorted :: >>> def generator_function(somedata): ... for i, obj in enumerate(somedata): ... yield (i, (obj.xmin, obj.ymin, obj.xmax, obj.ymax), obj) >>> r = index.Index(generator_function(somedata)) After bulk loading the index, you can then insert additional records into the index using :py:meth:`~rtree.index.Index.insert` Override :py:data:`~rtree.index.Index.dumps` to use the highest pickle protocol ............................................................................... :: >>> import cPickle, rtree >>> class FastRtree(rtree.Rtree): ... def dumps(self, obj): ... return cPickle.dumps(obj, -1) >>> r = FastRtree() .. topic:: Update from January 2024 Pickling is currently broken and awaiting a pull request to fix it. For more information, see the `pull request on GitHub`_. .. 
_pull request on GitHub: https://github.com/Toblerity/rtree/pull/197 Use objects='raw' ............................................................................... In any :py:meth:`~rtree.index.Index.intersection` or :py:meth:`~rtree.index.Index.nearest` or query, use objects='raw' keyword argument :: >>> objs = r.intersection((xmin, ymin, xmax, ymax), objects="raw") Adjust index properties ............................................................................... Adjust :py:class:`rtree.index.Property` appropriate to your index. * Set your :py:data:`~rtree.index.Property.leaf_capacity` to a higher value than the default 100. 1000+ is fine for the default pagesize of 4096 in many cases. * Increase the :py:data:`~rtree.index.Property.fill_factor` to something near 0.9. Smaller fill factors mean more splitting, which means more nodes. This may be bad or good depending on your usage. Limit dimensionality to the amount you need ............................................................................... Don't use more dimensions than you actually need. If you only need 2, only use two. Otherwise, you will waste lots of storage and add that many more floating point comparisons for each query, search, and insert operation of the index. Use the correct query method ............................................................................... Use :py:meth:`~rtree.index.Index.count` if you only need a count and :py:meth:`~rtree.index.Index.intersection` if you only need the ids. Otherwise, lots of data may potentially be copied. If possible also make use of the bulk query methods suffixed with `_v`. rtree-1.4.0/docs/source/tutorial.rst000066400000000000000000000172101476215601300174700ustar00rootroot00000000000000.. 
_tutorial: Tutorial ------------------------------------------------------------------------------ This tutorial demonstrates how to take advantage of :ref:`Rtree ` for querying data that have a spatial component that can be modeled as bounding boxes. Creating an index .............................................................................. The following section describes the basic instantiation and usage of :ref:`Rtree `. Import ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After :ref:`installing ` :ref:`Rtree `, you should be able to open up a Python prompt and issue the following:: >>> from rtree import index :py:mod:`rtree` is organized as a Python package with a couple of modules and two major classes - :py:class:`rtree.index.Index` and :py:class:`rtree.index.Property`. Users manipulate these classes to interact with the index. Construct an instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After importing the index module, construct an index with the default construction:: >>> idx = index.Index() .. note:: While the default construction is useful in many cases, if you want to manipulate how the index is constructed you will need pass in a :py:class:`rtree.index.Property` instance when creating the index. Create a bounding box ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After instantiating the index, create a bounding box that we can insert into the index:: >>> left, bottom, right, top = (0.0, 0.0, 1.0, 1.0) .. note:: The coordinate ordering for all functions are sensitive the the index's :py:attr:`~rtree.index.Index.interleaved` data member. If :py:attr:`~rtree.index.Index.interleaved` is False, the coordinates must be in the form [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If :py:attr:`~rtree.index.Index.interleaved` is True, the coordinates must be in the form [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax]. 
Insert records into the index ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Insert an entry into the index:: >>> idx.insert(0, (left, bottom, right, top)) .. note:: Entries that are inserted into the index are not unique in either the sense of the `id` or of the bounding box that is inserted with index entries. If you need to maintain uniqueness, you need to manage that before inserting entries into the Rtree. .. note:: Inserting a point, i.e. where left == right && top == bottom, will essentially insert a single point entry into the index instead of copying extra coordinates and inserting them. There is no shortcut to explicitly insert a single point, however. Query the index ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are three primary methods for querying the index. :py:meth:`rtree.index.Index.intersection` will return you index entries that *cross* or are *contained* within the given query window. :py:meth:`rtree.index.Index.intersection` Intersection ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Given a query window, return ids that are contained within the window:: >>> list(idx.intersection((1.0, 1.0, 2.0, 2.0))) [0] Given a query window that is beyond the bounds of data we have in the index:: >>> list(idx.intersection((1.0000001, 1.0000001, 2.0, 2.0))) [] Nearest Neighbors ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The following finds the 1 nearest item to the given bounds. If multiple items are of equal distance to the bounds, both are returned:: >>> idx.insert(1, (left, bottom, right, top)) >>> list(idx.nearest((1.0000001, 1.0000001, 2.0, 2.0), 1)) [0, 1] .. _clustered: Using Rtree as a cheapo spatial database .............................................................................. Rtree also supports inserting any object you can pickle into the index (called a clustered index in `libspatialindex`_ parlance). 
The following inserts the picklable object ``42`` into the index with the given id:: >>> idx.insert(id=id, coordinates=(left, bottom, right, top), obj=42) You can then return a list of objects by giving the ``objects=True`` flag to intersection:: >>> [n.object for n in idx.intersection((left, bottom, right, top), objects=True)] [None, None, 42] .. warning:: `libspatialindex`_'s clustered indexes were not designed to be a database. You get none of the data integrity protections that a database would purport to offer, but this behavior of :ref:`Rtree ` can be useful nonetheless. Consider yourself warned. Now go do cool things with it. Serializing your index to a file .............................................................................. One of :ref:`Rtree `'s most useful properties is the ability to serialize Rtree indexes to disk. These include the clustered indexes described :ref:`here `:: >>> file_idx = index.Rtree('rtree') >>> file_idx.insert(1, (left, bottom, right, top)) >>> file_idx.insert(2, (left - 1.0, bottom - 1.0, right + 1.0, top + 1.0)) >>> [n for n in file_idx.intersection((left, bottom, right, top))] [1, 2] .. note:: By default, if an index file with the given name `rtree` in the example above already exists on the file system, it will be opened in append mode and not be re-created. You can control this behavior with the :py:attr:`rtree.index.Property.overwrite` property of the index property that can be given to the :py:class:`rtree.index.Index` constructor. .. seealso:: :ref:`performance` describes some parameters you can tune to make file-based indexes run a bit faster. The choices you make for the parameters is entirely dependent on your usage. Modifying file names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rtree uses the extensions `dat` and `idx` by default for the two index files that are created when serializing index data to disk. 
These file extensions are controllable using the :py:attr:`rtree.index.Property.dat_extension` and :py:attr:`rtree.index.Property.idx_extension` index properties. :: >>> p = rtree.index.Property() >>> p.dat_extension = 'data' >>> p.idx_extension = 'index' >>> file_idx = index.Index('rtree', properties = p) 3D indexes .............................................................................. As of Rtree version 0.5.0, you can create 3D (actually kD) indexes. The following is a 3D index that is to be stored on disk. Persisted indexes are stored on disk using two files -- an index file (.idx) and a data (.dat) file. You can modify the extensions these files use by altering the properties of the index at instantiation time. The following creates a 3D index that is stored on disk as the files ``3d_index.data`` and ``3d_index.index``:: >>> from rtree import index >>> p = index.Property() >>> p.dimension = 3 >>> p.dat_extension = 'data' >>> p.idx_extension = 'index' >>> idx3d = index.Index('3d_index',properties=p) >>> idx3d.insert(1, (0, 60, 23.0, 0, 60, 42.0)) >>> idx3d.intersection( (-1, 62, 22, -1, 62, 43)) [1L] ZODB and Custom Storages .............................................................................. https://mail.zope.org/pipermail/zodb-dev/2010-June/013491.html contains a custom storage backend for `ZODB`_ and you can find example python code `here`_. Note that the code was written in 2011, hasn't been updated and was only an alpha version. .. _`here`: https://github.com/Toblerity/zope.index.rtree .. _`ZODB`: https://zodb.org .. 
_`libspatialindex`: https://libspatialindex.org rtree-1.4.0/environment.yml000066400000000000000000000001451476215601300157310ustar00rootroot00000000000000name: _rtree channels: - defaults - conda-forge dependencies: - python>=3.9 - libspatialindex>=1.8.5 rtree-1.4.0/pyproject.toml000066400000000000000000000053751476215601300155700ustar00rootroot00000000000000[build-system] requires = ["setuptools>=61", "wheel"] build-backend = "setuptools.build_meta" [project] name = "rtree" authors = [ {name = "Sean Gillies", email = "sean.gillies@gmail.com"}, ] maintainers = [ {name = "Howard Butler", email = "howard@hobu.co"}, {name = "Mike Taves", email = "mwtoews@gmail.com"}, ] description = "R-Tree spatial index for Python GIS" readme = "README.md" requires-python = ">=3.9" keywords = ["gis", "spatial", "index", "r-tree"] license = {text = "MIT"} classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering :: GIS", "Topic :: Database", ] dynamic = ["version"] [project.urls] Documentation = "https://rtree.readthedocs.io" Repository = "https://github.com/Toblerity/rtree" [tool.setuptools] packages = ["rtree"] zip-safe = false include-package-data = false [tool.setuptools.dynamic] version = {attr = "rtree.__version__"} [tool.setuptools.package-data] rtree = ["py.typed"] [tool.cibuildwheel] build = "cp39-*" build-verbosity = 3 before-all = "pip install wheel" repair-wheel-command = "python scripts/repair_wheel.py -w {dest_dir} {wheel}" test-requires = "tox" test-command = "tox --conf {project} --installpkg {wheel}" test-skip = [ "*-macosx_arm64", ] 
[tool.cibuildwheel.linux] archs = ["auto"] before-build = [ "yum install -y cmake libffi-devel", "sh {project}/scripts/install_libspatialindex.sh", ] [[tool.cibuildwheel.overrides]] select = "*-musllinux*" before-build = [ "apk add cmake libffi-dev", "sh {project}/scripts/install_libspatialindex.sh", ] [tool.cibuildwheel.macos] archs = ["x86_64", "arm64"] environment = { MACOSX_DEPLOYMENT_TARGET="10.9" } before-build = [ "brew install coreutils cmake", "sh {project}/scripts/install_libspatialindex.sh", ] [tool.cibuildwheel.windows] archs = ["AMD64"] before-build = [ "call {project}\\scripts\\install_libspatialindex.bat", ] [tool.coverage.report] # Ignore warnings for overloads # https://github.com/nedbat/coveragepy/issues/970#issuecomment-612602180 exclude_lines = [ "pragma: no cover", "@overload", ] [tool.ruff.lint] select = [ "E", "W", # pycodestyle "F", # Pyflakes "UP", # pyupgrade "I", # isort "NPY", # NumPy-specific ] [tool.mypy] exclude = ["docs", "build"] ignore_missing_imports = true show_error_codes = true rtree-1.4.0/rtree/000077500000000000000000000000001476215601300137635ustar00rootroot00000000000000rtree-1.4.0/rtree/__init__.py000066400000000000000000000003301476215601300160700ustar00rootroot00000000000000""" # rtree Rtree provides Python bindings to libspatialindex for quick hyperrectangular intersection queries. """ from __future__ import annotations __version__ = "1.4.0" from .index import Index, Rtree # noqa rtree-1.4.0/rtree/core.py000066400000000000000000000647561476215601300153070ustar00rootroot00000000000000from __future__ import annotations import ctypes from . 
import finder from .exceptions import RTreeError def check_return(result, func, cargs): "Error checking for Error calls" if result != 0: s = rt.Error_GetLastErrorMsg().decode() msg = f'Error in "{func.__name__}": {s}' rt.Error_Reset() raise RTreeError(msg) return True def check_void(result, func, cargs): "Error checking for void* returns" if not bool(result): s = rt.Error_GetLastErrorMsg().decode() msg = f'Error in "{func.__name__}": {s}' rt.Error_Reset() raise RTreeError(msg) return result def check_void_done(result, func, cargs): "Error checking for void* returns that might be empty with no error" if rt.Error_GetErrorCount(): s = rt.Error_GetLastErrorMsg().decode() msg = f'Error in "{func.__name__}": {s}' rt.Error_Reset() raise RTreeError(msg) return result def check_value(result, func, cargs): "Error checking proper value returns" count = rt.Error_GetErrorCount() if count != 0: s = rt.Error_GetLastErrorMsg().decode() msg = f'Error in "{func.__name__}": {s}' rt.Error_Reset() raise RTreeError(msg) return result def check_value_free(result, func, cargs): "Error checking proper value returns" count = rt.Error_GetErrorCount() if count != 0: s = rt.Error_GetLastErrorMsg().decode() msg = f'Error in "{func.__name__}": {s}' rt.Error_Reset() raise RTreeError(msg) return result def free_returned_char_p(result, func, cargs): retvalue = ctypes.string_at(result) p = ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p)) rt.Index_Free(p) return retvalue def free_error_msg_ptr(result, func, cargs): retvalue = ctypes.string_at(result) p = ctypes.cast(result, ctypes.POINTER(ctypes.c_void_p)) rt.Index_Free(p) return retvalue # load the shared library by looking in likely places rt = finder.load() rt.SIDX_Version.argtypes = [] rt.SIDX_Version.restype = ctypes.POINTER(ctypes.c_char) rt.SIDX_Version.errcheck = free_returned_char_p # type: ignore rt.Error_GetLastErrorNum.argtypes = [] rt.Error_GetLastErrorNum.restype = ctypes.c_int rt.Error_GetLastErrorMsg.argtypes = [] 
rt.Error_GetLastErrorMsg.restype = ctypes.POINTER(ctypes.c_char) rt.Error_GetLastErrorMsg.errcheck = free_error_msg_ptr # type: ignore rt.Error_GetLastErrorMethod.argtypes = [] rt.Error_GetLastErrorMethod.restype = ctypes.POINTER(ctypes.c_char) rt.Error_GetLastErrorMethod.errcheck = free_returned_char_p # type: ignore rt.Error_GetErrorCount.argtypes = [] rt.Error_GetErrorCount.restype = ctypes.c_int rt.Error_Reset.argtypes = [] rt.Error_Reset.restype = None rt.Index_Create.argtypes = [ctypes.c_void_p] rt.Index_Create.restype = ctypes.c_void_p rt.Index_Create.errcheck = check_void # type: ignore _nDataLength_size_t = True try: _major, _minor, _patch = ( int(part) for part in rt.SIDX_Version().decode("ascii").split(".") ) except (ValueError, UnicodeDecodeError): pass # weird version; assume latest ABI else: if (_major, _minor, _patch) < (1, 9, 0): # Headers had size_t*, but implementation had uint32_t* _nDataLength_size_t = False NEXTFUNC = ctypes.CFUNCTYPE( ctypes.c_int, ctypes.POINTER(ctypes.c_int64), ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), ctypes.POINTER(ctypes.c_size_t if _nDataLength_size_t else ctypes.c_uint32), ) rt.Index_CreateWithStream.argtypes = [ctypes.c_void_p, NEXTFUNC] rt.Index_CreateWithStream.restype = ctypes.c_void_p rt.Index_CreateWithStream.errcheck = check_void # type: ignore try: rt.Index_CreateWithArray.argtypes = [ ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ] rt.Index_CreateWithArray.restype = ctypes.c_void_p rt.Index_CreateWithArray.errcheck = check_void # type: ignore except AttributeError: pass rt.Index_Destroy.argtypes = [ctypes.c_void_p] rt.Index_Destroy.restype = None rt.Index_Destroy.errcheck = check_void_done # type: ignore rt.Index_GetProperties.argtypes = [ctypes.c_void_p] 
rt.Index_GetProperties.restype = ctypes.c_void_p rt.Index_GetProperties.errcheck = check_void # type: ignore rt.Index_DeleteData.argtypes = [ ctypes.c_void_p, ctypes.c_int64, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ] rt.Index_DeleteData.restype = ctypes.c_int rt.Index_DeleteData.errcheck = check_return # type: ignore rt.Index_InsertData.argtypes = [ ctypes.c_void_p, ctypes.c_int64, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.c_ubyte), ctypes.c_uint32, ] rt.Index_InsertData.restype = ctypes.c_int rt.Index_InsertData.errcheck = check_return # type: ignore rt.Index_GetBounds.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.c_uint32), ] rt.Index_GetBounds.restype = ctypes.c_int rt.Index_GetBounds.errcheck = check_value # type: ignore rt.Index_IsValid.argtypes = [ctypes.c_void_p] rt.Index_IsValid.restype = ctypes.c_int rt.Index_IsValid.errcheck = check_value # type: ignore rt.Index_Intersects_obj.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_Intersects_obj.restype = ctypes.c_int rt.Index_Intersects_obj.errcheck = check_return # type: ignore rt.Index_Intersects_id.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_Intersects_id.restype = ctypes.c_int rt.Index_Intersects_id.errcheck = check_return # type: ignore rt.Index_Intersects_count.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint64), ] rt.Index_NearestNeighbors_obj.argtypes = [ ctypes.c_void_p, 
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_NearestNeighbors_obj.restype = ctypes.c_int rt.Index_NearestNeighbors_obj.errcheck = check_return # type: ignore rt.Index_NearestNeighbors_id.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_NearestNeighbors_id.restype = ctypes.c_int rt.Index_NearestNeighbors_id.errcheck = check_return # type: ignore try: rt.Index_NearestNeighbors_id_v.argtypes = [ ctypes.c_void_p, ctypes.c_int64, ctypes.c_int64, ctypes.c_uint32, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64), ] rt.Index_NearestNeighbors_id_v.restype = ctypes.c_int rt.Index_NearestNeighbors_id_v.errcheck = check_return # type: ignore rt.Index_Intersects_id_v.argtypes = [ ctypes.c_void_p, ctypes.c_int64, ctypes.c_uint32, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64), ] rt.Index_Intersects_id_v.restype = ctypes.c_int rt.Index_Intersects_id_v.errcheck = check_return # type: ignore except AttributeError: pass rt.Index_GetLeaves.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(ctypes.POINTER(ctypes.c_uint32)), ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)), ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_int64))), ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))), ctypes.POINTER(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))), ctypes.POINTER(ctypes.c_uint32), ] rt.Index_GetLeaves.restype = ctypes.c_int rt.Index_GetLeaves.errcheck = check_return # type: ignore rt.Index_DestroyObjResults.argtypes = [ 
ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.c_uint32, ] rt.Index_DestroyObjResults.restype = None rt.Index_DestroyObjResults.errcheck = check_void_done # type: ignore rt.Index_ClearBuffer.argtypes = [ctypes.c_void_p] rt.Index_ClearBuffer.restype = None rt.Index_ClearBuffer.errcheck = check_void_done # type: ignore rt.Index_Free.argtypes = [ctypes.POINTER(ctypes.c_void_p)] rt.Index_Free.restype = None rt.IndexItem_Destroy.argtypes = [ctypes.c_void_p] rt.IndexItem_Destroy.restype = None rt.IndexItem_Destroy.errcheck = check_void_done # type: ignore rt.IndexItem_GetData.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), ctypes.POINTER(ctypes.c_uint64), ] rt.IndexItem_GetData.restype = ctypes.c_int rt.IndexItem_GetData.errcheck = check_value # type: ignore rt.IndexItem_GetBounds.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.POINTER(ctypes.c_double)), ctypes.POINTER(ctypes.c_uint32), ] rt.IndexItem_GetBounds.restype = ctypes.c_int rt.IndexItem_GetBounds.errcheck = check_value # type: ignore rt.IndexItem_GetID.argtypes = [ctypes.c_void_p] rt.IndexItem_GetID.restype = ctypes.c_int64 rt.IndexItem_GetID.errcheck = check_value # type: ignore try: rt.Index_GetResultSetOffset.argtypes = [ctypes.c_void_p] rt.Index_GetResultSetOffset.restype = ctypes.c_int64 rt.Index_GetResultSetOffset.errcheck = check_value # type: ignore rt.Index_SetResultSetOffset.argtypes = [ctypes.c_void_p, ctypes.c_int64] rt.Index_SetResultSetOffset.restype = ctypes.c_int rt.Index_SetResultSetOffset.errcheck = check_return # type: ignore rt.Index_GetResultSetLimit.argtypes = [ctypes.c_void_p] rt.Index_GetResultSetLimit.restype = ctypes.c_int64 rt.Index_GetResultSetLimit.errcheck = check_value # type: ignore rt.Index_SetResultSetLimit.argtypes = [ctypes.c_void_p, ctypes.c_int64] rt.Index_SetResultSetLimit.restype = ctypes.c_int rt.Index_SetResultSetLimit.errcheck = check_return # type: ignore 
rt.Index_Flush.argtypes = [ctypes.c_void_p] rt.Index_Flush.restype = None rt.Index_Flush.errcheck = check_void_done # type: ignore rt.Index_Contains_obj.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_Contains_obj.restype = ctypes.c_int rt.Index_Contains_obj.errcheck = check_return # type: ignore rt.Index_Contains_id.argtypes = [ ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint32, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)), ctypes.POINTER(ctypes.c_uint64), ] rt.Index_Contains_id.restype = ctypes.c_int rt.Index_Contains_id.errcheck = check_return # type: ignore except AttributeError: pass rt.IndexProperty_Create.argtypes = [] rt.IndexProperty_Create.restype = ctypes.c_void_p rt.IndexProperty_Create.errcheck = check_void # type: ignore rt.IndexProperty_Destroy.argtypes = [ctypes.c_void_p] rt.IndexProperty_Destroy.restype = None rt.IndexProperty_Destroy.errcheck = check_void_done # type: ignore rt.IndexProperty_SetIndexType.argtypes = [ctypes.c_void_p, ctypes.c_int] rt.IndexProperty_SetIndexType.restype = ctypes.c_int rt.IndexProperty_SetIndexType.errcheck = check_return # type: ignore rt.IndexProperty_GetIndexType.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetIndexType.restype = ctypes.c_int rt.IndexProperty_GetIndexType.errcheck = check_value # type: ignore rt.IndexProperty_SetDimension.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetDimension.restype = ctypes.c_int rt.IndexProperty_SetDimension.errcheck = check_return # type: ignore rt.IndexProperty_GetDimension.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetDimension.restype = ctypes.c_uint32 rt.IndexProperty_GetDimension.errcheck = check_value # type: ignore rt.IndexProperty_SetIndexVariant.argtypes = [ctypes.c_void_p, ctypes.c_int] rt.IndexProperty_SetIndexVariant.restype = 
ctypes.c_int rt.IndexProperty_SetIndexVariant.errcheck = check_return # type: ignore rt.IndexProperty_GetIndexVariant.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetIndexVariant.restype = ctypes.c_int rt.IndexProperty_GetIndexVariant.errcheck = check_value # type: ignore rt.IndexProperty_SetIndexStorage.argtypes = [ctypes.c_void_p, ctypes.c_int] rt.IndexProperty_SetIndexStorage.restype = ctypes.c_int rt.IndexProperty_SetIndexStorage.errcheck = check_return # type: ignore rt.IndexProperty_GetIndexStorage.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetIndexStorage.restype = ctypes.c_int rt.IndexProperty_GetIndexStorage.errcheck = check_value # type: ignore rt.IndexProperty_SetIndexCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetIndexCapacity.restype = ctypes.c_int rt.IndexProperty_SetIndexCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetIndexCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetIndexCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetIndexCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetLeafCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetLeafCapacity.restype = ctypes.c_int rt.IndexProperty_SetLeafCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetLeafCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetLeafCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetLeafCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetPagesize.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetPagesize.restype = ctypes.c_int rt.IndexProperty_SetPagesize.errcheck = check_return # type: ignore rt.IndexProperty_GetPagesize.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetPagesize.restype = ctypes.c_uint32 rt.IndexProperty_GetPagesize.errcheck = check_value # type: ignore rt.IndexProperty_SetLeafPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetLeafPoolCapacity.restype = ctypes.c_int 
rt.IndexProperty_SetLeafPoolCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetLeafPoolCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetLeafPoolCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetLeafPoolCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetIndexPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetIndexPoolCapacity.restype = ctypes.c_int rt.IndexProperty_SetIndexPoolCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetIndexPoolCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetIndexPoolCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetIndexPoolCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetRegionPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetRegionPoolCapacity.restype = ctypes.c_int rt.IndexProperty_SetRegionPoolCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetRegionPoolCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetRegionPoolCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetRegionPoolCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetPointPoolCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetPointPoolCapacity.restype = ctypes.c_int rt.IndexProperty_SetPointPoolCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetPointPoolCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetPointPoolCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetPointPoolCapacity.errcheck = check_value # type: ignore rt.IndexProperty_SetBufferingCapacity.argtypes = [ctypes.c_void_p, ctypes.c_uint32] rt.IndexProperty_SetBufferingCapacity.restype = ctypes.c_int rt.IndexProperty_SetBufferingCapacity.errcheck = check_return # type: ignore rt.IndexProperty_GetBufferingCapacity.argtypes = [ctypes.c_void_p] rt.IndexProperty_GetBufferingCapacity.restype = ctypes.c_uint32 rt.IndexProperty_GetBufferingCapacity.errcheck = check_value # type: 
# ctypes prototype registrations for the libspatialindex C API (rtree/core.py).
# Each triple sets argtypes/restype and an errcheck hook: check_return for
# int status codes, check_value for value-returning getters, check_void for
# returned handles, free_returned_char_p for C strings the caller must free.
rt.IndexProperty_SetEnsureTightMBRs.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetEnsureTightMBRs.restype = ctypes.c_int
rt.IndexProperty_SetEnsureTightMBRs.errcheck = check_return  # type: ignore
rt.IndexProperty_GetEnsureTightMBRs.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetEnsureTightMBRs.restype = ctypes.c_uint32
rt.IndexProperty_GetEnsureTightMBRs.errcheck = check_value  # type: ignore

rt.IndexProperty_SetOverwrite.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetOverwrite.restype = ctypes.c_int
rt.IndexProperty_SetOverwrite.errcheck = check_return  # type: ignore
rt.IndexProperty_GetOverwrite.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetOverwrite.restype = ctypes.c_uint32
rt.IndexProperty_GetOverwrite.errcheck = check_value  # type: ignore

rt.IndexProperty_SetNearMinimumOverlapFactor.argtypes = [
    ctypes.c_void_p,
    ctypes.c_uint32,
]
rt.IndexProperty_SetNearMinimumOverlapFactor.restype = ctypes.c_int
rt.IndexProperty_SetNearMinimumOverlapFactor.errcheck = check_return  # type: ignore
rt.IndexProperty_GetNearMinimumOverlapFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetNearMinimumOverlapFactor.restype = ctypes.c_uint32
rt.IndexProperty_GetNearMinimumOverlapFactor.errcheck = check_value  # type: ignore

rt.IndexProperty_SetWriteThrough.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
rt.IndexProperty_SetWriteThrough.restype = ctypes.c_int
rt.IndexProperty_SetWriteThrough.errcheck = check_return  # type: ignore
rt.IndexProperty_GetWriteThrough.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetWriteThrough.restype = ctypes.c_uint32
rt.IndexProperty_GetWriteThrough.errcheck = check_value  # type: ignore

rt.IndexProperty_SetFillFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetFillFactor.restype = ctypes.c_int
rt.IndexProperty_SetFillFactor.errcheck = check_return  # type: ignore
rt.IndexProperty_GetFillFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFillFactor.restype = ctypes.c_double
rt.IndexProperty_GetFillFactor.errcheck = check_value  # type: ignore

rt.IndexProperty_SetSplitDistributionFactor.argtypes = [
    ctypes.c_void_p,
    ctypes.c_double,
]
rt.IndexProperty_SetSplitDistributionFactor.restype = ctypes.c_int
rt.IndexProperty_SetSplitDistributionFactor.errcheck = check_return  # type: ignore
rt.IndexProperty_GetSplitDistributionFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetSplitDistributionFactor.restype = ctypes.c_double
rt.IndexProperty_GetSplitDistributionFactor.errcheck = check_value  # type: ignore

rt.IndexProperty_SetTPRHorizon.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetTPRHorizon.restype = ctypes.c_int
rt.IndexProperty_SetTPRHorizon.errcheck = check_return  # type: ignore
rt.IndexProperty_GetTPRHorizon.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetTPRHorizon.restype = ctypes.c_double
rt.IndexProperty_GetTPRHorizon.errcheck = check_value  # type: ignore

rt.IndexProperty_SetReinsertFactor.argtypes = [ctypes.c_void_p, ctypes.c_double]
rt.IndexProperty_SetReinsertFactor.restype = ctypes.c_int
rt.IndexProperty_SetReinsertFactor.errcheck = check_return  # type: ignore
rt.IndexProperty_GetReinsertFactor.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetReinsertFactor.restype = ctypes.c_double
rt.IndexProperty_GetReinsertFactor.errcheck = check_value  # type: ignore

rt.IndexProperty_SetFileName.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileName.restype = ctypes.c_int
rt.IndexProperty_SetFileName.errcheck = check_return  # type: ignore
# Getters returning strings use POINTER(c_char) (not c_char_p) so the
# errcheck hook can free the C allocation after copying the value.
rt.IndexProperty_GetFileName.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileName.errcheck = free_returned_char_p  # type: ignore
rt.IndexProperty_GetFileName.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetFileNameExtensionDat.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileNameExtensionDat.restype = ctypes.c_int
rt.IndexProperty_SetFileNameExtensionDat.errcheck = check_return  # type: ignore
rt.IndexProperty_GetFileNameExtensionDat.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileNameExtensionDat.errcheck = free_returned_char_p  # type: ignore
rt.IndexProperty_GetFileNameExtensionDat.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetFileNameExtensionIdx.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
rt.IndexProperty_SetFileNameExtensionIdx.restype = ctypes.c_int
rt.IndexProperty_SetFileNameExtensionIdx.errcheck = check_return  # type: ignore
rt.IndexProperty_GetFileNameExtensionIdx.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetFileNameExtensionIdx.errcheck = free_returned_char_p  # type: ignore
rt.IndexProperty_GetFileNameExtensionIdx.restype = ctypes.POINTER(ctypes.c_char)

rt.IndexProperty_SetCustomStorageCallbacksSize.argtypes = [
    ctypes.c_void_p,
    ctypes.c_uint32,
]
rt.IndexProperty_SetCustomStorageCallbacksSize.restype = ctypes.c_int
rt.IndexProperty_SetCustomStorageCallbacksSize.errcheck = check_return  # type: ignore
rt.IndexProperty_GetCustomStorageCallbacksSize.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetCustomStorageCallbacksSize.restype = ctypes.c_uint32
rt.IndexProperty_GetCustomStorageCallbacksSize.errcheck = check_value  # type: ignore

rt.IndexProperty_SetCustomStorageCallbacks.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
rt.IndexProperty_SetCustomStorageCallbacks.restype = ctypes.c_int
rt.IndexProperty_SetCustomStorageCallbacks.errcheck = check_return  # type: ignore
rt.IndexProperty_GetCustomStorageCallbacks.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetCustomStorageCallbacks.restype = ctypes.c_void_p
rt.IndexProperty_GetCustomStorageCallbacks.errcheck = check_value  # type: ignore

rt.IndexProperty_SetIndexID.argtypes = [ctypes.c_void_p, ctypes.c_int64]
rt.IndexProperty_SetIndexID.restype = ctypes.c_int
rt.IndexProperty_SetIndexID.errcheck = check_return  # type: ignore
rt.IndexProperty_GetIndexID.argtypes = [ctypes.c_void_p]
rt.IndexProperty_GetIndexID.restype = ctypes.c_int64
rt.IndexProperty_GetIndexID.errcheck = check_value  # type: ignore

rt.SIDX_NewBuffer.argtypes = [ctypes.c_size_t]
rt.SIDX_NewBuffer.restype = ctypes.c_void_p
rt.SIDX_NewBuffer.errcheck = check_void  # type: ignore
rt.SIDX_DeleteBuffer.argtypes = [ctypes.c_void_p]
rt.SIDX_DeleteBuffer.restype = None

# TPR-Tree API
# Older libspatialindex builds lack these symbols; the AttributeError fallback
# leaves them unregistered and index.py raises at Index construction instead.
try:
    rt.Index_InsertTPData.argtypes = [
        ctypes.c_void_p,
        ctypes.c_int64,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.c_ubyte),
        ctypes.c_size_t,
    ]
    rt.Index_InsertTPData.restype = ctypes.c_int
    rt.Index_InsertTPData.errcheck = check_return  # type: ignore

    rt.Index_DeleteTPData.argtypes = [
        ctypes.c_void_p,
        ctypes.c_int64,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
    ]
    rt.Index_DeleteTPData.restype = ctypes.c_int
    rt.Index_DeleteTPData.errcheck = check_return  # type: ignore

    rt.Index_TPIntersects_id.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
        ctypes.POINTER(ctypes.c_uint64),
    ]
    rt.Index_TPIntersects_id.restype = ctypes.c_int
    rt.Index_TPIntersects_id.errcheck = check_return  # type: ignore

    rt.Index_TPIntersects_obj.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
        ctypes.POINTER(ctypes.c_uint64),
    ]
    rt.Index_TPIntersects_obj.restype = ctypes.c_int
    rt.Index_TPIntersects_obj.errcheck = check_return  # type: ignore

    rt.Index_TPIntersects_count.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.c_uint64),
    ]
    rt.Index_TPIntersects_count.restype = ctypes.c_int
    rt.Index_TPIntersects_count.errcheck = check_return  # type: ignore

    rt.Index_TPNearestNeighbors_id.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.POINTER(ctypes.c_int64)),
        ctypes.POINTER(ctypes.c_uint64),
    ]
    rt.Index_TPNearestNeighbors_id.restype = ctypes.c_int
    rt.Index_TPNearestNeighbors_id.errcheck = check_return  # type: ignore

    rt.Index_TPNearestNeighbors_obj.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.POINTER(ctypes.c_double),
        ctypes.c_double,
        ctypes.c_double,
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)),
        ctypes.POINTER(ctypes.c_uint64),
    ]
    rt.Index_TPNearestNeighbors_obj.restype = ctypes.c_int
    rt.Index_TPNearestNeighbors_obj.errcheck = check_return  # type: ignore
except AttributeError:
    pass

# --- rtree/exceptions.py ---
from __future__ import annotations


class RTreeError(Exception):
    "RTree exception, indicates a RTree-related error."

    pass

# --- rtree/finder.py ---
"""Locate `libspatialindex` shared library and header files.
"""

from __future__ import annotations

import ctypes
import importlib.metadata
import os
import platform
import sys
from ctypes.util import find_library
from pathlib import Path

# Directory holding this module and the interpreter prefix; both are used
# below as search roots for the shared library / headers.
_cwd = Path(__file__).parent
_sys_prefix = Path(sys.prefix)

# generate a bunch of candidate locations where the
# libspatialindex shared library *might* be hanging out
_candidates = []
if "SPATIALINDEX_C_LIBRARY" in os.environ:
    # explicit override always wins (checked first)
    _candidates.append(Path(os.environ["SPATIALINDEX_C_LIBRARY"]))
_candidates += [_cwd / "lib", _cwd, Path("")]


def load() -> ctypes.CDLL:
    """Load the `libspatialindex` shared library.

    :returns: Loaded shared library
    :raises OSError: if the library cannot be found or loaded
    """
    if os.name == "nt":
        # check the platform architecture
        if "64" in platform.architecture()[0]:
            arch = "64"
        else:
            arch = "32"
        lib_name = f"spatialindex_c-{arch}.dll"

        # add search paths for conda installs
        if (_sys_prefix / "conda-meta").exists() or "conda" in sys.version:
            _candidates.append(_sys_prefix / "Library" / "bin")

        # get the current PATH
        oldenv = os.environ.get("PATH", "").strip().rstrip(";")

        # run through our list of candidate locations
        for path in _candidates:
            if not path.exists():
                continue
            # temporarily add the path to the PATH environment variable
            # so Windows can find additional DLL dependencies.
            os.environ["PATH"] = ";".join([str(path), oldenv])
            try:
                rt = ctypes.cdll.LoadLibrary(str(path / lib_name))
                if rt is not None:
                    return rt
            except OSError:
                # expected when the DLL is absent/unloadable here; try next
                pass
            except BaseException as err:
                print(f"rtree.finder unexpected error: {err!s}", file=sys.stderr)
            finally:
                # always restore PATH, even on early return
                os.environ["PATH"] = oldenv
        raise OSError(f"could not find or load {lib_name}")

    elif os.name == "posix":
        # posix includes both mac and linux
        # use the extension for the specific platform
        if platform.system() == "Darwin":
            # macos shared libraries are `.dylib`
            lib_name = "libspatialindex_c.dylib"
        else:
            # linux shared libraries are `.so`
            lib_name = "libspatialindex_c.so"

        # add path for binary wheel prepared with cibuildwheel/auditwheel
        try:
            pkg_files = importlib.metadata.files("rtree")
            if pkg_files is not None:
                for file in pkg_files:  # type: ignore
                    if (
                        file.parent.name == "rtree.libs"
                        and file.stem.startswith("libspatialindex")
                        and ".so" in file.suffixes
                    ):
                        # insert after any SPATIALINDEX_C_LIBRARY override
                        _candidates.insert(1, Path(file.locate()))
                        break
        except importlib.metadata.PackageNotFoundError:
            pass

        # get the starting working directory
        cwd = os.getcwd()
        for cand in _candidates:
            if cand.is_dir():
                # if our candidate is a directory use best guess
                path = cand
                target = cand / lib_name
            elif cand.is_file():
                # if candidate is just a file use that
                path = cand.parent
                target = cand
            else:
                continue
            if not target.exists():
                continue
            try:
                # move to the location we're checking
                os.chdir(path)
                # try loading the target file candidate
                rt = ctypes.cdll.LoadLibrary(str(target))
                if rt is not None:
                    return rt
            except BaseException as err:
                print(
                    f"rtree.finder ({target}) unexpected error: {err!s}",
                    file=sys.stderr,
                )
            finally:
                # always restore the original working directory
                os.chdir(cwd)

    try:
        # try loading library using LD path search
        pth = find_library("spatialindex_c")
        if pth is not None:
            return ctypes.cdll.LoadLibrary(pth)
    except BaseException:
        # best-effort fallback; fall through to the final OSError
        pass

    raise OSError("Could not load libspatialindex_c library")


def get_include() -> str:
    """Return the directory that contains the spatialindex \\*.h files.
    :returns: Path to include directory or "" if not found.
    """
    # check if was bundled with a binary wheel
    try:
        pkg_files = importlib.metadata.files("rtree")
        if pkg_files is not None:
            for path in pkg_files:  # type: ignore
                if path.name == "SpatialIndex.h":
                    # grandparent of .../include/spatialindex/SpatialIndex.h
                    return str(Path(path.locate()).parent.parent)
    except importlib.metadata.PackageNotFoundError:
        pass

    # look for this header file in a few directories
    path_to_spatialindex_h = Path("include/spatialindex/SpatialIndex.h")

    # check sys.prefix, e.g. conda's libspatialindex package
    if os.name == "nt":
        file = _sys_prefix / "Library" / path_to_spatialindex_h
    else:
        file = _sys_prefix / path_to_spatialindex_h
    if file.is_file():
        return str(file.parent.parent)

    # check if relative to lib
    # NOTE(review): CDLL._name is a private ctypes attribute — confirm it is
    # populated on all supported platforms before relying on this branch.
    libdir = Path(load()._name).parent
    file = libdir.parent / path_to_spatialindex_h
    if file.is_file():
        return str(file.parent.parent)

    # check system install
    file = Path("/usr") / path_to_spatialindex_h
    if file.is_file():
        return str(file.parent.parent)

    # not found
    return ""

# --- rtree/index.py ---
from __future__ import annotations

import ctypes
import os
import os.path
import pickle
import pprint
import warnings
from collections.abc import Iterator, Sequence
from typing import Any, Literal, overload

from . import core
from .exceptions import RTreeError

# Storage backends
RT_Memory = 0
RT_Disk = 1
RT_Custom = 2

# Split algorithm variants
RT_Linear = 0
RT_Quadratic = 1
RT_Star = 2

# Index types
RT_RTree = 0
RT_MVRTree = 1
RT_TPRTree = 2

__c_api_version__ = core.rt.SIDX_Version()

major_version, minor_version, patch_version = (
    int(t) for t in __c_api_version__.decode("utf-8").split(".")
)

if (major_version, minor_version, patch_version) < (1, 8, 5):
    raise Exception("Rtree requires libspatialindex 1.8.5 or greater")

__all__ = ["Rtree", "Index", "Property"]


def _get_bounds(handle, bounds_fn, interleaved):
    """Read the bounds of ``handle`` via C callback ``bounds_fn``.

    Returns None for an empty (zero-dimension) result; otherwise a list of
    mins+maxs, interleaved or not per ``interleaved``.
    """
    pp_mins = ctypes.pointer(ctypes.c_double())
    pp_maxs = ctypes.pointer(ctypes.c_double())
    dimension = ctypes.c_uint32(0)

    bounds_fn(
        handle, ctypes.byref(pp_mins), ctypes.byref(pp_maxs), ctypes.byref(dimension)
    )
    if dimension.value == 0:
        return None

    mins = ctypes.cast(pp_mins, ctypes.POINTER(ctypes.c_double * dimension.value))
    maxs = ctypes.cast(pp_maxs, ctypes.POINTER(ctypes.c_double * dimension.value))

    results = [mins.contents[i] for i in range(dimension.value)]
    results += [maxs.contents[i] for i in range(dimension.value)]

    # free the C-side arrays now that values are copied into `results`
    p_mins = ctypes.cast(mins, ctypes.POINTER(ctypes.c_double))
    p_maxs = ctypes.cast(maxs, ctypes.POINTER(ctypes.c_double))
    core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
    core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))
    if interleaved:  # they want bbox order.
        return results
    return Index.deinterleave(results)


def _get_data(handle):
    """Copy the serialized payload bytes out of an index item ``handle``.

    Returns None when the item carries no data; frees the C buffer either way.
    """
    length = ctypes.c_uint64(0)
    d = ctypes.pointer(ctypes.c_uint8(0))
    core.rt.IndexItem_GetData(handle, ctypes.byref(d), ctypes.byref(length))
    c = ctypes.cast(d, ctypes.POINTER(ctypes.c_void_p))
    if length.value == 0:
        core.rt.Index_Free(c)
        return None
    s = ctypes.string_at(d, length.value)
    core.rt.Index_Free(c)
    return s


class Index:
    """An R-Tree, MVR-Tree, or TPR-Tree indexing object"""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Creates a new index

        :param filename:
            The first argument in the constructor is assumed to be a filename
            determining that a file-based storage for the index should be
            used. If the first argument is not of type basestring, it is
            then assumed to be an instance of ICustomStorage or derived
            class. If the first argument is neither of type basestring nor
            an instance of ICustomStorage, it is then assumed to be an
            input index item stream.

        :param stream:
            If the first argument in the constructor is not of type
            basestring, it is assumed to be an iterable stream of data that
            will raise a StopIteration. It must be in the form defined by
            the :attr:`interleaved` attribute of the index. The following
            example would assume :attr:`interleaved` is False::

                (id,
                 (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk),
                 object)

            The object can be None, but you must put a place holder of
            ``None`` there.

            For a TPR-Tree, this would be in the form::

                (id,
                 ((minx, maxx, miny, maxy, ..., ..., mink, maxk),
                  (minvx, maxvx, minvy, maxvy, ..., ..., minvk, maxvk),
                  time),
                 object)

        :param storage:
            If the first argument in the constructor is an instance of
            ICustomStorage then the given custom storage is used.

        :param interleaved: True or False, defaults to True.
            This parameter determines the coordinate order for all methods
            that take in coordinates.

        :param properties: An :class:`index.Property` object.
            This object sets both the creation and instantiation properties
            for the object and they are passed down into libspatialindex.
            A few properties are curried from instantiation parameters
            for you like ``pagesize`` and ``overwrite``
            to ensure compatibility with previous versions of the library.
            All other properties must be set on the object.

        .. warning::
            The coordinate ordering for all functions are sensitive the
            index's :attr:`interleaved` data member. If :attr:`interleaved`
            is False, the coordinates must be in the form
            [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If
            :attr:`interleaved` is True, the coordinates must be in the form
            [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax]. This also applies
            to velocities when using a TPR-Tree.

        A basic example

        ::

            >>> from rtree import index
            >>> p = index.Property()

            >>> idx = index.Index(properties=p)
            >>> idx  # doctest: +NORMALIZE_WHITESPACE
            rtree.index.Index(bounds=[1.7976931348623157e+308,
                                      1.7976931348623157e+308,
                                      -1.7976931348623157e+308,
                                      -1.7976931348623157e+308],
                              size=0)

        Insert an item into the index::

            >>> idx.insert(4321,
            ...            (34.3776829412, 26.7375853734, 49.3776829412,
            ...             41.7375853734),
            ...            obj=42)

        Query::

            >>> hits = idx.intersection((0, 0, 60, 60), objects=True)
            >>> for i in hits:
            ...     if i.id == 4321:
            ...         i.object
            ...         i.bbox
            ... # doctest: +ELLIPSIS
            42
            [34.37768294..., 26.73758537..., 49.37768294..., 41.73758537...]

        Using custom serializers::

            >>> class JSONIndex(index.Index):
            ...     def dumps(self, obj):
            ...         # This import is nested so that the doctest doesn't
            ...         # require simplejson.
            ...         import simplejson
            ...         return simplejson.dumps(obj).encode('ascii')
            ...
            ...     def loads(self, string):
            ...         import simplejson
            ...         return simplejson.loads(string.decode('ascii'))

            >>> stored_obj = {"nums": [23, 45], "letters": "abcd"}
            >>> json_idx = JSONIndex()
            >>> try:
            ...     json_idx.insert(1, (0, 1, 0, 1), stored_obj)
            ...     list(json_idx.nearest((0, 0), 1,
            ...                           objects="raw")) == [stored_obj]
            ... except ImportError:
            ...     True
            True

        """
        self.properties = kwargs.get("properties", Property())

        # TPR-Tree support is optional in libspatialindex; fail early if the
        # loaded C library was built without it (see core.py try/except).
        if self.properties.type == RT_TPRTree and not hasattr(
            core.rt, "Index_InsertTPData"
        ):
            raise RuntimeError(
                "TPR-Tree type not supported with version of libspatialindex"
            )

        # interleaved True gives 'bbox' order.
        self.interleaved = bool(kwargs.get("interleaved", True))

        stream = None
        arrays = None
        basename = None
        storage = None
        if args:
            if isinstance(args[0], str) or isinstance(args[0], bytes):
                # they sent in a filename
                basename = args[0]
                # they sent in a filename, stream or filename, buffers
                if len(args) > 1:
                    if isinstance(args[1], tuple):
                        arrays = args[1]
                    else:
                        stream = args[1]
            elif isinstance(args[0], ICustomStorage):
                storage = args[0]
                # they sent in a storage, stream
                if len(args) > 1:
                    stream = args[1]
            elif isinstance(args[0], tuple):
                arrays = args[0]
            else:
                stream = args[0]

        if basename:
            self.properties.storage = RT_Disk
            self.properties.filename = basename

            # check we can read the file
            f = str(basename) + "." + self.properties.idx_extension
            p = os.path.abspath(f)

            # assume if the file exists, we're not going to overwrite it
            # unless the user explicitly set the property to do so
            if os.path.exists(p):
                self.properties.overwrite = bool(kwargs.get("overwrite", False))

                # assume we're fetching the first index_id. If the user
                # set it, we'll fetch that one.
                if not self.properties.overwrite:
                    try:
                        self.properties.index_id
                    except RTreeError:
                        self.properties.index_id = 1

            d = os.path.dirname(p)
            if not os.access(d, os.W_OK):
                message = f"Unable to open file '{f}' for index storage"
                raise OSError(message)
        elif storage:
            self.properties.storage = RT_Custom
            if storage.hasData:
                self.properties.overwrite = bool(kwargs.get("overwrite", False))
                if not self.properties.overwrite:
                    try:
                        self.properties.index_id
                    except RTreeError:
                        self.properties.index_id = 1
                else:
                    storage.clear()

            self.customstorage = storage
            storage.registerCallbacks(self.properties)

        else:
            self.properties.storage = RT_Memory

        ps = kwargs.get("pagesize", None)
        if ps:
            self.properties.pagesize = int(ps)

        if stream and self.properties.type == RT_RTree:
            self._exception = None
            self.handle = self._create_idx_from_stream(stream)
            if self._exception:
                raise self._exception
        elif arrays and self.properties.type == RT_RTree:
            self._exception = None
            try:
                self.handle = self._create_idx_from_array(*arrays)
            except NameError:
                # _create_idx_from_array only exists with newer C libraries
                raise NotImplementedError(
                    "libspatialindex >= 2.1 needed for bulk insert"
                )
            if self._exception:
                raise self._exception
        else:
            self.handle = IndexHandle(self.properties.handle)
            if stream:  # Bulk insert not supported, so add one by one
                for item in stream:
                    self.insert(*item)
            elif arrays:
                raise NotImplementedError("Bulk insert only supported for RTrees")

    def get_size(self) -> int:
        # deprecated alias for len(index)
        warnings.warn(
            "index.get_size() is deprecated, use len(index) instead", DeprecationWarning
        )
        return len(self)

    def __len__(self) -> int:
        """The number of entries in the index.
:return: number of entries """ try: return self.count(self.bounds) except RTreeError: return 0 def __repr__(self) -> str: return f"rtree.index.Index(bounds={self.bounds}, size={len(self)})" def __getstate__(self) -> dict[str, Any]: state = self.__dict__.copy() del state["handle"] return state def __setstate__(self, state: dict[str, Any]) -> None: self.__dict__.update(state) self.handle = IndexHandle(self.properties.handle) def dumps(self, obj: object) -> bytes: return pickle.dumps(obj) def loads(self, string: bytes) -> object: return pickle.loads(string) def close(self) -> None: """Force a flush of the index to storage. Renders index inaccessible.""" if self.handle: self.handle.destroy() self.handle = None else: raise OSError("Unclosable index") def flush(self) -> None: """Force a flush of the index to storage.""" if self.handle: self.handle.flush() def get_coordinate_pointers( self, coordinates: Sequence[float] ) -> tuple[float, float]: dimension = self.properties.dimension coordinates = list(coordinates) arr = ctypes.c_double * dimension mins = arr() # Point if len(coordinates) == dimension: mins[:] = coordinates maxs = mins # Bounding box else: maxs = arr() # Interleaved box if self.interleaved: p = coordinates[:dimension] q = coordinates[dimension:] # Non-interleaved box else: p = coordinates[::2] q = coordinates[1::2] mins[:] = p maxs[:] = q if not p <= q: raise RTreeError( "Coordinates must not have minimums more than maximums" ) return mins, maxs @staticmethod def _get_time_doubles(times): if times[0] > times[1]: raise RTreeError("Start time must be less than end time") t_start = ctypes.c_double(times[0]) t_end = ctypes.c_double(times[1]) return t_start, t_end def _serialize(self, obj): serialized = self.dumps(obj) size = len(serialized) d = ctypes.create_string_buffer(serialized) # d.value = serialized p = ctypes.pointer(d) # return serialized to keep it alive for the pointer. 
        return size, ctypes.cast(p, ctypes.POINTER(ctypes.c_uint8)), serialized

    # NOTE(review): set/get_result_limit call Index_SetResultSetOffset /
    # Index_GetResultSetOffset while set/get_result_offset call the *Limit
    # C functions — this looks swapped; confirm against the libspatialindex
    # C API before changing, as existing callers may depend on it.
    def set_result_limit(self, value):
        return core.rt.Index_SetResultSetOffset(self.handle, value)

    def get_result_limit(self):
        return core.rt.Index_GetResultSetOffset(self.handle)

    result_limit = property(get_result_limit, set_result_limit)

    def set_result_offset(self, value):
        return core.rt.Index_SetResultSetLimit(self.handle, value)

    def get_result_offset(self):
        return core.rt.Index_GetResultSetLimit(self.handle)

    result_offset = property(get_result_offset, set_result_offset)

    def insert(self, id: int, coordinates: Any, obj: object = None) -> None:
        """Inserts an item into the index with the given coordinates.

        :param id: A long integer that is the identifier for this index entry.
            IDs need not be unique to be inserted into the index, and it is up
            to the user to ensure they are unique if this is a requirement.

        :param coordinates: This may be an object that satisfies the numpy
            array protocol, providing the index's dimension * 2 coordinate
            pairs representing the `mink` and `maxk` coordinates in
            each dimension defining the bounds of the query window.
            For a TPR-Tree, this must be a 3-element sequence including
            not only the positional coordinate pairs but also the
            velocity pairs `minvk` and `maxvk` and a time value as a float.

        :param obj: a pickleable object.  If not None, this object will be
            stored in the index with the :attr:`id`.

        The following example inserts an entry into the index with id `4321`,
        and the object it stores with that id is the number `42`. The
        coordinate ordering in this instance is the default (interleaved=True)
        ordering::

            >>> from rtree import index
            >>> idx = index.Index()
            >>> idx.insert(4321,
            ...            (34.3776829412, 26.7375853734, 49.3776829412,
            ...             41.7375853734),
            ...            obj=42)

        This example is inserting the same object for a TPR-Tree, additionally
        including a set of velocities at time `3`::

            >>> p = index.Property(type=index.RT_TPRTree)  # doctest: +SKIP
            >>> idx = index.Index(properties=p)  # doctest: +SKIP
            >>> idx.insert(4321,
            ...            ((34.3776829412, 26.7375853734, 49.3776829412,
            ...              41.7375853734),
            ...             (0.5, 2, 1.5, 2.5),
            ...             3.0),
            ...            obj=42)  # doctest: +SKIP

        """
        if self.properties.type == RT_TPRTree:
            # https://github.com/python/mypy/issues/6799
            return self._insertTP(id, *coordinates, obj=obj)  # type: ignore[misc]

        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
        data = ctypes.c_ubyte(0)
        size = 0
        pyserialized = None
        if obj is not None:
            # keep pyserialized referenced until the C call completes
            size, data, pyserialized = self._serialize(obj)
        core.rt.Index_InsertData(
            self.handle, id, p_mins, p_maxs, self.properties.dimension, data, size
        )

    add = insert

    def _insertTP(
        self,
        id: int,
        coordinates: Sequence[float],
        velocities: Sequence[float],
        time: float,
        obj: object = None,
    ) -> None:
        """TPR-Tree insert: positions plus velocities at ``time``."""
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
        pv_mins, pv_maxs = self.get_coordinate_pointers(velocities)
        # End time isn't used
        t_start, t_end = self._get_time_doubles((time, time + 1))
        data = ctypes.c_ubyte(0)
        size = 0
        if obj is not None:
            size, data, _ = self._serialize(obj)
        core.rt.Index_InsertTPData(
            self.handle,
            id,
            p_mins,
            p_maxs,
            pv_mins,
            pv_maxs,
            t_start,
            t_end,
            self.properties.dimension,
            data,
            size,
        )

    def count(self, coordinates: Any) -> int:
        """Return number of objects that intersect the given coordinates.

        :param coordinates: This may be an object that satisfies the numpy
            array protocol, providing the index's dimension * 2 coordinate
            pairs representing the `mink` and `maxk` coordinates in
            each dimension defining the bounds of the query window.
            For a TPR-Tree, this must be a 3-element sequence including
            not only the positional coordinate pairs but also the
            velocity pairs `minvk` and `maxvk` and a time pair for the
            time range as a float.
        The following example queries the index for any objects any objects
        that were stored in the index intersect the bounds given in the
        coordinates::

            >>> from rtree import index
            >>> idx = index.Index()
            >>> idx.insert(4321,
            ...            (34.3776829412, 26.7375853734, 49.3776829412,
            ...             41.7375853734),
            ...            obj=42)

            >>> print(idx.count((0, 0, 60, 60)))
            1

        This example is similar for a TPR-Tree::

            >>> p = index.Property(type=index.RT_TPRTree)  # doctest: +SKIP
            >>> idx = index.Index(properties=p)  # doctest: +SKIP
            >>> idx.insert(4321,
            ...            ((34.3776829412, 26.7375853734, 49.3776829412,
            ...              41.7375853734),
            ...             (0.5, 2, 1.5, 2.5),
            ...             3.0),
            ...            obj=42)  # doctest: +SKIP

            >>> print(idx.count(((0, 0, 60, 60), (0, 0, 0, 0), (3, 5))))
            ... # doctest: +SKIP
            1

        """
        if self.properties.type == RT_TPRTree:
            return self._countTP(*coordinates)

        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        p_num_results = ctypes.c_uint64(0)

        core.rt.Index_Intersects_count(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(p_num_results),
        )

        return p_num_results.value

    def _countTP(
        self, coordinates: Sequence[float], velocities: Sequence[float], times: float
    ) -> int:
        """TPR-Tree variant of :meth:`count` over a time range."""
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
        pv_mins, pv_maxs = self.get_coordinate_pointers(velocities)
        t_start, t_end = self._get_time_doubles(times)

        p_num_results = ctypes.c_uint64(0)

        core.rt.Index_TPIntersects_count(
            self.handle,
            p_mins,
            p_maxs,
            pv_mins,
            pv_maxs,
            t_start,
            t_end,
            self.properties.dimension,
            ctypes.byref(p_num_results),
        )

        return p_num_results.value

    @overload
    def contains(self, coordinates: Any, objects: Literal[True]) -> Iterator[Item]: ...

    @overload
    def contains(
        self, coordinates: Any, objects: Literal[False] = False
    ) -> Iterator[int] | None: ...

    @overload
    def contains(
        self, coordinates: Any, objects: Literal["raw"]
    ) -> Iterator[object]: ...

    def contains(
        self, coordinates: Any, objects: bool | Literal["raw"] = False
    ) -> Iterator[Item | int | object] | None:
        """Return ids or objects in the index that contains within the given
        coordinates.

        :param coordinates: This may be an object that satisfies the numpy
            array protocol, providing the index's dimension * 2 coordinate
            pairs representing the `mink` and `maxk` coordinates in
            each dimension defining the bounds of the query window.

        :param objects: If True, the intersection method will return index
            objects that were pickled when they were stored with each index
            entry, as well as the id and bounds of the index entries. If
            'raw', the objects will be returned without the
            :class:`rtree.index.Item` wrapper.

        The following example queries the index for any objects any objects
        that were stored in the index intersect the bounds given in the
        coordinates::

            >>> from rtree import index
            >>> idx = index.Index()
            >>> idx.insert(4321,
            ...            (34.3776829412, 26.7375853734, 49.3776829412,
            ...             41.7375853734),
            ...            obj=42)

            >>> hits = list(idx.contains((0, 0, 60, 60), objects=True))
            ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +SKIP
            >>> [(item.object, item.bbox) for item in hits if item.id == 4321]
            ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +SKIP
            [(42, [34.37768294..., 26.73758537..., 49.37768294...,
                   41.73758537...])]

        If the :class:`rtree.index.Item` wrapper is not used, it is faster to
        request the 'raw' objects::

            >>> list(idx.contains((0, 0, 60, 60), objects="raw"))
            ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +SKIP
            [42]

        """
        if objects:
            return self._contains_obj(coordinates, objects)

        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        p_num_results = ctypes.c_uint64(0)

        it = ctypes.pointer(ctypes.c_int64())

        # Index_Contains_id is only present in newer libspatialindex builds;
        # returns None (not an empty iterator) when the symbol is missing.
        try:
            core.rt.Index_Contains_id
        except AttributeError:
            return None

        core.rt.Index_Contains_id(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(it),
            ctypes.byref(p_num_results),
        )
        return self._get_ids(it, p_num_results.value)

    def __and__(self, other: Index) -> Index:
        """Take the intersection of two Index objects.

        :param other: another index
        :return: a new index
        :raises AssertionError: if self and other have different interleave
            or dimension
        """
        assert self.interleaved == other.interleaved
        assert self.properties.dimension == other.properties.dimension

        # new entries get fresh sequential ids; objects are paired as tuples
        i = 0
        new_idx = Index(interleaved=self.interleaved, properties=self.properties)

        # For each Item in self...
        for item1 in self.intersection(self.bounds, objects=True):
            if self.interleaved:
                # For each Item in other that intersects...
                for item2 in other.intersection(item1.bbox, objects=True):
                    # Compute the intersection bounding box
                    # (first half of bbox is mins, second half is maxs)
                    bbox = []
                    for j in range(len(item1.bbox)):
                        if j < len(item1.bbox) // 2:
                            bbox.append(max(item1.bbox[j], item2.bbox[j]))
                        else:
                            bbox.append(min(item1.bbox[j], item2.bbox[j]))

                    new_idx.insert(i, bbox, (item1.object, item2.object))
                    i += 1
            else:
                # For each Item in other that intersects...
                for item2 in other.intersection(item1.bounds, objects=True):
                    # Compute the intersection bounding box
                    # (even positions are mins, odd positions are maxs)
                    bounds = []
                    for j in range(len(item1.bounds)):
                        if j % 2 == 0:
                            bounds.append(max(item1.bounds[j], item2.bounds[j]))
                        else:
                            bounds.append(min(item1.bounds[j], item2.bounds[j]))

                    new_idx.insert(i, bounds, (item1.object, item2.object))
                    i += 1

        return new_idx

    def __or__(self, other: Index) -> Index:
        """Take the union of two Index objects.
        :param other: another index
        :return: a new index
        :raises AssertionError: if self and other have different interleave
            or dimension
        """
        assert self.interleaved == other.interleaved
        assert self.properties.dimension == other.properties.dimension

        new_idx = Index(interleaved=self.interleaved, properties=self.properties)

        # For each index...
        for old_idx in [self, other]:
            # For each item...
            for item in old_idx.intersection(old_idx.bounds, objects=True):
                if self.interleaved:
                    new_idx.insert(item.id, item.bbox, item.object)
                else:
                    new_idx.insert(item.id, item.bounds, item.object)

        return new_idx

    @overload
    def intersection(
        self, coordinates: Any, objects: Literal[True]
    ) -> Iterator[Item]: ...

    @overload
    def intersection(
        self, coordinates: Any, objects: Literal[False] = False
    ) -> Iterator[int]: ...

    @overload
    def intersection(
        self, coordinates: Any, objects: Literal["raw"]
    ) -> Iterator[object]: ...

    def intersection(
        self, coordinates: Any, objects: bool | Literal["raw"] = False
    ) -> Iterator[Item | int | object]:
        """Return ids or objects in the index that intersect the given
        coordinates.

        :param coordinates: This may be an object that satisfies the numpy
            array protocol, providing the index's dimension * 2 coordinate
            pairs representing the `mink` and `maxk` coordinates in
            each dimension defining the bounds of the query window.
            For a TPR-Tree, this must be a 3-element sequence including
            not only the positional coordinate pairs but also the
            velocity pairs `minvk` and `maxvk` and a time pair for the
            time range as a float.

        :param objects: If True, the intersection method will return index
            objects that were pickled when they were stored with each index
            entry, as well as the id and bounds of the index entries. If
            'raw', the objects will be returned without the
            :class:`rtree.index.Item` wrapper.

        The following example queries the index for any objects any objects
        that were stored in the index intersect the bounds given in the
        coordinates::

            >>> from rtree import index
            >>> idx = index.Index()
            >>> idx.insert(4321,
            ...            (34.3776829412, 26.7375853734, 49.3776829412,
            ...             41.7375853734),
            ...            obj=42)

            >>> hits = list(idx.intersection((0, 0, 60, 60), objects=True))
            >>> [(item.object, item.bbox) for item in hits if item.id == 4321]
            ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
            [(42, [34.37768294..., 26.73758537..., 49.37768294...,
                   41.73758537...])]

        If the :class:`rtree.index.Item` wrapper is not used, it is faster to
        request the 'raw' objects::

            >>> list(idx.intersection((0, 0, 60, 60), objects="raw"))
            [42]

        Similar for the TPR-Tree::

            >>> p = index.Property(type=index.RT_TPRTree)  # doctest: +SKIP
            >>> idx = index.Index(properties=p)  # doctest: +SKIP
            >>> idx.insert(4321,
            ...            ((34.3776829412, 26.7375853734, 49.3776829412,
            ...              41.7375853734),
            ...             (0.5, 2, 1.5, 2.5),
            ...             3.0),
            ...            obj=42)  # doctest: +SKIP

            >>> hits = list(idx.intersection(
            ...     ((0, 0, 60, 60), (0, 0, 0, 0), (3, 5)), objects=True))
            ... # doctest: +SKIP
            >>> [(item.object, item.bbox) for item in hits if item.id == 4321]
            ... # doctest: +SKIP
            [(42, [34.37768294..., 26.73758537..., 49.37768294...,
                   41.73758537...])]

        """
        if self.properties.type == RT_TPRTree:
            # https://github.com/python/mypy/issues/6799
            return self._intersectionTP(  # type: ignore[misc]
                *coordinates, objects=objects
            )

        if objects:
            return self._intersection_obj(coordinates, objects)

        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        p_num_results = ctypes.c_uint64(0)

        it = ctypes.pointer(ctypes.c_int64())

        core.rt.Index_Intersects_id(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(it),
            ctypes.byref(p_num_results),
        )
        return self._get_ids(it, p_num_results.value)

    def _intersectionTP(self, coordinates, velocities, times, objects=False):
        """TPR-Tree variant of :meth:`intersection` over a time range."""
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
        pv_mins, pv_maxs = self.get_coordinate_pointers(velocities)
        t_start, t_end = self._get_time_doubles(times)

        p_num_results = ctypes.c_uint64(0)

        # select id- vs object-returning C call up front
        if objects:
            call = core.rt.Index_TPIntersects_obj
            it = ctypes.pointer(ctypes.c_void_p())
        else:
            call = core.rt.Index_TPIntersects_id
            it = ctypes.pointer(ctypes.c_int64())

        call(
            self.handle,
            p_mins,
            p_maxs,
            pv_mins,
            pv_maxs,
            t_start,
            t_end,
            self.properties.dimension,
            ctypes.byref(it),
            ctypes.byref(p_num_results),
        )

        if objects:
            return self._get_objects(it, p_num_results.value, objects)
        else:
            return self._get_ids(it, p_num_results.value)

    def _intersection_obj(self, coordinates, objects):
        """Object-returning backend for :meth:`intersection`."""
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        p_num_results = ctypes.c_uint64(0)

        it = ctypes.pointer(ctypes.c_void_p())

        core.rt.Index_Intersects_obj(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(it),
            ctypes.byref(p_num_results),
        )
        return self._get_objects(it, p_num_results.value, objects)

    def _contains_obj(self, coordinates: Any, objects):
        """Object-returning backend for :meth:`contains`.

        Returns None when the loaded C library lacks Index_Contains_obj.
        """
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        p_num_results = ctypes.c_uint64(0)

        it = ctypes.pointer(ctypes.c_void_p())

        try:
            core.rt.Index_Contains_obj
        except AttributeError:
            return None

        core.rt.Index_Contains_obj(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(it),
            ctypes.byref(p_num_results),
        )
        return self._get_objects(it, p_num_results.value, objects)

    def _get_objects(self, it, num_results, objects):
        # take the pointer, yield the result objects and free
        items = ctypes.cast(
            it, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p * num_results))
        )
        its = ctypes.cast(items, ctypes.POINTER(ctypes.POINTER(ctypes.c_void_p)))

        try:
            if objects != "raw":
                for i in range(num_results):
                    yield Item(self.loads, items[i])
            else:
                for i in range(num_results):
                    data = _get_data(items[i])
                    if data is None:
                        yield data
                    else:
                        yield self.loads(data)

        finally:
            # the generator may be abandoned early; the finally block still
            # frees the C result set when the generator is closed
            core.rt.Index_DestroyObjResults(its, num_results)

    def _get_ids(self, it, num_results):
        # take the pointer, yield the results and free
        items = ctypes.cast(it, ctypes.POINTER(ctypes.c_int64 * num_results))
        its = ctypes.cast(items, ctypes.POINTER(ctypes.c_void_p))

        try:
            for i in range(num_results):
                yield items.contents[i]
        finally:
            core.rt.Index_Free(its)

    def _nearest_obj(self, coordinates, num_results, objects):
        """Object-returning backend for :meth:`nearest`."""
        p_mins, p_maxs = self.get_coordinate_pointers(coordinates)

        # the C call may update the count (e.g. ties at the same distance)
        p_num_results = ctypes.pointer(ctypes.c_uint64(num_results))

        it = ctypes.pointer(ctypes.c_void_p())

        core.rt.Index_NearestNeighbors_obj(
            self.handle,
            p_mins,
            p_maxs,
            self.properties.dimension,
            ctypes.byref(it),
            p_num_results,
        )

        return self._get_objects(it, p_num_results.contents.value, objects)

    @overload
    def nearest(
        self, coordinates: Any, num_results: int, objects: Literal[True]
    ) -> Iterator[Item]: ...

    @overload
    def nearest(
        self, coordinates: Any, num_results: int, objects: Literal[False] = False
    ) -> Iterator[int]: ...

    @overload
    def nearest(
        self, coordinates: Any, num_results: int, objects: Literal["raw"]
    ) -> Iterator[object]: ...

    def nearest(
        self,
        coordinates: Any,
        num_results: int = 1,
        objects: bool | Literal["raw"] = False,
    ) -> Iterator[Item | int | object]:
        """Returns the ``k``-nearest objects to the given coordinates.
:param coordinates: This may be an object that satisfies the numpy array protocol, providing the index's dimension * 2 coordinate pairs representing the `mink` and `maxk` coordinates in each dimension defining the bounds of the query window. For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time pair for the time range as a float. :param num_results: The number of results to return nearest to the given coordinates. If two index entries are equidistant, *both* are returned. This property means that :attr:`num_results` may return more items than specified :param objects: If True, the nearest method will return index objects that were pickled when they were stored with each index entry, as well as the id and bounds of the index entries. If 'raw', it will return the object as entered into the database without the :class:`rtree.index.Item` wrapper. .. warning:: This is currently not implemented for the TPR-Tree. 
Example of finding the three items nearest to this one:: >>> from rtree import index >>> idx = index.Index() >>> idx.insert(4321, (34.37, 26.73, 49.37, 41.73), obj=42) >>> hits = idx.nearest((0, 0, 10, 10), 3, objects=True) """ if self.properties.type == RT_TPRTree: # https://github.com/python/mypy/issues/6799 return self._nearestTP(*coordinates, objects=objects) # type: ignore[misc] if objects: return self._nearest_obj(coordinates, num_results, objects) p_mins, p_maxs = self.get_coordinate_pointers(coordinates) # p_num_results is an input and output for C++ lib # as an input it says "get n closest neighbors" # but if multiple neighbors are at the same distance, both # will be returned # so the number of returned neighbors may be > p_num_results # thus p_num_results.contents.value gets set as an output by the # C++ lib to indicate the actual number of results for # _get_ids to use p_num_results = ctypes.pointer(ctypes.c_uint64(num_results)) it = ctypes.pointer(ctypes.c_int64()) core.rt.Index_NearestNeighbors_id( self.handle, p_mins, p_maxs, self.properties.dimension, ctypes.byref(it), p_num_results, ) return self._get_ids(it, p_num_results.contents.value) def intersection_v(self, mins, maxs): """Bulk intersection query for obtaining the ids of entries which intersect with the provided bounding boxes. The return value is a tuple consisting of two 1D NumPy arrays: one of intersecting ids and another containing the counts for each bounding box. :param mins: A NumPy array of shape `(n, d)` containing the minima to query. :param maxs: A NumPy array of shape `(n, d)` containing the maxima to query. 
""" import numpy as np assert mins.shape == maxs.shape assert mins.strides == maxs.strides # Cast mins = mins.astype(np.float64) maxs = maxs.astype(np.float64) # Extract counts n, d = mins.shape # Compute strides d_i_stri = mins.strides[0] // mins.itemsize d_j_stri = mins.strides[1] // mins.itemsize ids = np.empty(2 * n, dtype=np.int64) counts = np.empty(n, dtype=np.uint64) nr = ctypes.c_int64(0) offn, offi = 0, 0 while True: core.rt.Index_Intersects_id_v( self.handle, n - offn, d, len(ids), d_i_stri, d_j_stri, mins[offn:].ctypes.data, maxs[offn:].ctypes.data, ids[offi:].ctypes.data, counts[offn:].ctypes.data, ctypes.byref(nr), ) # If we got the expected nuber of results then return if nr.value == n - offn: return ids[: counts.sum()], counts # Otherwise, if our array is too small then resize else: offi += counts[offn : offn + nr.value].sum() offn += nr.value ids = ids.resize(2 * len(ids), refcheck=False) def nearest_v( self, mins, maxs, num_results=1, max_dists=None, strict=False, return_max_dists=False, ): """Bulk ``k``-nearest query for the given bounding boxes. The return value is a tuple consisting of, by default, two 1D NumPy arrays: one of intersecting ids and another containing the counts for each bounding box. :param mins: A NumPy array of shape `(n, d)` containing the minima to query. :param maxs: A NumPy array of shape `(n, d)` containing the maxima to query. :param num_results: The maximum number of neighbors to return for each bounding box. If there are multiple equidistant furthest neighbors then, by default, they are *all* returned. Hence, the actual number of results can be greater than requested. :param max_dists: Optional; a NumPy array of shape `(n,)` containing the maximum distance to consider for each bounding box. :param strict: If True then each point will never return more than `num_results` even in cases of equidistant furthest neighbors. 
:param return_max_dists: If True, the distance of the furthest neighbor for each bounding box will also be returned. """ import numpy as np assert mins.shape == maxs.shape assert mins.strides == maxs.strides # Cast mins = mins.astype(np.float64) maxs = maxs.astype(np.float64) # Extract counts n, d = mins.shape # Compute strides d_i_stri = mins.strides[0] // mins.itemsize d_j_stri = mins.strides[1] // mins.itemsize ids = np.empty(n * num_results, dtype=np.int64) counts = np.empty(n, dtype=np.uint64) nr = ctypes.c_int64(0) offn, offi = 0, 0 if max_dists is not None: assert len(max_dists) == n dists = max_dists.astype(np.float64).copy() elif return_max_dists: dists = np.zeros(n) else: dists = None while True: core.rt.Index_NearestNeighbors_id_v( self.handle, num_results if not strict else -num_results, n - offn, d, len(ids), d_i_stri, d_j_stri, mins[offn:].ctypes.data, maxs[offn:].ctypes.data, ids[offi:].ctypes.data, counts[offn:].ctypes.data, dists[offn:].ctypes.data if dists is not None else None, ctypes.byref(nr), ) # If we got the expected nuber of results then return if nr.value == n - offn: if return_max_dists: return ids[: counts.sum()], counts, dists else: return ids[: counts.sum()], counts # Otherwise, if our array is too small then resize else: offi += counts[offn : offn + nr.value].sum() offn += nr.value ids = ids.resize(2 * len(ids), refcheck=False) def _nearestTP(self, coordinates, velocities, times, num_results=1, objects=False): p_mins, p_maxs = self.get_coordinate_pointers(coordinates) pv_mins, pv_maxs = self.get_coordinate_pointers(velocities) t_start, t_end = self._get_time_doubles(times) p_num_results = ctypes.pointer(ctypes.c_uint64(num_results)) if objects: it = ctypes.pointer(ctypes.c_void_p()) call = core.rt.Index_TPNearestNeighbors_obj else: it = ctypes.pointer(ctypes.c_int64()) call = core.rt.Index_TPNearestNeighbors_id call( self.handle, p_mins, p_maxs, pv_mins, pv_maxs, t_start, t_end, self.properties.dimension, ctypes.byref(it), 
p_num_results, ) if objects: return self._get_objects(it, p_num_results.contents.value, objects) else: return self._get_ids(it, p_num_results.contents.value) def get_bounds(self, coordinate_interleaved=None): """Returns the bounds of the index :param coordinate_interleaved: If True, the coordinates are turned in the form [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax], otherwise they are returned as [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If not specified, the :attr:`interleaved` member of the index is used, which defaults to True. """ if coordinate_interleaved is None: coordinate_interleaved = self.interleaved return _get_bounds(self.handle, core.rt.Index_GetBounds, coordinate_interleaved) bounds = property(get_bounds) def delete(self, id: int, coordinates: Any) -> None: """Deletes an item from the index with the given ``'id'`` and coordinates given by the ``coordinates`` sequence. As the index can contain multiple items with the same ID and coordinates, deletion is not guaranteed to delete all items in the index with the given ID and coordinates. :param id: A long integer ID for the entry, which need not be unique. The index can contain multiple entries with identical IDs and coordinates. Uniqueness of items should be enforced at the application level by the user. :param coordinates: Dimension * 2 coordinate pairs, representing the min and max coordinates in each dimension of the item to be deleted from the index. Their ordering will depend on the index's :attr:`interleaved` data member. These are not the coordinates of a space containing the item, but those of the item itself. Together with the id parameter, they determine which item will be deleted. This may be an object that satisfies the numpy array protocol. For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time pair for the original time the object was inserted and the current time as a float. 
Example:: >>> from rtree import index >>> idx = index.Index() >>> idx.delete(4321, ... (34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734)) For the TPR-Tree:: >>> p = index.Property(type=index.RT_TPRTree) # doctest: +SKIP >>> idx = index.Index(properties=p) # doctest: +SKIP >>> idx.delete(4321, ... ((34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734), ... (0.5, 2, 1.5, 2.5), ... (3.0, 5.0))) # doctest: +SKIP """ if self.properties.type == RT_TPRTree: return self._deleteTP(id, *coordinates) p_mins, p_maxs = self.get_coordinate_pointers(coordinates) core.rt.Index_DeleteData( self.handle, id, p_mins, p_maxs, self.properties.dimension ) def _deleteTP( self, id: int, coordinates: Sequence[float], velocities: Sequence[float], times: float, ) -> None: p_mins, p_maxs = self.get_coordinate_pointers(coordinates) pv_mins, pv_maxs = self.get_coordinate_pointers(velocities) t_start, t_end = self._get_time_doubles(times) core.rt.Index_DeleteTPData( self.handle, id, p_mins, p_maxs, pv_mins, pv_maxs, t_start, t_end, self.properties.dimension, ) def valid(self) -> bool: return bool(core.rt.Index_IsValid(self.handle)) def clearBuffer(self): return core.rt.Index_ClearBuffer(self.handle) @classmethod def deinterleave(self, interleaved: Sequence[object]) -> list[object]: """ [xmin, ymin, xmax, ymax] => [xmin, xmax, ymin, ymax] >>> Index.deinterleave([0, 10, 1, 11]) [0, 1, 10, 11] >>> Index.deinterleave([0, 1, 2, 10, 11, 12]) [0, 10, 1, 11, 2, 12] """ assert len(interleaved) % 2 == 0, "must be a pairwise list" dimension = len(interleaved) // 2 di = [] for i in range(dimension): di.extend([interleaved[i], interleaved[i + dimension]]) return di @classmethod def interleave(self, deinterleaved: Sequence[float]) -> list[float]: """ [xmin, xmax, ymin, ymax, zmin, zmax] => [xmin, ymin, zmin, xmax, ymax, zmax] >>> Index.interleave([0, 1, 10, 11]) [0, 10, 1, 11] >>> Index.interleave([0, 10, 1, 11, 2, 12]) [0, 1, 2, 10, 11, 12] >>> Index.interleave((-1, 1, 58, 62, 22, 
24)) [-1, 58, 22, 1, 62, 24] """ assert len(deinterleaved) % 2 == 0, "must be a pairwise list" # dimension = len(deinterleaved) / 2 interleaved = [] for i in range(2): interleaved.extend( [deinterleaved[i + j] for j in range(0, len(deinterleaved), 2)] ) return interleaved def _create_idx_from_stream(self, stream): """This function is used to instantiate the index given an iterable stream of data.""" stream_iter = iter(stream) dimension = self.properties.dimension darray = ctypes.c_double * dimension mins = darray() maxs = darray() no_data = ctypes.cast( ctypes.pointer(ctypes.c_ubyte(0)), ctypes.POINTER(ctypes.c_ubyte) ) def py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length): """This function must fill pointers to individual entries that will be added to the index. The C API will actually call this function to fill out the pointers. If this function returns anything other than 0, it is assumed that the stream of data is done.""" try: p_id[0], coordinates, obj = next(stream_iter) except StopIteration: # we're done return -1 except Exception as exc: self._exception = exc return -1 if self.interleaved: mins[:] = coordinates[:dimension] maxs[:] = coordinates[dimension:] else: mins[:] = coordinates[::2] maxs[:] = coordinates[1::2] p_mins[0] = mins p_maxs[0] = maxs # set the dimension p_dimension[0] = dimension if obj is None: p_data[0] = no_data p_length[0] = 0 else: p_length[0], data, _ = self._serialize(obj) p_data[0] = ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte)) return 0 stream = core.NEXTFUNC(py_next_item) return IndexStreamHandle(self.properties.handle, stream) def _create_idx_from_array(self, ibuf, minbuf, maxbuf): assert len(ibuf) == len(minbuf) assert len(ibuf) == len(maxbuf) assert minbuf.strides == maxbuf.strides # Cast ibuf = ibuf.astype(int) minbuf = minbuf.astype(float) maxbuf = maxbuf.astype(float) # Extract counts n, d = minbuf.shape # Compute strides i_stri = ibuf.strides[0] // 8 d_i_stri = minbuf.strides[0] // 8 d_j_stri = 
minbuf.strides[1] // 8

        return IndexArrayHandle(
            self.properties.handle,
            n,
            d,
            i_stri,
            d_i_stri,
            d_j_stri,
            ibuf.ctypes.data,
            minbuf.ctypes.data,
            maxbuf.ctypes.data,
        )

    def leaves(self):
        """Returns a list of ``(leaf id, child ids, bounds)`` tuples, one
        per leaf node of the index."""
        leaf_node_count = ctypes.c_uint32()
        p_leafsizes = ctypes.pointer(ctypes.c_uint32())
        p_leafids = ctypes.pointer(ctypes.c_int64())
        pp_childids = ctypes.pointer(ctypes.pointer(ctypes.c_int64()))

        pp_mins = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
        pp_maxs = ctypes.pointer(ctypes.pointer(ctypes.c_double()))
        dimension = ctypes.c_uint32(0)

        core.rt.Index_GetLeaves(
            self.handle,
            ctypes.byref(leaf_node_count),
            ctypes.byref(p_leafsizes),
            ctypes.byref(p_leafids),
            ctypes.byref(pp_childids),
            ctypes.byref(pp_mins),
            ctypes.byref(pp_maxs),
            ctypes.byref(dimension),
        )

        output = []

        count = leaf_node_count.value
        # Reinterpret the raw output pointers as fixed-size arrays now that
        # the number of leaves is known.
        sizes = ctypes.cast(p_leafsizes, ctypes.POINTER(ctypes.c_uint32 * count))
        ids = ctypes.cast(p_leafids, ctypes.POINTER(ctypes.c_int64 * count))
        child = ctypes.cast(
            pp_childids, ctypes.POINTER(ctypes.POINTER(ctypes.c_int64) * count)
        )
        mins = ctypes.cast(
            pp_mins, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count)
        )
        maxs = ctypes.cast(
            pp_maxs, ctypes.POINTER(ctypes.POINTER(ctypes.c_double) * count)
        )
        for i in range(count):
            p_child_ids = child.contents[i]

            id = ids.contents[i]
            size = sizes.contents[i]
            child_ids_array = ctypes.cast(
                p_child_ids, ctypes.POINTER(ctypes.c_int64 * size)
            )
            child_ids = []
            for j in range(size):
                child_ids.append(child_ids_array.contents[j])

            # free the child ids list
            core.rt.Index_Free(
                ctypes.cast(p_child_ids, ctypes.POINTER(ctypes.c_void_p))
            )

            p_mins = mins.contents[i]
            p_maxs = maxs.contents[i]

            p_mins = ctypes.cast(
                p_mins, ctypes.POINTER(ctypes.c_double * dimension.value)
            )
            p_maxs = ctypes.cast(
                p_maxs, ctypes.POINTER(ctypes.c_double * dimension.value)
            )

            # Bounds are collected mins-first then maxs (deinterleaved).
            bounds = []
            bounds = [p_mins.contents[i] for i in range(dimension.value)]
            bounds += [p_maxs.contents[i] for i in range(dimension.value)]

            # free the bounds
            p_mins = ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_double))
            p_maxs = ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_double))
            core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
            core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))

            output.append((id, child_ids, bounds))

        return output


# An alias to preserve backward compatibility
Rtree = Index


class Item:
    """A container for index entries"""

    __slots__ = ("handle", "owned", "id", "object", "bounds")

    def __init__(self, loads, handle, owned=False) -> None:
        """There should be no reason to instantiate these yourself. Items
        are created automatically when you call
        :meth:`rtree.index.Index.intersection` (or other index querying
        methods) with objects=True given the parameters of the function."""
        # NOTE(review): when ``handle`` is falsy, ``self.handle`` is left
        # unset and the IndexItem_GetID call below raises AttributeError —
        # confirm whether a falsy handle can ever reach this constructor.
        if handle:
            self.handle = handle

        self.owned = owned

        self.id = core.rt.IndexItem_GetID(self.handle)

        self.object = None
        self.object = self.get_object(loads)
        self.bounds = _get_bounds(self.handle, core.rt.IndexItem_GetBounds, False)

    def __lt__(self, other: Item) -> bool:
        return self.id < other.id

    def __gt__(self, other: Item) -> bool:
        return self.id > other.id

    @property
    def bbox(self) -> list[float]:
        """Returns the bounding box of the index entry"""
        return Index.interleave(self.bounds)

    def get_object(self, loads):
        # short circuit this so we only do it at construction time
        if self.object is not None:
            return self.object

        data = _get_data(self.handle)
        if data is None:
            return None

        return loads(data)


class InvalidHandleException(Exception):
    """Handle has been destroyed and can no longer be used"""


class Handle:
    """Base wrapper that owns a pointer returned by the C library and
    releases it on destruction."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self._ptr = self._create(*args, **kwargs)

    def _create(self, *args: Any, **kwargs: Any):
        raise NotImplementedError

    def _destroy(self, ptr):
        raise NotImplementedError

    def destroy(self) -> None:
        # AttributeError is swallowed because _ptr may never have been set
        # if _create failed part-way through __init__.
        try:
            if self._ptr is not None:
                self._destroy(self._ptr)
                self._ptr = None
        except AttributeError:
            pass

    @property
    def _as_parameter_(self):
        # ctypes consults this property when the handle object is passed
        # directly to a foreign function.
        if self._ptr is None:
            raise InvalidHandleException
        return self._ptr

    def __del__(self) -> None:
        try:
            self.destroy()
        except NameError:
            # The core.py model doesn't have
            # core.rt available anymore and it was tore
            # down. We don't want to try to do anything
            # in that instance
            return


class IndexHandle(Handle):
    # Handle over an index created via Index_Create.
    _create = core.rt.Index_Create
    _destroy = core.rt.Index_Destroy

    def flush(self) -> None:
        # Index_Flush is not exported by older libspatialindex builds;
        # silently skip when unavailable.
        try:
            core.rt.Index_Flush
            if self._ptr is not None:
                core.rt.Index_Flush(self._ptr)
        except AttributeError:
            pass


class IndexStreamHandle(IndexHandle):
    # Handle over an index bulk-loaded from a data stream.
    _create = core.rt.Index_CreateWithStream


try:

    class IndexArrayHandle(IndexHandle):
        # Only defined when the loaded libspatialindex provides
        # Index_CreateWithArray.
        _create = core.rt.Index_CreateWithArray

except AttributeError:
    pass


class PropertyHandle(Handle):
    # Handle over an index property set.
    _create = core.rt.IndexProperty_Create
    _destroy = core.rt.IndexProperty_Destroy


class Property:
    """An index property object is a container that contains a number of
    settable index properties. Many of these properties must be set at
    index creation times, while others can be used to adjust performance
    or behavior."""

    # Names of all properties exposed through getters/setters below; used
    # by as_dict() / pickling.
    pkeys = (
        "buffering_capacity",
        "custom_storage_callbacks",
        "custom_storage_callbacks_size",
        "dat_extension",
        "dimension",
        "filename",
        "fill_factor",
        "idx_extension",
        "index_capacity",
        "index_id",
        "leaf_capacity",
        "near_minimum_overlap_factor",
        "overwrite",
        "pagesize",
        "point_pool_capacity",
        "region_pool_capacity",
        "reinsert_factor",
        "split_distribution_factor",
        "storage",
        "tight_mbr",
        "tpr_horizon",
        "type",
        "variant",
        "writethrough",
    )

    def __init__(self, handle=None, owned: bool = True, **kwargs: Any) -> None:
        if handle is None:
            handle = PropertyHandle()
        self.handle = handle
        self.initialize_from_dict(kwargs)

    def initialize_from_dict(self, state: dict[str, Any]) -> None:
        # Apply the given property values, skipping Nones.
        for k, v in state.items():
            if v is not None:
                setattr(self, k, v)

        # Consistency checks
        if "near_minimum_overlap_factor" not in state:
            nmof = self.near_minimum_overlap_factor
            ilc = min(self.index_capacity, self.leaf_capacity)
            if nmof >= ilc:
                # Keep the factor below both capacities, as the C library
                # expects.
                self.near_minimum_overlap_factor = ilc // 3 + 1

    def 
__getstate__(self) -> dict[Any, Any]:
        # Pickle support: properties round-trip through their dict form.
        return self.as_dict()

    def __setstate__(self, state):
        self.handle = PropertyHandle()
        self.initialize_from_dict(state)

    def as_dict(self) -> dict[str, Any]:
        d = {}
        for k in self.pkeys:
            try:
                v = getattr(self, k)
            except RTreeError:
                # Not every property is supported by every libspatialindex
                # build; record those as None.
                v = None
            d[k] = v
        return d

    def __repr__(self) -> str:
        return repr(self.as_dict())

    def __str__(self) -> str:
        return pprint.pformat(self.as_dict())

    def get_index_type(self) -> int:
        # Cached on first read.
        try:
            return self._type
        except AttributeError:
            type = core.rt.IndexProperty_GetIndexType(self.handle)
            self._type: int = type
            return type

    def set_index_type(self, value: int) -> None:
        self._type = value
        return core.rt.IndexProperty_SetIndexType(self.handle, value)

    type = property(get_index_type, set_index_type)
    """Index type. Valid index type values are :data:`RT_RTree`,
    :data:`RT_MVTree`, or :data:`RT_TPRTree`. Only RT_RTree (the default)
    is practically supported at this time."""

    def get_variant(self) -> int:
        return core.rt.IndexProperty_GetIndexVariant(self.handle)

    def set_variant(self, value: int) -> None:
        return core.rt.IndexProperty_SetIndexVariant(self.handle, value)

    variant = property(get_variant, set_variant)
    """Index variant. Valid index variant values are :data:`RT_Linear`,
    :data:`RT_Quadratic`, and :data:`RT_Star`"""

    def get_dimension(self) -> int:
        # Cached on first read.
        try:
            return self._dimension
        except AttributeError:
            dim = core.rt.IndexProperty_GetDimension(self.handle)
            self._dimension: int = dim
            return dim

    def set_dimension(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("Negative or 0 dimensional indexes are not allowed")
        self._dimension = value
        return core.rt.IndexProperty_SetDimension(self.handle, value)

    dimension = property(get_dimension, set_dimension)
    """Index dimension. Must be greater than 0, though a dimension of 1
    might have undefined behavior."""

    def get_storage(self) -> int:
        return core.rt.IndexProperty_GetIndexStorage(self.handle)

    def set_storage(self, value: int) -> None:
        return core.rt.IndexProperty_SetIndexStorage(self.handle, value)

    storage = property(get_storage, set_storage)
    """Index storage.

    One of :data:`RT_Disk`, :data:`RT_Memory` or :data:`RT_Custom`.

    If a filename is passed as the first parameter to :class:index.Index,
    :data:`RT_Disk` is assumed. If a CustomStorage instance is passed,
    :data:`RT_Custom` is assumed. Otherwise, :data:`RT_Memory` is the
    default.
    """

    def get_pagesize(self) -> int:
        return core.rt.IndexProperty_GetPagesize(self.handle)

    def set_pagesize(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("Pagesize must be > 0")
        return core.rt.IndexProperty_SetPagesize(self.handle, value)

    pagesize = property(get_pagesize, set_pagesize)
    """The pagesize when disk storage is used. It is ideal to ensure that
    your index entries fit within a single page for best performance."""

    def get_index_capacity(self) -> int:
        return core.rt.IndexProperty_GetIndexCapacity(self.handle)

    def set_index_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("index_capacity must be > 0")
        return core.rt.IndexProperty_SetIndexCapacity(self.handle, value)

    index_capacity = property(get_index_capacity, set_index_capacity)
    """Index capacity"""

    def get_leaf_capacity(self) -> int:
        return core.rt.IndexProperty_GetLeafCapacity(self.handle)

    def set_leaf_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("leaf_capacity must be > 0")
        return core.rt.IndexProperty_SetLeafCapacity(self.handle, value)

    leaf_capacity = property(get_leaf_capacity, set_leaf_capacity)
    """Leaf capacity"""

    def get_index_pool_capacity(self) -> int:
        return core.rt.IndexProperty_GetIndexPoolCapacity(self.handle)

    def set_index_pool_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("index_pool_capacity must be > 0")
        return core.rt.IndexProperty_SetIndexPoolCapacity(self.handle, value)

    index_pool_capacity = property(get_index_pool_capacity, set_index_pool_capacity)
    """Index pool capacity"""

    def get_point_pool_capacity(self) -> int:
        return core.rt.IndexProperty_GetPointPoolCapacity(self.handle)

    def set_point_pool_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("point_pool_capacity must be > 0")
        return core.rt.IndexProperty_SetPointPoolCapacity(self.handle, value)

    point_pool_capacity = property(get_point_pool_capacity, set_point_pool_capacity)
    """Point pool capacity"""

    def get_region_pool_capacity(self) -> int:
        return core.rt.IndexProperty_GetRegionPoolCapacity(self.handle)

    def set_region_pool_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("region_pool_capacity must be > 0")
        return core.rt.IndexProperty_SetRegionPoolCapacity(self.handle, value)

    region_pool_capacity = property(get_region_pool_capacity, set_region_pool_capacity)
    """Region pool capacity"""

    def get_buffering_capacity(self) -> int:
        return core.rt.IndexProperty_GetBufferingCapacity(self.handle)

    def set_buffering_capacity(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("buffering_capacity must be > 0")
        return core.rt.IndexProperty_SetBufferingCapacity(self.handle, value)

    buffering_capacity = property(get_buffering_capacity, set_buffering_capacity)
    """Buffering capacity"""

    def get_tight_mbr(self):
        return bool(core.rt.IndexProperty_GetEnsureTightMBRs(self.handle))

    def set_tight_mbr(self, value):
        value = bool(value)
        return bool(core.rt.IndexProperty_SetEnsureTightMBRs(self.handle, value))

    tight_mbr = property(get_tight_mbr, set_tight_mbr)
    """Uses tight bounding rectangles"""

    def get_overwrite(self):
        return bool(core.rt.IndexProperty_GetOverwrite(self.handle))

    def set_overwrite(self, value):
        value = bool(value)
        return bool(core.rt.IndexProperty_SetOverwrite(self.handle, value))

    overwrite = property(get_overwrite, set_overwrite)
    """Overwrite existing index files"""

    def get_near_minimum_overlap_factor(self) -> int:
        return core.rt.IndexProperty_GetNearMinimumOverlapFactor(self.handle)

    def set_near_minimum_overlap_factor(self, value: int) -> None:
        if value <= 0:
            raise RTreeError("near_minimum_overlap_factor must be > 0")
        return core.rt.IndexProperty_SetNearMinimumOverlapFactor(self.handle, value)

    near_minimum_overlap_factor = property(
        get_near_minimum_overlap_factor, set_near_minimum_overlap_factor
    )
    """Overlap factor for MVRTrees"""

    def get_writethrough(self):
        return bool(core.rt.IndexProperty_GetWriteThrough(self.handle))

    def set_writethrough(self, value):
        value = bool(value)
        return bool(core.rt.IndexProperty_SetWriteThrough(self.handle, value))

    writethrough = property(get_writethrough, set_writethrough)
    """Write through caching"""

    def get_fill_factor(self) -> int:
        return core.rt.IndexProperty_GetFillFactor(self.handle)

    def set_fill_factor(self, value: int) -> None:
        return core.rt.IndexProperty_SetFillFactor(self.handle, value)

    fill_factor = property(get_fill_factor, set_fill_factor)
    """Index node fill factor before branching"""

    def get_split_distribution_factor(self) -> int:
        return core.rt.IndexProperty_GetSplitDistributionFactor(self.handle)

    def set_split_distribution_factor(self, value: int) -> None:
        return core.rt.IndexProperty_SetSplitDistributionFactor(self.handle, value)

    split_distribution_factor = property(
        get_split_distribution_factor, set_split_distribution_factor
    )
    """Split distribution factor"""

    def get_tpr_horizon(self):
        return core.rt.IndexProperty_GetTPRHorizon(self.handle)

    def set_tpr_horizon(self, value):
        return core.rt.IndexProperty_SetTPRHorizon(self.handle, value)

    tpr_horizon = property(get_tpr_horizon, set_tpr_horizon)
    """TPR horizon"""

    def get_reinsert_factor(self):
        return core.rt.IndexProperty_GetReinsertFactor(self.handle)

    def set_reinsert_factor(self, value):
        return core.rt.IndexProperty_SetReinsertFactor(self.handle, value)

    reinsert_factor = property(get_reinsert_factor, set_reinsert_factor)
    """Reinsert factor"""

    def get_filename(self):
        return core.rt.IndexProperty_GetFileName(self.handle).decode()

    def set_filename(self, value):
        # The C API expects bytes.
        if isinstance(value, str):
            value = value.encode("utf-8")
        return core.rt.IndexProperty_SetFileName(self.handle, value)

    filename = property(get_filename, set_filename)
    """Index filename for disk storage"""

    def get_dat_extension(self):
        ext = core.rt.IndexProperty_GetFileNameExtensionDat(self.handle)
        return ext.decode()

    def set_dat_extension(self, value):
        if isinstance(value, str):
            value = value.encode("utf-8")
        return core.rt.IndexProperty_SetFileNameExtensionDat(self.handle, value)

    dat_extension = property(get_dat_extension, set_dat_extension)
    """Extension for .dat file"""

    def get_idx_extension(self):
        ext = core.rt.IndexProperty_GetFileNameExtensionIdx(self.handle)
        return ext.decode()

    def set_idx_extension(self, value):
        if isinstance(value, str):
            value = value.encode("utf-8")
        return core.rt.IndexProperty_SetFileNameExtensionIdx(self.handle, value)

    idx_extension = property(get_idx_extension, set_idx_extension)
    """Extension for .idx file"""

    def get_custom_storage_callbacks_size(self) -> int:
        return core.rt.IndexProperty_GetCustomStorageCallbacksSize(self.handle)

    def set_custom_storage_callbacks_size(self, value: int) -> None:
        return core.rt.IndexProperty_SetCustomStorageCallbacksSize(self.handle, value)

    custom_storage_callbacks_size = property(
        get_custom_storage_callbacks_size, set_custom_storage_callbacks_size
    )
    """Size of callbacks for custom storage"""

    def get_custom_storage_callbacks(self):
        return core.rt.IndexProperty_GetCustomStorageCallbacks(self.handle)

    def set_custom_storage_callbacks(self, value):
        return core.rt.IndexProperty_SetCustomStorageCallbacks(self.handle, value)

    custom_storage_callbacks = property(
        get_custom_storage_callbacks, set_custom_storage_callbacks
    )
    """Callbacks for custom storage"""

    def get_index_id(self):
        return core.rt.IndexProperty_GetIndexID(self.handle)

    def set_index_id(self, value):
        return core.rt.IndexProperty_SetIndexID(self.handle, value)

    index_id = property(get_index_id, set_index_id)
    """First node index id"""


# custom storage implementation

id_type = ctypes.c_int64


class CustomStorageCallbacks(ctypes.Structure):
    """ctypes structure holding the callback function pointers that a
    custom storage implementation registers with the C library."""

    # callback types
    createCallbackType = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
    )
    destroyCallbackType = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
    )
    flushCallbackType = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)
    )
    loadCallbackType = ctypes.CFUNCTYPE(
        None,
        ctypes.c_void_p,
        id_type,
        ctypes.POINTER(ctypes.c_uint32),
        ctypes.POINTER(ctypes.POINTER(ctypes.c_uint8)),
        ctypes.POINTER(ctypes.c_int),
    )
    storeCallbackType = ctypes.CFUNCTYPE(
        None,
        ctypes.c_void_p,
        ctypes.POINTER(id_type),
        ctypes.c_uint32,
        ctypes.POINTER(ctypes.c_uint8),
        ctypes.POINTER(ctypes.c_int),
    )
    deleteCallbackType = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, id_type, ctypes.POINTER(ctypes.c_int)
    )

    _fields_ = [
        ("context", ctypes.c_void_p),
        ("createCallback", createCallbackType),
        ("destroyCallback", destroyCallbackType),
        ("flushCallback", flushCallbackType),
        ("loadCallback", loadCallbackType),
        ("storeCallback", storeCallbackType),
        ("deleteCallback", deleteCallbackType),
    ]

    def __init__(
        self,
        context,
        createCallback,
        destroyCallback,
        flushCallback,
        loadCallback,
        storeCallback,
        deleteCallback,
    ) -> None:
        # Wrap the plain Python callables in their CFUNCTYPE types so the
        # C library can invoke them.
        ctypes.Structure.__init__(
            self,
            ctypes.c_void_p(context),
            self.createCallbackType(createCallback),
            self.destroyCallbackType(destroyCallback),
            self.flushCallbackType(flushCallback),
            self.loadCallbackType(loadCallback),
            self.storeCallbackType(storeCallback),
            self.deleteCallbackType(deleteCallback),
        )


class ICustomStorage:
    """Interface for custom storage backends."""

    # error codes
    NoError = 0
    InvalidPageError = 1
    IllegalStateError = 2

    # special pages
    EmptyPage = -0x1
    NewPage = -0x1

    def allocateBuffer(self, length):
        return core.rt.SIDX_NewBuffer(length)

    def registerCallbacks(self, properties):
        raise NotImplementedError()

    def clear(self):
        raise NotImplementedError()

    hasData = property(lambda self: False)
    """Override this property to allow for reloadable storages"""


class CustomStorageBase(ICustomStorage):
    """Derive from this class to create your own storage manager with
    access to the raw C buffers."""

    def registerCallbacks(self, properties):
        callbacks = CustomStorageCallbacks(
            ctypes.c_void_p(),
            self.create,
            self.destroy,
            self.flush,
            self.loadByteArray,
            self.storeByteArray,
            self.deleteByteArray,
        )
        properties.custom_storage_callbacks_size = ctypes.sizeof(callbacks)
        # Keep a reference so the callback thunks are not garbage
        # collected while the C library still holds pointers to them.
        self.callbacks = callbacks
        properties.custom_storage_callbacks = ctypes.cast(
            ctypes.pointer(callbacks), ctypes.c_void_p
        )

    # the user must override these callback functions
    def create(self, context, returnError):
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")

    def destroy(self, context, returnError):
        """please override"""
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")

    def loadByteArray(self, context, page, resultLen, resultData, returnError):
        """please override"""
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")

    def storeByteArray(self, context, page, len, data, returnError):
        """please override"""
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")

    def deleteByteArray(self, context, page, returnError):
        """please override"""
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")

    def flush(self, context, returnError):
        """please override"""
        returnError.contents.value = self.IllegalStateError
        raise NotImplementedError("You must override this method.")


class CustomStorage(ICustomStorage):
    """Provides a useful default custom storage implementation which
    marshals the buffers on the C side from/to python strings.
Derive from this class and override the necessary methods to provide your own custom storage manager.""" def registerCallbacks(self, properties): callbacks = CustomStorageCallbacks( 0, self._create, self._destroy, self._flush, self._loadByteArray, self._storeByteArray, self._deleteByteArray, ) properties.custom_storage_callbacks_size = ctypes.sizeof(callbacks) self.callbacks = callbacks properties.custom_storage_callbacks = ctypes.cast( ctypes.pointer(callbacks), ctypes.c_void_p ) # these functions handle the C callbacks and massage the data, then # delegate to the function without underscore below def _create(self, context, returnError): self.create(returnError) def _destroy(self, context, returnError): self.destroy(returnError) def _flush(self, context, returnError): self.flush(returnError) def _loadByteArray(self, context, page, resultLen, resultData, returnError): resultString = self.loadByteArray(page, returnError) if returnError.contents.value != self.NoError: return # Copy python string over into a buffer allocated on the C side. # The buffer will later be freed by the C side. This prevents # possible heap corruption issues as buffers allocated by ctypes # and the c library might be allocated on different heaps. # Freeing a buffer allocated on another heap might make the application # crash. count = len(resultString) resultLen.contents.value = count buffer = self.allocateBuffer(count) ctypes.memmove(buffer, ctypes.c_char_p(resultString), count) resultData[0] = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_uint8)) def _storeByteArray(self, context, page, len, data, returnError): str = ctypes.string_at(data, len) newPageId = self.storeByteArray(page.contents.value, str, returnError) page.contents.value = newPageId def _deleteByteArray(self, context, page, returnError): self.deleteByteArray(page, returnError) # the user must override these callback functions def create(self, returnError): """Must be overridden. 
No return value.""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") def destroy(self, returnError): """Must be overridden. No return value.""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") def flush(self, returnError): """Must be overridden. No return value.""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") def loadByteArray(self, page, returnError): """Must be overridden. Must return a string with the loaded data.""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") return "" def storeByteArray(self, page, data, returnError): """Must be overridden. Must return the new 64-bit page ID of the stored data if a new page had to be created (i.e. page is not NewPage).""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") return 0 def deleteByteArray(self, page, returnError): """please override""" returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.") class RtreeContainer(Rtree): """An R-Tree, MVR-Tree, or TPR-Tree indexed container for python objects""" def __init__(self, *args: Any, **kwargs: Any) -> None: """Creates a new index :param stream: If the first argument in the constructor is not of type basestring, it is assumed to be an iterable stream of data that will raise a StopIteration. It must be in the form defined by the :attr:`interleaved` attribute of the index. 
The following example would assume :attr:`interleaved` is False:: (obj, (minx, maxx, miny, maxy, minz, maxz, ..., ..., mink, maxk)) For a TPR-Tree, this would be in the form:: (id, ((minx, maxx, miny, maxy, ..., ..., mink, maxk), (minvx, maxvx, minvy, maxvy, ..., ..., minvk, maxvk), time), object) :param interleaved: True or False, defaults to True. This parameter determines the coordinate order for all methods that take in coordinates. :param properties: This object sets both the creation and instantiation properties for the object and they are passed down into libspatialindex. A few properties are curried from instantiation parameters for you like ``pagesize`` to ensure compatibility with previous versions of the library. All other properties must be set on the object. .. warning:: The coordinate ordering for all functions are sensitive the index's :attr:`interleaved` data member. If :attr:`interleaved` is False, the coordinates must be in the form [xmin, xmax, ymin, ymax, ..., ..., kmin, kmax]. If :attr:`interleaved` is True, the coordinates must be in the form [xmin, ymin, ..., kmin, xmax, ymax, ..., kmax]. This also applies to velocities when using a TPR-Tree. A basic example :: >>> from rtree import index >>> p = index.Property() >>> idx = index.RtreeContainer(properties=p) >>> idx # doctest: +NORMALIZE_WHITESPACE rtree.index.RtreeContainer(bounds=[1.7976931348623157e+308, 1.7976931348623157e+308, -1.7976931348623157e+308, -1.7976931348623157e+308], size=0) Insert an item into the index:: >>> idx.insert(object(), ... (34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734)) Query:: >>> hits = idx.intersection((0, 0, 60, 60), bbox=True) >>> for obj in hits: ... obj.object ... obj.bbox # doctest: +ELLIPSIS [34.37768294..., 26.73758537..., 49.37768294..., 41.73758537...] 
""" if args: if ( isinstance(args[0], str) or isinstance(args[0], bytes) or isinstance(args[0], ICustomStorage) ): raise ValueError(f"{self.__class__} supports only in-memory indexes") self._objects: dict[int, tuple[int, object]] = {} return super().__init__(*args, **kwargs) def get_size(self) -> int: try: return self.count(self.bounds) except RTreeError: return 0 def __repr__(self) -> str: m = "rtree.index.RtreeContainer(bounds={}, size={})" return m.format(self.bounds, self.get_size()) def __contains__(self, obj: object) -> bool: return id(obj) in self._objects def __len__(self) -> int: return sum(count for count, obj in self._objects.values()) def __iter__(self) -> Iterator[object]: return iter(obj for count, obj in self._objects.values()) def insert(self, obj: object, coordinates: Any) -> None: # type: ignore[override] """Inserts an item into the index with the given coordinates. :param obj: Any object. :param coordinates: This may be an object that satisfies the numpy array protocol, providing the index's dimension * 2 coordinate pairs representing the `mink` and `maxk` coordinates in each dimension defining the bounds of the query window. For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time value as a float. The following example inserts a simple object into the container. The coordinate ordering in this instance is the default (interleaved=True) ordering:: >>> from rtree import index >>> idx = index.RtreeContainer() >>> idx.insert(object(), ... (34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734)) Similar for TPR-Tree:: >>> p = index.Property(type=index.RT_TPRTree) # doctest: +SKIP >>> idx = index.RtreeContainer(properties=p) # doctest: +SKIP >>> idx.insert(object(), ... ((34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734), ... (0.5, 2, 1.5, 2.5), ... 
3.0)) # doctest: +SKIP """ try: count = self._objects[id(obj)][0] + 1 except KeyError: count = 1 self._objects[id(obj)] = (count, obj) return super().insert(id(obj), coordinates, None) add = insert # type: ignore[assignment] @overload # type: ignore[override] def intersection(self, coordinates: Any, bbox: Literal[True]) -> Iterator[Item]: ... @overload def intersection( self, coordinates: Any, bbox: Literal[False] = False ) -> Iterator[object]: ... def intersection( self, coordinates: Any, bbox: bool = False ) -> Iterator[Item | object]: """Return ids or objects in the index that intersect the given coordinates. :param coordinates: This may be an object that satisfies the numpy array protocol, providing the index's dimension * 2 coordinate pairs representing the `mink` and `maxk` coordinates in each dimension defining the bounds of the query window. For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time pair for the time range as a float. :param bbox: If True, the intersection method will return the stored objects, as well as the bounds of the entry. The following example queries the container for any stored objects that intersect the bounds given in the coordinates:: >>> from rtree import index >>> idx = index.RtreeContainer() >>> idx.insert(object(), ... (34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734)) >>> hits = list(idx.intersection((0, 0, 60, 60), bbox=True)) >>> [(item.object, item.bbox) for item in hits] ... 
# doctest: +ELLIPSIS +NORMALIZE_WHITESPACE [(, [34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734])] If the :class:`rtree.index.Item` wrapper is not used, it is faster to request only the stored objects:: >>> list(idx.intersection((0, 0, 60, 60))) # doctest: +ELLIPSIS [] Similar for the TPR-Tree:: >>> p = index.Property(type=index.RT_TPRTree) # doctest: +SKIP >>> idx = index.RtreeContainer(properties=p) # doctest: +SKIP >>> idx.insert(object(), ... ((34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734), ... (0.5, 2, 1.5, 2.5), ... 3.0)) # doctest: +SKIP >>> hits = list(idx.intersection( ... ((0, 0, 60, 60), (0, 0, 0, 0), (3, 5)), bbox=True)) ... # doctest: +SKIP >>> [(item.object, item.bbox) for item in hits] ... # doctest: +SKIP [(, [34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734])] """ if bbox is False: for id in super().intersection(coordinates, bbox): yield self._objects[id][1] elif bbox is True: for value in super().intersection(coordinates, bbox): value.object = self._objects[value.id][1] value.id = None yield value else: raise ValueError("valid values for the bbox argument are True and False") @overload # type: ignore[override] def nearest( self, coordinates: Any, num_results: int = 1, bbox: Literal[True] = True ) -> Iterator[Item]: ... @overload def nearest( self, coordinates: Any, num_results: int = 1, bbox: Literal[False] = False ) -> Iterator[object]: ... def nearest( self, coordinates: Any, num_results: int = 1, bbox: bool = False ) -> Iterator[Item | object]: """Returns the ``k``-nearest objects to the given coordinates in increasing distance order. :param coordinates: This may be an object that satisfies the numpy array protocol, providing the index's dimension * 2 coordinate pairs representing the `mink` and `maxk` coordinates in each dimension defining the bounds of the query window. 
For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time pair for the time range as a float. :param num_results: The number of results to return nearest to the given coordinates. If two entries are equidistant, *both* are returned. This property means that :attr:`num_results` may return more items than specified. :param bbox: If True, the nearest method will return the stored objects, as well as the bounds of the entry. .. warning:: This is currently not implemented for the TPR-Tree. Example of finding the three items nearest to this one:: >>> from rtree import index >>> idx = index.RtreeContainer() >>> idx.insert(object(), (34.37, 26.73, 49.37, 41.73)) >>> hits = idx.nearest((0, 0, 10, 10), 3, bbox=True) """ if bbox is False: for id in super().nearest(coordinates, num_results, bbox): yield self._objects[id][1] elif bbox is True: for value in super().nearest(coordinates, num_results, bbox): value.object = self._objects[value.id][1] value.id = None yield value else: raise ValueError("valid values for the bbox argument are True and False") def delete(self, obj: object, coordinates: Any) -> None: """Deletes the item from the container within the specified coordinates. :param obj: Any object. :param coordinates: Dimension * 2 coordinate pairs, representing the min and max coordinates in each dimension of the item to be deleted from the index. Their ordering will depend on the index's :attr:`interleaved` data member. These are not the coordinates of a space containing the item, but those of the item itself. Together with the id parameter, they determine which item will be deleted. This may be an object that satisfies the numpy array protocol. 
For a TPR-Tree, this must be a 3-element sequence including not only the positional coordinate pairs but also the velocity pairs `minvk` and `maxvk` and a time pair for the original time the object was inserted and the current time as a float. Example:: >>> from rtree import index >>> idx = index.RtreeContainer() >>> idx.delete(object(), ... (34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734)) Traceback (most recent call last): ... IndexError: object is not in the index For the TPR-Tree:: >>> p = index.Property(type=index.RT_TPRTree) # doctest: +SKIP >>> idx = index.RtreeContainer(properties=p) # doctest: +SKIP >>> idx.delete(object(), ... ((34.3776829412, 26.7375853734, 49.3776829412, ... 41.7375853734), ... (0.5, 2, 1.5, 2.5), ... (3.0, 5.0))) # doctest: +SKIP Traceback (most recent call last): ... IndexError: object is not in the index """ try: count = self._objects[id(obj)][0] - 1 except KeyError: raise IndexError("object is not in the index") if count == 0: del self._objects[id(obj)] else: self._objects[id(obj)] = (count, obj) return super().delete(id(obj), coordinates) def leaves(self): return [ ( self._objects[id][1], [self._objects[child_id][1] for child_id in child_ids], bounds, ) for id, child_ids, bounds in super().leaves() ] rtree-1.4.0/rtree/py.typed000066400000000000000000000000001476215601300154500ustar00rootroot00000000000000rtree-1.4.0/scripts/000077500000000000000000000000001476215601300143315ustar00rootroot00000000000000rtree-1.4.0/scripts/install_libspatialindex.bat000066400000000000000000000013051476215601300217220ustar00rootroot00000000000000python -c "import sys; print(sys.version)" set SIDX_VERSION=2.1.0 curl -LO --retry 5 --retry-max-time 120 "https://github.com/libspatialindex/libspatialindex/archive/%SIDX_VERSION%.zip" tar xvf "%SIDX_VERSION%.zip" cd libspatialindex-%SIDX_VERSION% mkdir build cd build pip install ninja set INSTALL_PREFIX=%~dp0\..\rtree cmake -G Ninja ^ -D CMAKE_BUILD_TYPE=Release ^ -D BUILD_SHARED_LIBS="ON" ^ 
-D CMAKE_INSTALL_PREFIX="%INSTALL_PREFIX%" ^ -D CMAKE_INSTALL_BINDIR=lib ^ -D CMAKE_INSTALL_LIBDIR=libdir ^ .. ninja install :: remove unneeded libdir rmdir %INSTALL_PREFIX%\libdir /s /q dir %INSTALL_PREFIX% dir %INSTALL_PREFIX%\lib dir %INSTALL_PREFIX%\include /s rtree-1.4.0/scripts/install_libspatialindex.sh000077500000000000000000000031551476215601300215760ustar00rootroot00000000000000#!/bin/sh set -xe # A simple script to install libspatialindex from a Github Release VERSION=2.1.0 SHA256=86aa0925dd151ff9501a5965c4f8d7fb3dcd8accdc386a650dbdd62660399926 # where to copy resulting files # this has to be run before `cd`-ing anywhere install_prefix() { OURPWD=$PWD cd "$(dirname "$0")" cd ../rtree arr=$(pwd) cd "$OURPWD" echo $arr } scriptloc() { OURPWD=$PWD cd "$(dirname "$0")" arr=$(pwd) cd "$OURPWD" echo $arr } # note that we're doing this convoluted thing to get # an absolute path so mac doesn't yell at us INSTALL_PREFIX=`install_prefix` SL=`scriptloc` rm -f $VERSION.zip curl -LOs --retry 5 --retry-max-time 120 https://github.com/libspatialindex/libspatialindex/archive/${VERSION}.zip # check the file hash if [ "$(uname)" = "Darwin" ] then echo "${SHA256} ${VERSION}.zip" | shasum -a 256 -c - else echo "${SHA256} ${VERSION}.zip" | sha256sum -c - fi rm -rf "libspatialindex-${VERSION}" unzip -q $VERSION cd libspatialindex-${VERSION} mkdir build cd build printenv if [ "$(uname)" = "Darwin" ]; then CMAKE_ARGS="-D CMAKE_OSX_ARCHITECTURES=${ARCHFLAGS##* } \ -D CMAKE_INSTALL_RPATH=@loader_path" fi cmake ${CMAKE_ARGS} \ -D CMAKE_BUILD_TYPE=Release \ -D BUILD_SHARED_LIBS=ON \ -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \ -D CMAKE_INSTALL_LIBDIR=lib \ -D CMAKE_PLATFORM_NO_VERSIONED_SONAME=ON \ .. 
make -j 4 # copy built libraries relative to path of this script make install # remove unneeded extras in lib rm -rfv ${INSTALL_PREFIX}/lib/cmake rm -rfv ${INSTALL_PREFIX}/lib/pkgconfig ls -R ${INSTALL_PREFIX}/lib ls -R ${INSTALL_PREFIX}/include rtree-1.4.0/scripts/repair_wheel.py000077500000000000000000000072211476215601300173560ustar00rootroot00000000000000#!/usr/bin/env python3 import argparse import os import shutil import subprocess import sys import tempfile from pathlib import Path def main(): if sys.platform.startswith("linux"): os_ = "linux" elif sys.platform.startswith("darwin"): os_ = "macos" elif sys.platform.startswith("win32"): os_ = "windows" else: raise NotImplementedError( f"sys.platform '{sys.platform}' is not supported yet." ) p = argparse.ArgumentParser( description="Convert wheel to be independent of python implementation and ABI" ) p.set_defaults(prog=Path(sys.argv[0]).name) p.add_argument("WHEEL_FILE", help="Path to wheel file.") p.add_argument( "-w", "--wheel-dir", dest="WHEEL_DIR", help=('Directory to store delocated wheels (default: "wheelhouse/")'), default="wheelhouse/", ) args = p.parse_args() file = Path(args.WHEEL_FILE).resolve(strict=True) wheelhouse = Path(args.WHEEL_DIR).resolve() wheelhouse.mkdir(parents=True, exist_ok=True) with tempfile.TemporaryDirectory() as tmpdir_: tmpdir = Path(tmpdir_) # use the platform specific repair tool first if os_ == "linux": # use path from cibuildwheel which allows auditwheel to create # rtree.libs/libspatialindex-*.so.* cibw_lib_path = "/project/rtree/lib" if os.environ.get("LD_LIBRARY_PATH"): # append path os.environ["LD_LIBRARY_PATH"] += f"{os.pathsep}{cibw_lib_path}" else: os.environ["LD_LIBRARY_PATH"] = cibw_lib_path subprocess.run( ["auditwheel", "repair", "-w", str(tmpdir), str(file)], check=True ) elif os_ == "macos": subprocess.run( [ "delocate-wheel", # "--require-archs", # "arm64,x86_64", "-w", str(tmpdir), str(file), ], check=True, ) elif os_ == "windows": # no specific tool, just copy 
shutil.copyfile(file, tmpdir / file.name) (file,) = tmpdir.glob("*.whl") # make this a py3 wheel subprocess.run( [ "wheel", "tags", "--python-tag", "py3", "--abi-tag", "none", "--remove", str(file), ], check=True, ) (file,) = tmpdir.glob("*.whl") # unpack subprocess.run(["wheel", "unpack", file.name], cwd=tmpdir, check=True) for unpackdir in tmpdir.iterdir(): if unpackdir.is_dir(): break else: raise RuntimeError("subdirectory not found") if os_ == "linux": # This is auditwheel's libs, which needs post-processing libs_dir = unpackdir / "rtree.libs" lsidx_list = list(libs_dir.glob("libspatialindex*.so*")) assert len(lsidx_list) == 1, list(libs_dir.iterdir()) lsidx = lsidx_list[0] subprocess.run(["patchelf", "--set-rpath", "$ORIGIN", lsidx], check=True) # remove duplicated dir lib_dir = unpackdir / "rtree" / "lib" shutil.rmtree(lib_dir) # re-pack subprocess.run(["wheel", "pack", str(unpackdir.name)], cwd=tmpdir, check=True) files = list(tmpdir.glob("*.whl")) assert len(files) == 1, files file = files[0] file.rename(wheelhouse / file.name) if __name__ == "__main__": main() rtree-1.4.0/scripts/visualize.py000077500000000000000000000102641476215601300167240ustar00rootroot00000000000000#!/usr/bin/env python import sys from liblas import file from osgeo import ogr from rtree import index def quick_create_layer_def(lyr, field_list): # Each field is a tuple of (name, type, width, precision) # Any of type, width and precision can be skipped. Default type is string. 
for field in field_list: name = field[0] if len(field) > 1: type = field[1] else: type = ogr.OFTString field_defn = ogr.FieldDefn(name, type) if len(field) > 2: field_defn.SetWidth(int(field[2])) if len(field) > 3: field_defn.SetPrecision(int(field[3])) lyr.CreateField(field_defn) field_defn.Destroy() shape_drv = ogr.GetDriverByName("ESRI Shapefile") shapefile_name = sys.argv[1].split(".")[0] shape_ds = shape_drv.CreateDataSource(shapefile_name) leaf_block_lyr = shape_ds.CreateLayer("leaf", geom_type=ogr.wkbPolygon) point_block_lyr = shape_ds.CreateLayer("point", geom_type=ogr.wkbPolygon) point_lyr = shape_ds.CreateLayer("points", geom_type=ogr.wkbPoint) quick_create_layer_def( leaf_block_lyr, [("BLK_ID", ogr.OFTInteger), ("COUNT", ogr.OFTInteger)] ) quick_create_layer_def( point_block_lyr, [("BLK_ID", ogr.OFTInteger), ("COUNT", ogr.OFTInteger)] ) quick_create_layer_def(point_lyr, [("ID", ogr.OFTInteger), ("BLK_ID", ogr.OFTInteger)]) p = index.Property() p.filename = sys.argv[1] p.overwrite = False p.storage = index.RT_Disk idx = index.Index(sys.argv[1]) leaves = idx.leaves() # leaves[0] == (0L, [2L, 92L, 51L, 55L, 26L], [-132.41727847799999, # -96.717721818399994, -132.41727847799999, -96.717721818399994]) f = file.File(sys.argv[1]) def area(minx, miny, maxx, maxy): width = abs(maxx - minx) height = abs(maxy - miny) return width * height def get_bounds(leaf_ids, lasfile, block_id): # read the first point and set the bounds to that p = lasfile.read(leaf_ids[0]) minx, maxx = p.x, p.x miny, maxy = p.y, p.y print(len(leaf_ids)) print(leaf_ids[0:10]) for p_id in leaf_ids: p = lasfile.read(p_id) minx = min(minx, p.x) maxx = max(maxx, p.x) miny = min(miny, p.y) maxy = max(maxy, p.y) feature = ogr.Feature(feature_def=point_lyr.GetLayerDefn()) g = ogr.CreateGeometryFromWkt(f"POINT ({p.x:.8f} {p.y:.8f})") feature.SetGeometry(g) feature.SetField("ID", p_id) feature.SetField("BLK_ID", block_id) result = point_lyr.CreateFeature(feature) del result return (minx, miny, maxx, 
maxy) def make_poly(minx, miny, maxx, maxy): wkt = ( f"POLYGON (({minx:.8f} {miny:.8f}, {maxx:.8f} {miny:.8f}, {maxx:.8f} " f"{maxy:.8f}, {minx:.8f} {maxy:.8f}, {minx:.8f} {miny:.8f}))" ) shp = ogr.CreateGeometryFromWkt(wkt) return shp def make_feature(lyr, geom, id, count): feature = ogr.Feature(feature_def=lyr.GetLayerDefn()) feature.SetGeometry(geom) feature.SetField("BLK_ID", id) feature.SetField("COUNT", count) result = lyr.CreateFeature(feature) del result t = 0 for leaf in leaves: id = leaf[0] ids = leaf[1] count = len(ids) # import pdb;pdb.set_trace() if len(leaf[2]) == 4: minx, miny, maxx, maxy = leaf[2] else: minx, miny, maxx, maxy, minz, maxz = leaf[2] if id == 186: print(leaf[2]) print(leaf[2]) leaf = make_poly(minx, miny, maxx, maxy) print("leaf: " + str([minx, miny, maxx, maxy])) pminx, pminy, pmaxx, pmaxy = get_bounds(ids, f, id) point = make_poly(pminx, pminy, pmaxx, pmaxy) print("point: " + str([pminx, pminy, pmaxx, pmaxy])) print("point bounds: " + str([point.GetArea(), area(pminx, pminy, pmaxx, pmaxy)])) print("leaf bounds: " + str([leaf.GetArea(), area(minx, miny, maxx, maxy)])) print("leaf - point: " + str([abs(point.GetArea() - leaf.GetArea())])) print([minx, miny, maxx, maxy]) # if shp2.GetArea() != shp.GetArea(): # import pdb;pdb.set_trace() # sys.exit(1) make_feature(leaf_block_lyr, leaf, id, count) make_feature(point_block_lyr, point, id, count) t += 1 # if t ==2: # break leaf_block_lyr.SyncToDisk() point_lyr.SyncToDisk() shape_ds.Destroy() rtree-1.4.0/setup.py000077500000000000000000000040401476215601300143550ustar00rootroot00000000000000#!/usr/bin/env python3 from pathlib import Path from setuptools import setup from setuptools.command.install import install from setuptools.dist import Distribution from wheel.bdist_wheel import bdist_wheel as _bdist_wheel # current working directory of this setup.py file _cwd = Path(__file__).resolve().parent class bdist_wheel(_bdist_wheel): # type: ignore[misc] def finalize_options(self) -> None: 
_bdist_wheel.finalize_options(self) self.root_is_pure = False class BinaryDistribution(Distribution): # type: ignore[misc] """Distribution which always forces a binary package with platform name""" def has_ext_modules(foo) -> bool: return True class InstallPlatlib(install): # type: ignore[misc] def finalize_options(self) -> None: """ Copy the shared libraries and header files into the wheel. Note that this will *only* check in `rtree/lib` and `include` rather than anywhere on the system so if you are building a wheel you *must* copy or symlink the `.so`/`.dll`/`.dylib` files into `rtree/lib` and `.h` into `rtree/include`. """ install.finalize_options(self) if self.distribution.has_ext_modules(): self.install_lib = self.install_platlib # source files to copy source_dir = _cwd / "rtree" # destination for the files in the build directory target_dir = Path(self.build_lib) / "rtree" # copy lib tree source_lib = source_dir / "lib" if source_lib.is_dir(): target_lib = target_dir / "lib" self.copy_tree(str(source_lib), str(target_lib)) # copy include tree source_include = source_dir / "include" if source_include.is_dir(): target_include = target_dir / "include" self.copy_tree(str(source_include), str(target_include)) # See pyproject.toml for other project metadata setup( name="rtree", distclass=BinaryDistribution, cmdclass={"bdist_wheel": bdist_wheel, "install": InstallPlatlib}, ) rtree-1.4.0/tests/000077500000000000000000000000001476215601300140045ustar00rootroot00000000000000rtree-1.4.0/tests/__init__.py000066400000000000000000000000001476215601300161030ustar00rootroot00000000000000rtree-1.4.0/tests/boxes_15x15.data000066400000000000000000000132131476215601300166220ustar00rootroot0000000000000034.3776829412 26.7375853734 49.3776829412 41.7375853734 -51.7912278527 56.5716384064 -36.7912278527 71.5716384064 -132.417278478 -96.7177218184 -117.417278478 -81.7177218184 19.9788779448 -53.1068061438 34.9788779448 -38.1068061438 50.9432853241 53.830194296 65.9432853241 
68.830194296 114.777310066 -42.0534139041 129.777310066 -27.0534139041 -80.5201136918 -60.5173650142 -65.5201136918 -45.5173650142 -109.709042971 -88.8853631128 -94.7090429709 -73.8853631128 163.797701593 49.0535662325 178.797701593 64.0535662325 119.52474488 -47.8047995045 134.52474488 -32.8047995045 -49.6358346107 25.7591536504 -34.6358346107 40.7591536504 43.1951329802 -61.7003551556 58.1951329802 -46.7003551556 5.07182469992 -32.9621617938 20.0718246999 -17.9621617938 157.392784956 -59.9967638674 172.392784956 -44.9967638674 169.761387556 77.3118040104 184.761387556 92.3118040104 -90.9030625259 23.7969275036 -75.9030625259 38.7969275036 13.3161023563 35.5651016032 28.3161023563 50.5651016032 -71.4124633746 -27.8098115487 -56.4124633746 -12.8098115487 -101.490578923 40.5161619529 -86.4905789231 55.5161619529 -22.5493804457 -9.48190527182 -7.54938044566 5.51809472818 22.7819453953 81.6043699778 37.7819453953 96.6043699778 163.851232856 52.6576397095 178.851232856 67.6576397095 8.7520267341 -82.9532179134 23.7520267341 -67.9532179134 -25.1295517688 -52.9753074372 -10.1295517688 -37.9753074372 125.380855923 53.093317371 140.380855923 68.093317371 -79.9963004315 -8.58901526761 -64.9963004315 6.41098473239 -3.49476632412 -93.5592177527 11.5052336759 -78.5592177527 5.12311663372 38.9766284779 20.1231166337 53.9766284779 -126.802193031 72.7620993955 -111.802193031 87.7620993955 144.816733092 33.8296664631 159.816733092 48.8296664631 -124.187243051 30.4856075292 -109.187243051 45.4856075292 63.8011147852 -64.8232471563 78.8011147852 -49.8232471563 125.091625278 10.0243913301 140.091625278 25.0243913301 -79.6265618345 37.4238531184 -64.6265618345 52.4238531184 84.0917344559 -61.9889564492 99.0917344559 -46.9889564492 44.1303873224 36.9948838398 59.1303873224 51.9948838398 57.579189376 -44.3308895399 72.579189376 -29.3308895399 -135.915887605 -68.4604833795 -120.915887605 -53.4604833795 -52.5931165731 -83.132095062 -37.5931165731 -68.132095062 -3.66134703734 
-24.6160151663 11.3386529627 -9.61601516627 50.9138603775 6.66349450637 65.9138603775 21.6634945064 -59.0308862561 -28.7050068456 -44.0308862561 -13.7050068456 51.6601755093 -32.4794848001 66.6601755093 -17.4794848001 -174.739939684 35.8453347176 -159.739939684 50.8453347176 -107.905359545 -33.9905804035 -92.9053595447 -18.9905804035 -43.8298865873 -38.8139629115 -28.8298865873 -23.8139629115 -186.673789279 15.8707951216 -171.673789279 30.8707951216 13.0878151873 18.9267257542 28.0878151873 33.9267257542 -19.7764534411 -15.1648038653 -4.7764534411 -0.16480386529 -136.725385806 -62.3357813894 -121.725385806 -47.3357813894 56.3180682679 27.7748493606 71.3180682679 42.7748493606 -117.234207271 -95.984091959 -102.234207271 -80.984091959 -112.676334783 69.8614225716 -97.6763347829 84.8614225716 63.4481415226 49.5185084111 78.4481415226 64.5185084111 -164.583933393 -24.3224792074 -149.583933393 -9.32247920738 29.8740632141 -94.4036564677 44.8740632141 -79.4036564677 111.222002785 27.3091348937 126.222002785 42.3091348937 153.388416036 -51.7982686059 168.388416036 -36.7982686059 101.187835391 -79.2096166175 116.187835391 -64.2096166175 88.5716895369 -0.592196575665 103.571689537 14.4078034243 121.697565289 -20.4740930579 136.697565289 -5.47409305786 -57.6430699458 32.6596016791 -42.6430699458 47.6596016791 -51.9988160106 -16.5263906642 -36.9988160106 -1.52639066423 -128.45654531 40.0833021378 -113.45654531 55.0833021378 104.084274855 1.04302798395 119.084274855 16.0430279839 -65.3078063084 52.8659272125 -50.3078063084 67.8659272125 -185.575231871 0.603830128936 -170.575231871 15.6038301289 -99.670852574 63.077063843 -84.670852574 78.077063843 -97.5397037499 24.1544066414 -82.5397037499 39.1544066414 17.1213365558 80.8998469932 32.1213365558 95.8998469932 -66.0514693697 -67.879371904 -51.0514693697 -52.879371904 -165.624597131 -28.2121530482 -150.624597131 -13.2121530482 -153.938620771 -22.5333324395 -138.938620771 -7.5333324395 108.059653776 -30.1015722619 123.059653776 
-15.1015722619 66.3357992327 33.4460170804 81.3357992327 48.4460170804 122.051245261 62.1986667929 137.051245261 77.1986667929 -9.14331797752 -4.94220638202 5.85668202248 10.057793618 -6.21767716831 -37.4474638489 8.78232283169 -22.4474638489 -10.2422235441 -36.7771789022 4.75777645591 -21.7771789022 151.39952872 5.78259379576 166.39952872 20.7825937958 53.0412866301 27.1060539476 68.0412866301 42.1060539476 -179.969415049 -86.9431323167 -164.969415049 -71.9431323167 -122.143517094 52.4812451482 -107.143517094 67.4812451482 126.651232891 -71.3593917404 141.651232891 -56.3593917404 35.5628371672 -44.4833782826 50.5628371672 -29.4833782826 106.338230585 74.4980976394 121.338230585 89.4980976394 2.49246106376 64.4571886404 17.4924610638 79.4571886404 26.9239556956 74.8154250821 41.9239556956 89.8154250821 -145.467051901 -23.3901235678 -130.467051901 -8.39012356782 -31.1747618493 -78.3450857919 -16.1747618493 -63.3450857919 -45.6363494594 41.8549865381 -30.6363494594 56.8549865381 -139.598628861 -76.0620586165 -124.598628861 -61.0620586165 75.3893757582 -96.3227872859 90.3893757582 -81.3227872859 66.4127845964 -29.3758752649 81.4127845964 -14.3758752649 71.002709831 5.93248532466 86.002709831 20.9324853247 -166.73585749 -91.958750292 -151.73585749 -76.958750292 -122.966652056 -44.5184865975 -107.966652056 -29.5184865975 -114.787601823 -21.1179486167 -99.7876018227 -6.11794861667 -37.7449906403 -70.1494304858 -22.7449906403 -55.1494304858 70.2802523802 34.6578320934 85.2802523802 49.6578320934 rtree-1.4.0/tests/conftest.py000066400000000000000000000012721476215601300162050ustar00rootroot00000000000000from __future__ import annotations import os import shutil from collections.abc import Iterator import numpy import py import pytest import rtree data_files = ["boxes_15x15.data"] @pytest.fixture(autouse=True) def temporary_working_directory(tmpdir: py.path.local) -> Iterator[None]: for filename in data_files: filename = os.path.join(os.path.dirname(__file__), filename) 
shutil.copy(filename, str(tmpdir)) with tmpdir.as_cwd(): yield def pytest_report_header(config): """Header for pytest.""" vers = [ f"SIDX version: {rtree.core.rt.SIDX_Version().decode()}", f"NumPy version: {numpy.__version__}", ] return "\n".join(vers) rtree-1.4.0/tests/rungrind.dist000066400000000000000000000002041476215601300165150ustar00rootroot00000000000000#!/bin/sh valgrind --tool=memcheck --leak-check=yes --suppressions=/home/sean/Projects/valgrind-python.supp python test_doctests.py rtree-1.4.0/tests/test_finder.py000066400000000000000000000006521476215601300166670ustar00rootroot00000000000000from ctypes import CDLL from pathlib import Path from rtree import finder def test_load(): lib = finder.load() assert isinstance(lib, CDLL) def test_get_include(): incl = finder.get_include() assert isinstance(incl, str) if incl: path = Path(incl) assert path.is_dir() assert (path / "spatialindex").is_dir() assert (path / "spatialindex" / "SpatialIndex.h").is_file() rtree-1.4.0/tests/test_index.py000066400000000000000000000717761476215601300165460ustar00rootroot00000000000000from __future__ import annotations import ctypes import pickle import sys import tempfile import unittest from collections.abc import Iterator import numpy as np import pytest import rtree from rtree import core, index from rtree.exceptions import RTreeError class IndexTestCase(unittest.TestCase): def setUp(self) -> None: self.boxes15 = np.genfromtxt("boxes_15x15.data") self.idx = index.Index() for i, coords in enumerate(self.boxes15): self.idx.add(i, coords) def boxes15_stream( self, interleaved: bool = True ) -> Iterator[tuple[int, tuple[float, float, float, float], int]]: boxes15 = np.genfromtxt("boxes_15x15.data") for i, (minx, miny, maxx, maxy) in enumerate(boxes15): if interleaved: yield (i, (minx, miny, maxx, maxy), 42) else: yield (i, (minx, maxx, miny, maxy), 42) def stream_basic(self) -> None: # some versions of libspatialindex screw up indexes on stream loading # so do a very simple 
index check rtree_test = rtree.index.Index( [(1564, [0, 0, 0, 10, 10, 10], None)], properties=rtree.index.Property(dimension=3), ) assert next(rtree_test.intersection([1, 1, 1, 2, 2, 2])) == 1564 class IndexCount(unittest.TestCase): def setUp(self) -> None: self.boxes15 = np.genfromtxt("boxes_15x15.data") self.idx = index.Index() for i, coords in enumerate(self.boxes15): self.idx.add(i, coords) def test_len(self) -> None: self.assertEqual(len(self.idx), len(self.boxes15)) def test_get_size(self) -> None: with pytest.deprecated_call(): self.assertEqual(self.idx.get_size(), len(self.boxes15)) class IndexBounds(unittest.TestCase): def test_invalid_specifications(self) -> None: """Invalid specifications of bounds properly throw""" idx = index.Index() self.assertRaises(RTreeError, idx.add, None, (0.0, 0.0, -1.0, 1.0)) self.assertRaises(RTreeError, idx.intersection, (0.0, 0.0, -1.0, 1.0)) self.assertRaises(ctypes.ArgumentError, idx.add, None, (1, 1)) class IndexProperties(IndexTestCase): @pytest.mark.skipif( not hasattr(core.rt, "Index_GetResultSetOffset"), reason="Index_GetResultsSetOffset required in libspatialindex", ) def test_result_offset(self) -> None: idx = index.Rtree() idx.set_result_offset(3) self.assertEqual(idx.result_offset, 3) @pytest.mark.skipif( not hasattr(core.rt, "Index_GetResultSetLimit"), reason="Index_GetResultsSetOffset required in libspatialindex", ) def test_result_limit(self) -> None: idx = index.Rtree() idx.set_result_limit(44) self.assertEqual(idx.result_limit, 44) def test_invalid_properties(self) -> None: """Invalid values are guarded""" p = index.Property() self.assertRaises(RTreeError, p.set_buffering_capacity, -4321) self.assertRaises(RTreeError, p.set_region_pool_capacity, -4321) self.assertRaises(RTreeError, p.set_point_pool_capacity, -4321) self.assertRaises(RTreeError, p.set_index_pool_capacity, -4321) self.assertRaises(RTreeError, p.set_pagesize, -4321) self.assertRaises(RTreeError, p.set_index_capacity, -4321) 
self.assertRaises(RTreeError, p.set_storage, -4321) self.assertRaises(RTreeError, p.set_variant, -4321) self.assertRaises(RTreeError, p.set_dimension, -2) self.assertRaises(RTreeError, p.set_index_type, 6) self.assertRaises(RTreeError, p.get_index_id) def test_index_properties(self) -> None: """Setting index properties returns expected values""" idx = index.Rtree() p = index.Property() p.leaf_capacity = 100 p.fill_factor = 0.5 p.index_capacity = 10 p.near_minimum_overlap_factor = 7 p.buffering_capacity = 10 p.variant = 0 p.dimension = 3 p.storage = 0 p.pagesize = 4096 p.index_pool_capacity = 1500 p.point_pool_capacity = 1600 p.region_pool_capacity = 1700 p.tight_mbr = True p.overwrite = True p.writethrough = True p.tpr_horizon = 20.0 p.reinsert_factor = 0.3 p.idx_extension = "index" p.dat_extension = "data" idx = index.Index(properties=p) props = idx.properties self.assertEqual(props.leaf_capacity, 100) self.assertEqual(props.fill_factor, 0.5) self.assertEqual(props.index_capacity, 10) self.assertEqual(props.near_minimum_overlap_factor, 7) self.assertEqual(props.buffering_capacity, 10) self.assertEqual(props.variant, 0) self.assertEqual(props.dimension, 3) self.assertEqual(props.storage, 0) self.assertEqual(props.pagesize, 4096) self.assertEqual(props.index_pool_capacity, 1500) self.assertEqual(props.point_pool_capacity, 1600) self.assertEqual(props.region_pool_capacity, 1700) self.assertEqual(props.tight_mbr, True) self.assertEqual(props.overwrite, True) self.assertEqual(props.writethrough, True) self.assertEqual(props.tpr_horizon, 20.0) self.assertEqual(props.reinsert_factor, 0.3) self.assertEqual(props.idx_extension, "index") self.assertEqual(props.dat_extension, "data") class TestPickling(unittest.TestCase): # https://github.com/Toblerity/rtree/issues/87 @pytest.mark.xfail def test_index(self) -> None: idx = rtree.index.Index() idx.insert(0, [0, 1, 2, 3], 4) unpickled = pickle.loads(pickle.dumps(idx)) self.assertNotEqual(idx.handle, unpickled.handle) 
self.assertEqual(idx.properties.as_dict(), unpickled.properties.as_dict()) self.assertEqual(idx.interleaved, unpickled.interleaved) self.assertEqual(len(idx), len(unpickled)) self.assertEqual(idx.bounds, unpickled.bounds) a = next(idx.intersection(idx.bounds, objects=True)) b = next(unpickled.intersection(unpickled.bounds, objects=True)) self.assertEqual(a.id, b.id) self.assertEqual(a.bounds, b.bounds) self.assertEqual(a.object, b.object) def test_property(self) -> None: p = rtree.index.Property() unpickled = pickle.loads(pickle.dumps(p)) self.assertNotEqual(p.handle, unpickled.handle) self.assertEqual(p.as_dict(), unpickled.as_dict()) class IndexContainer(IndexTestCase): def test_container(self) -> None: """rtree.index.RtreeContainer works as expected""" container = rtree.index.RtreeContainer() objects = list() for coordinates in self.boxes15: objects.append(object()) container.insert(objects[-1], coordinates) self.assertEqual(len(container), len(self.boxes15)) assert all(obj in container for obj in objects) for obj, coordinates in zip(objects, self.boxes15[:5]): container.delete(obj, coordinates) assert all(obj in container for obj in objects[5:]) assert all(obj not in container for obj in objects[:5]) assert len(container) == len(self.boxes15) - 5 with pytest.raises(IndexError): container.delete(objects[0], self.boxes15[0]) # Insert duplicate object, at different location container.insert(objects[5], self.boxes15[0]) assert objects[5] in container # And then delete it, but check object still present container.delete(objects[5], self.boxes15[0]) assert objects[5] in container # Intersection obj = objects[10] results = container.intersection(self.boxes15[10]) assert obj in results # Intersection with bbox obj = objects[10] results = container.intersection(self.boxes15[10], bbox=True) result = [result for result in results if result.object is obj][0] assert np.array_equal(result.bbox, self.boxes15[10]) # Nearest obj = objects[8] results = 
container.intersection(self.boxes15[8]) assert obj in results # Nearest with bbox obj = objects[8] results = container.nearest(self.boxes15[8], bbox=True) result = [result for result in results if result.object is obj][0] assert np.array_equal(result.bbox, self.boxes15[8]) # Test iter method assert objects[12] in set(container) class IndexIntersection(IndexTestCase): def test_intersection(self) -> None: """Test basic insertion and retrieval""" self.assertTrue(0 in self.idx.intersection((0, 0, 60, 60))) hits = list(self.idx.intersection((0, 0, 60, 60))) self.assertEqual(len(hits), 10) self.assertEqual(hits, [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]) def test_objects(self) -> None: """Test insertion of objects""" idx = index.Index() for i, coords in enumerate(self.boxes15): idx.add(i, coords) idx.insert( 4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42 ) hits = idx.intersection((0, 0, 60, 60), objects=True) hit = [h for h in hits if h.id == 4321][0] self.assertEqual(hit.id, 4321) self.assertEqual(hit.object, 42) box = [f"{t:.10f}" for t in hit.bbox] expected = ["34.3776829412", "26.7375853734", "49.3776829412", "41.7375853734"] self.assertEqual(box, expected) def test_double_insertion(self) -> None: """Inserting the same id twice does not overwrite data""" idx = index.Index() idx.add(1, (2, 2)) idx.add(1, (3, 3)) self.assertEqual([1, 1], list(idx.intersection((0, 0, 5, 5)))) class TestIndexIntersectionUnion: @pytest.fixture(scope="class") def index_a_interleaved(self) -> index.Index: idx = index.Index(interleaved=True) idx.insert(1, (3, 3, 5, 5), "a_1") idx.insert(2, (4, 2, 6, 4), "a_2") return idx @pytest.fixture(scope="class") def index_a_uninterleaved(self) -> index.Index: idx = index.Index(interleaved=False) idx.insert(1, (3, 5, 3, 5), "a_1") idx.insert(2, (4, 6, 2, 4), "a_2") return idx @pytest.fixture(scope="class") def index_b_interleaved(self) -> index.Index: idx = index.Index(interleaved=True) idx.insert(3, (2, 1, 7, 6), "b_3") 
idx.insert(4, (8, 7, 9, 8), "b_4") return idx @pytest.fixture(scope="class") def index_b_uninterleaved(self) -> index.Index: idx = index.Index(interleaved=False) idx.insert(3, (2, 7, 1, 6), "b_3") idx.insert(4, (8, 9, 7, 8), "b_4") return idx def test_intersection_interleaved( self, index_a_interleaved: index.Index, index_b_interleaved: index.Index ) -> None: index_c_interleaved = index_a_interleaved & index_b_interleaved assert index_c_interleaved.interleaved assert len(index_c_interleaved) == 2 for hit in index_c_interleaved.intersection( index_c_interleaved.bounds, objects=True ): if hit.bbox == [3.0, 3.0, 5.0, 5.0]: assert hit.object == ("a_1", "b_3") elif hit.bbox == [4.0, 2.0, 6.0, 4.0]: assert hit.object == ("a_2", "b_3") else: assert False def test_intersection_uninterleaved( self, index_a_uninterleaved: index.Index, index_b_uninterleaved: index.Index ) -> None: index_c_uninterleaved = index_a_uninterleaved & index_b_uninterleaved assert not index_c_uninterleaved.interleaved assert len(index_c_uninterleaved) == 2 for hit in index_c_uninterleaved.intersection( index_c_uninterleaved.bounds, objects=True ): if hit.bounds == [3.0, 5.0, 3.0, 5.0]: assert hit.object == ("a_1", "b_3") elif hit.bounds == [4.0, 6.0, 2.0, 4.0]: assert hit.object == ("a_2", "b_3") else: assert False def test_intersection_mismatch( self, index_a_interleaved: index.Index, index_b_uninterleaved: index.Index ) -> None: with pytest.raises(AssertionError): index_a_interleaved & index_b_uninterleaved def test_union_interleaved( self, index_a_interleaved: index.Index, index_b_interleaved: index.Index ) -> None: index_c_interleaved = index_a_interleaved | index_b_interleaved assert index_c_interleaved.interleaved assert len(index_c_interleaved) == 4 for hit in index_c_interleaved.intersection( index_c_interleaved.bounds, objects=True ): if hit.bbox == [3.0, 3.0, 5.0, 5.0]: assert hit.object == "a_1" elif hit.bbox == [4.0, 2.0, 6.0, 4.0]: assert hit.object == "a_2" elif hit.bbox == [2.0, 1.0, 
7.0, 6.0]: assert hit.object == "b_3" elif hit.bbox == [8.0, 7.0, 9.0, 8.0]: assert hit.object == "b_4" else: assert False def test_union_uninterleaved( self, index_a_uninterleaved: index.Index, index_b_uninterleaved: index.Index ) -> None: index_c_uninterleaved = index_a_uninterleaved | index_b_uninterleaved assert not index_c_uninterleaved.interleaved assert len(index_c_uninterleaved) == 4 for hit in index_c_uninterleaved.intersection( index_c_uninterleaved.bounds, objects=True ): if hit.bounds == [3.0, 5.0, 3.0, 5.0]: assert hit.object == "a_1" elif hit.bounds == [4.0, 6.0, 2.0, 4.0]: assert hit.object == "a_2" elif hit.bounds == [2.0, 7.0, 1.0, 6.0]: assert hit.object == "b_3" elif hit.bounds == [8.0, 9.0, 7.0, 8.0]: assert hit.object == "b_4" else: assert False def test_union_mismatch( self, index_a_interleaved: index.Index, index_b_uninterleaved: index.Index ) -> None: with pytest.raises(AssertionError): index_a_interleaved | index_b_uninterleaved class IndexSerialization(unittest.TestCase): def setUp(self) -> None: self.boxes15 = np.genfromtxt("boxes_15x15.data") def boxes15_stream( self, interleaved: bool = True ) -> Iterator[tuple[int, tuple[float, float, float, float], int]]: for i, (minx, miny, maxx, maxy) in enumerate(self.boxes15): if interleaved: yield (i, (minx, miny, maxx, maxy), 42) else: yield (i, (minx, maxx, miny, maxy), 42) def test_unicode_filenames(self) -> None: """Unicode filenames work as expected""" tname = tempfile.mktemp() filename = tname + "gilename\u4500abc" idx = index.Index(filename) idx.insert( 4321, (34.3776829412, 26.7375853734, 49.3776829412, 41.7375853734), obj=42 ) def test_pickling(self) -> None: """Pickling works as expected""" idx = index.Index() import json some_data = {"a": 22, "b": [1, "ccc"]} # https://github.com/python/mypy/issues/2427 idx.dumps = lambda obj: json.dumps(obj).encode( # type: ignore[assignment] "utf-8" ) idx.loads = lambda string: json.loads( # type: ignore[assignment] string.decode("utf-8") ) 
idx.add(0, (0, 0, 1, 1), some_data) self.assertEqual(list(idx.nearest((0, 0), 1, objects="raw"))[0], some_data) def test_custom_filenames(self) -> None: """Test using custom filenames for index serialization""" p = index.Property() p.dat_extension = "data" p.idx_extension = "index" tname = tempfile.mktemp() idx = index.Index(tname, properties=p) for i, coords in enumerate(self.boxes15): idx.add(i, coords) hits = list(idx.intersection((0, 0, 60, 60))) self.assertEqual(len(hits), 10) self.assertEqual(hits, [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]) del idx # Check we can reopen the index and get the same results idx2 = index.Index(tname, properties=p) hits = list(idx2.intersection((0, 0, 60, 60))) self.assertEqual(len(hits), 10) self.assertEqual(hits, [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]) @pytest.mark.skipif(not sys.maxsize > 2**32, reason="Fails on 32bit systems") def test_interleaving(self) -> None: """Streaming against a persisted index without interleaving""" def data_gen( interleaved: bool = True, ) -> Iterator[tuple[int, tuple[float, float, float, float], int]]: for i, (minx, miny, maxx, maxy) in enumerate(self.boxes15): if interleaved: yield (i, (minx, miny, maxx, maxy), 42) else: yield (i, (minx, maxx, miny, maxy), 42) p = index.Property() tname = tempfile.mktemp() idx = index.Index( tname, data_gen(interleaved=False), properties=p, interleaved=False ) hits1 = sorted(list(idx.intersection((0, 60, 0, 60)))) self.assertEqual(len(hits1), 10) self.assertEqual(hits1, [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]) leaves = idx.leaves() expected = [ ( 0, [ 2, 92, 51, 55, 26, 95, 7, 81, 38, 22, 58, 89, 91, 83, 98, 37, 70, 31, 49, 34, 11, 6, 13, 3, 23, 57, 9, 96, 84, 36, 5, 45, 77, 78, 44, 12, 42, 73, 93, 41, 71, 17, 39, 54, 88, 72, 97, 60, 62, 48, 19, 25, 76, 59, 66, 64, 79, 94, 40, 32, 46, 47, 15, 68, 10, 0, 80, 56, 50, 30, ], [-186.673789279, -96.7177218184, 172.392784956, 45.4856075292], ), ( 2, [ 61, 74, 29, 99, 16, 43, 35, 33, 27, 63, 18, 90, 8, 53, 82, 21, 65, 24, 4, 
1, 75, 67, 86, 52, 28, 85, 87, 14, 69, 20, ], [-174.739939684, 32.6596016791, 184.761387556, 96.6043699778], ), ] # go through the traversal and see if everything is close assert all( all(np.allclose(a, b) for a, b in zip(L, E)) # type: ignore for L, E in zip(leaves, expected) ) hits2 = sorted(list(idx.intersection((0, 60, 0, 60), objects=True))) self.assertEqual(len(hits2), 10) self.assertEqual(hits2[0].object, 42) def test_overwrite(self) -> None: """Index overwrite works as expected""" tname = tempfile.mktemp() idx = index.Index(tname) del idx idx = index.Index(tname, overwrite=True) assert isinstance(idx, index.Index) class IndexNearest(IndexTestCase): def test_nearest_basic(self) -> None: """Test nearest basic selection of records""" hits = list(self.idx.nearest((0, 0, 10, 10), 3)) self.assertEqual(hits, [76, 48, 19]) idx = index.Index() locs = [(2, 4), (6, 8), (10, 12), (11, 13), (15, 17), (13, 20)] for i, (start, stop) in enumerate(locs): idx.add(i, (start, 1, stop, 1)) hits = sorted(idx.nearest((13, 0, 20, 2), 3)) self.assertEqual(hits, [3, 4, 5]) def test_nearest_equidistant(self) -> None: """Test that if records are equidistant, both are returned.""" point = (0, 0) small_box = (-10, -10, 10, 10) large_box = (-50, -50, 50, 50) idx = index.Index() idx.insert(0, small_box) idx.insert(1, large_box) self.assertEqual(list(idx.nearest(point, 2)), [0, 1]) self.assertEqual(list(idx.nearest(point, 1)), [0, 1]) idx.insert(2, (0, 0)) self.assertEqual(list(idx.nearest(point, 2)), [0, 1, 2]) self.assertEqual(list(idx.nearest(point, 1)), [0, 1, 2]) idx = index.Index() idx.insert(0, small_box) idx.insert(1, large_box) idx.insert(2, (50, 50)) # point on top right vertex of large_box point = (51, 51) # right outside of large_box self.assertEqual(list(idx.nearest(point, 2)), [1, 2]) self.assertEqual(list(idx.nearest(point, 1)), [1, 2]) idx = index.Index() idx.insert(0, small_box) idx.insert(1, large_box) # point right outside on top right vertex of large_box idx.insert(2, 
(51, 51)) point = (51, 52) # shifted 1 unit up from the point above self.assertEqual(list(idx.nearest(point, 2)), [2, 1]) self.assertEqual(list(idx.nearest(point, 1)), [2]) def test_nearest_object(self) -> None: """Test nearest object selection of records""" idx = index.Index() locs = [(14, 10, 14, 10), (16, 10, 16, 10)] for i, (minx, miny, maxx, maxy) in enumerate(locs): idx.add(i, (minx, miny, maxx, maxy), obj={"a": 42}) hits = sorted( (i.id, i.object) for i in idx.nearest((15, 10, 15, 10), 1, objects=True) ) self.assertEqual(hits, [(0, {"a": 42}), (1, {"a": 42})]) class IndexDelete(IndexTestCase): def test_deletion(self) -> None: """Test we can delete data from the index""" idx = index.Index() for i, coords in enumerate(self.boxes15): idx.add(i, coords) for i, coords in enumerate(self.boxes15): idx.delete(i, coords) hits = list(idx.intersection((0, 0, 60, 60))) self.assertEqual(hits, []) class IndexMoreDimensions(IndexTestCase): def test_3d(self) -> None: """Test we make and query a 3D index""" p = index.Property() p.dimension = 3 idx = index.Index(properties=p, interleaved=False) idx.insert(1, (0, 0, 60, 60, 22, 22.0)) hits = idx.intersection((-1, 1, 58, 62, 22, 24)) self.assertEqual(list(hits), [1]) def test_4d(self) -> None: """Test we make and query a 4D index""" p = index.Property() p.dimension = 4 idx = index.Index(properties=p, interleaved=False) idx.insert(1, (0, 0, 60, 60, 22, 22.0, 128, 142)) hits = idx.intersection((-1, 1, 58, 62, 22, 24, 120, 150)) self.assertEqual(list(hits), [1]) class IndexStream(IndexTestCase): def test_stream_input(self) -> None: p = index.Property() sindex = index.Index(self.boxes15_stream(), properties=p) bounds = (0, 0, 60, 60) hits = sindex.intersection(bounds) self.assertEqual(sorted(hits), [0, 4, 16, 27, 35, 40, 47, 50, 76, 80]) objects = list(sindex.intersection((0, 0, 60, 60), objects=True)) self.assertEqual(len(objects), 10) self.assertEqual(objects[0].object, 42) def test_empty_stream(self) -> None: """Assert empty 
stream raises exception""" self.assertRaises(RTreeError, index.Index, iter(())) def test_exception_in_generator(self) -> None: """Assert exceptions raised in callbacks are raised in main thread""" class TestException(Exception): pass def create_index() -> index.Index: def gen() -> Iterator[tuple[int, tuple[int, int, int, int], None]]: # insert at least 6 or so before the exception for i in range(10): yield (i, (1, 2, 3, 4), None) raise TestException("raising here") return index.Index(gen()) self.assertRaises(TestException, create_index) def test_exception_at_beginning_of_generator(self) -> None: """ Assert exceptions raised in callbacks before generator function are raised in main thread. """ class TestException(Exception): pass def create_index() -> index.Index: def gen() -> None: raise TestException("raising here") return index.Index(gen()) # type: ignore[func-returns-value] self.assertRaises(TestException, create_index) class DictStorage(index.CustomStorage): """A simple storage which saves the pages in a python dictionary""" def __init__(self) -> None: index.CustomStorage.__init__(self) self.clear() def create(self, returnError): """Called when the storage is created on the C side""" def destroy(self, returnError): """Called when the storage is destroyed on the C side""" def clear(self) -> None: """Clear all our data""" self.dict: dict = {} def loadByteArray(self, page, returnError): """Returns the data for page or returns an error""" try: return self.dict[page] except KeyError: returnError.contents.value = self.InvalidPageError def storeByteArray(self, page, data, returnError): """Stores the data for page""" if page == self.NewPage: newPageId = len(self.dict) self.dict[newPageId] = data return newPageId else: if page not in self.dict: returnError.value = self.InvalidPageError return 0 self.dict[page] = data return page def deleteByteArray(self, page, returnError): """Deletes a page""" try: del self.dict[page] except KeyError: returnError.contents.value = 
self.InvalidPageError hasData = property(lambda self: bool(self.dict)) """ Returns true if we contains some data """ class IndexCustomStorage(unittest.TestCase): def test_custom_storage(self) -> None: """Custom index storage works as expected""" settings = index.Property() settings.writethrough = True settings.buffering_capacity = 1 # Notice that there is a small in-memory buffer by default. # We effectively disable it here so our storage directly receives # any load/store/delete calls. # This is not necessary in general and can hamper performance; # we just use it here for illustrative and testing purposes. storage = DictStorage() r = index.Index(storage, properties=settings) # Interestingly enough, if we take a look at the contents of our # storage now, we can see the Rtree has already written two pages # to it. This is for header and index. state1 = storage.dict.copy() self.assertEqual(list(state1.keys()), [0, 1]) r.add(123, (0, 0, 1, 1)) state2 = storage.dict.copy() self.assertNotEqual(state1, state2) item = list(r.nearest((0, 0), 1, objects=True))[0] self.assertEqual(item.id, 123) self.assertTrue(r.valid()) self.assertTrue(isinstance(list(storage.dict.values())[0], bytes)) r.delete(123, (0, 0, 1, 1)) self.assertTrue(r.valid()) r.clearBuffer() self.assertTrue(r.valid()) del r storage.clear() self.assertFalse(storage.hasData) del storage def test_custom_storage_reopening(self) -> None: """Reopening custom index storage works as expected""" storage = DictStorage() r1 = index.Index(storage, overwrite=True) r1.add(555, (2, 2)) del r1 self.assertTrue(storage.hasData) r2 = index.Index(storage, overwrite=False) count = r2.count((0, 0, 10, 10)) self.assertEqual(count, 1) rtree-1.4.0/tests/test_tpr.py000066400000000000000000000170261476215601300162300ustar00rootroot00000000000000from __future__ import annotations import os import unittest from collections import defaultdict, namedtuple from collections.abc import Iterator from math import ceil from typing import Any 
import numpy as np from numpy.random import default_rng from rtree.index import Index, Property, RT_TPRTree class Cartesian( namedtuple( "Cartesian", ("id", "time", "x", "y", "x_vel", "y_vel", "update_time", "out_of_bounds"), ) ): __slots__ = () def getX(self, t: float) -> float: return self.x + self.x_vel * (t - self.time) def getY(self, t: float) -> float: return self.y + self.y_vel * (t - self.time) def getXY(self, t: float) -> tuple[float, float]: return self.getX(t), self.getY(t) def get_coordinates( self, t_now: float | None = None ) -> tuple[ tuple[float, float, float, float], tuple[float, float, float, float], float | tuple[float, float], ]: return ( (self.x, self.y, self.x, self.y), (self.x_vel, self.y_vel, self.x_vel, self.y_vel), self.time if t_now is None else (self.time, t_now), ) class QueryCartesian( namedtuple("QueryCartesian", ("start_time", "end_time", "x", "y", "dx", "dy")) ): __slots__ = () def get_coordinates( self, ) -> tuple[ tuple[float, float, float, float], tuple[float, float, float, float], tuple[float, float], ]: return ( (self.x - self.dx, self.y - self.dy, self.x + self.dx, self.y + self.dy), (0, 0, 0, 0), (self.start_time, self.end_time), ) def data_generator( dataset_size: int = 100, simulation_length: int = 10, max_update_interval: int = 20, queries_per_time_step: int = 5, min_query_extent: float = 0.05, max_query_extent: float = 0.1, horizon: int = 20, min_query_interval: int = 2, max_query_interval: int = 10, agility: float = 0.01, min_speed: float = 0.0025, max_speed: float = 0.0166, min_x: int = 0, min_y: int = 0, max_x: int = 1, max_y: int = 1, ) -> Iterator[tuple[str, int, Any]]: rng = default_rng() def create_object( id_: float, time: float, x: float | None = None, y: float | None = None ) -> Cartesian: # Create object with random or defined x, y and random velocity if x is None: x = rng.uniform(min_x, max_x) if y is None: y = rng.uniform(min_y, max_y) speed = rng.uniform(min_speed, max_speed) angle = rng.uniform(-np.pi, 
np.pi) x_vel, y_vel = speed * np.cos(angle), speed * np.sin(angle) # Set update time for when out of bounds, or max interval for dt in range(1, max_update_interval + 1): if not (0 < x + x_vel * dt < max_x and 0 < y + y_vel * dt < max_y): out_of_bounds = True update_time = time + dt break else: out_of_bounds = False update_time = time + max_update_interval return Cartesian(id_, time, x, y, x_vel, y_vel, update_time, out_of_bounds) objects = list() objects_to_update = defaultdict(set) for id_ in range(dataset_size): object_ = create_object(id_, 0) objects.append(object_) objects_to_update[object_.update_time].add(object_) yield "INSERT", 0, object_ for t_now in range(1, simulation_length): need_to_update = ceil(dataset_size * agility) updated_ids = set() while need_to_update > 0 or objects_to_update[t_now]: kill = False if objects_to_update[t_now]: object_ = objects_to_update[t_now].pop() if object_ not in objects: continue kill = object_.out_of_bounds else: id_ = rng.integers(0, dataset_size) while id_ in updated_ids: id_ = rng.integers(0, dataset_size) object_ = objects[id_] updated_ids.add(object_.id) need_to_update -= 1 yield "DELETE", t_now, object_ if kill: x = y = None else: x, y = object_.getXY(t_now) object_ = create_object(object_.id, t_now, x, y) objects[object_.id] = object_ objects_to_update[object_.update_time].add(object_) yield "INSERT", t_now, object_ for _ in range(queries_per_time_step): x = rng.uniform(min_x, max_x) y = rng.uniform(min_y, max_y) dx = rng.uniform(min_query_extent, max_query_extent) dy = rng.uniform(min_query_extent, max_query_extent) dt = rng.integers(min_query_interval, max_query_interval + 1) t = rng.integers(t_now, t_now + horizon - dt) yield "QUERY", t_now, QueryCartesian(t, t + dt, x, y, dx, dy) def intersects( x1: float, y1: float, x2: float, y2: float, x: float, y: float, dx: float, dy: float ) -> bool: # Checks if line from x1, y1 to x2, y2 intersects with rectangle with # bottom left at x-dx, y-dy and top right at x+dx, 
y+dy. # Implementation of https://stackoverflow.com/a/293052 # Check if line points not both more/less than max/min for each axis if ( (x1 > x + dx and x2 > x + dx) or (x1 < x - dx and x2 < x - dx) or (y1 > y + dy and y2 > y + dy) or (y1 < y - dy and y2 < y - dy) ): return False # Check on which side (+ve, -ve) of the line the rectangle corners are, # returning True if any corner is on a different side. calcs = ( (y2 - y1) * rect_x + (x1 - x2) * rect_y + (x2 * y1 - x1 * y2) for rect_x, rect_y in ( (x - dx, y - dy), (x + dx, y - dy), (x - dx, y + dy), (x + dx, y + dy), ) ) sign = np.sign(next(calcs)) # First corner (bottom left) return any(np.sign(calc) != sign for calc in calcs) # Check remaining 3 class TPRTests(unittest.TestCase): def test_tpr(self) -> None: # TODO : this freezes forever on some windows cloud builds if os.name == "nt": return # Cartesians list for brute force objects = dict() tpr_tree = Index(properties=Property(type=RT_TPRTree)) for operation, t_now, object_ in data_generator(): if operation == "INSERT": tpr_tree.insert(object_.id, object_.get_coordinates()) objects[object_.id] = object_ elif operation == "DELETE": tpr_tree.delete(object_.id, object_.get_coordinates(t_now)) del objects[object_.id] elif operation == "QUERY": tree_intersect = set(tpr_tree.intersection(object_.get_coordinates())) # Brute intersect brute_intersect = set() for tree_object in objects.values(): x_low, y_low = tree_object.getXY(object_.start_time) x_high, y_high = tree_object.getXY(object_.end_time) if intersects( x_low, y_low, x_high, y_high, # Line object_.x, object_.y, object_.dx, object_.dy, ): # Rect brute_intersect.add(tree_object.id) # Tree should match brute force approach assert tree_intersect == brute_intersect rtree-1.4.0/tox.ini000066400000000000000000000005031476215601300141530ustar00rootroot00000000000000[tox] requires = tox>=4 env_list = py{39,310,311,312,313} [testenv] description = run unit tests deps = pytest>=6 numpy install_command = python -I -m pip 
install --only-binary=:all: {opts} {packages} ignore_errors = True ignore_outcome = True commands = pytest --import-mode=importlib {posargs:tests}