Levenshtein-0.27.0/
Levenshtein-0.27.0/.clang-format
ColumnLimit: 110
IndentWidth: 4
AccessModifierOffset: -4
AllowShortIfStatementsOnASingleLine: true
PointerAlignment: Left
AllowShortBlocksOnASingleLine: Always
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: None
BreakBeforeBraces: Custom
AlwaysBreakTemplateDeclarations: true
BraceWrapping:
  SplitEmptyFunction: false
  AfterCaseLabel: true
  AfterClass: false
  AfterControlStatement: MultiLine
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterStruct: false
  AfterUnion: false
  BeforeCatch: true
  BeforeElse: true
  SplitEmptyRecord: false
  SplitEmptyNamespace: false
AllowAllConstructorInitializersOnNextLine: true
ConstructorInitializerAllOnOneLineOrOnePerLine: true
AllowShortCaseLabelsOnASingleLine: true
IndentPPDirectives: AfterHash

Levenshtein-0.27.0/.gitattributes
src/c_levenshtein.c linguist-vendored

Levenshtein-0.27.0/.github/
Levenshtein-0.27.0/.github/FUNDING.yml
github: maxbachmann
custom: ["https://www.paypal.com/donate/?hosted_button_id=VGWQBBD5CTWJU"]

Levenshtein-0.27.0/.github/workflows/
Levenshtein-0.27.0/.github/workflows/docs.yml
name: Build docs

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Set up Python 3.11
        uses: actions/setup-python@v1
        with:
          python-version: 3.11
      - name: Install dependencies
        run: |
          python -m pip install Sphinx sphinx_rtd_theme
          python -m pip install .
- name: Build Site run: make html - name: Deploy Site uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./build/html Levenshtein-0.27.0/.github/workflows/pythonbuild.yml000066400000000000000000000126021474145074100225550ustar00rootroot00000000000000name: Build on: push: pull_request: release: types: - published jobs: build_sdist: name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: submodules: 'true' - uses: actions/setup-python@v5 - name: Install dependencies run: | python -m pip install --upgrade pip pip install pytest hypothesis mypy Cython==3.0.11 # The cythonized files allow installation from the sdist without cython - name: Generate cython run: | chmod +x ./src/Levenshtein/generate.sh ./src/Levenshtein/generate.sh - name: Build sdist run: | git apply ./tools/sdist.patch pip install build python -m build --sdist # test whether tarball contains all files required for compiling pip install dist/levenshtein-*.tar.gz -v - name: Test type stubs run: | python -m mypy.stubtest Levenshtein --ignore-missing-stub - name: Test with pytest and backtrace in case of SegFault run: | tools/seg_wrapper.sh pytest tests - uses: actions/upload-artifact@v4 with: name: artifact-sdist path: dist/*.tar.gz build_wheels_windows: name: Build wheel on windows-latest/${{matrix.arch}}/${{matrix.python_tag}} needs: [build_sdist] runs-on: windows-latest strategy: fail-fast: false matrix: arch: [auto32, auto64, ARM64] env: CIBW_ARCHS: ${{matrix.arch}} CIBW_TEST_SKIP: "*-win32" CIBW_TEST_REQUIRES: pytest hypothesis CIBW_TEST_COMMAND: pytest {package}/tests CIBW_BUILD_VERBOSITY: 3 CIBW_PROJECT_REQUIRES_PYTHON: ">=3.9" steps: - uses: actions/download-artifact@v4 with: name: artifact-sdist path: dist - uses: actions/setup-python@v5 - name: Copy wheel run: cp dist/*.tar.gz rapidfuzz.tar.gz - name: Build wheels uses: pypa/cibuildwheel@v2.21.1 with: package-dir: rapidfuzz.tar.gz output-dir: wheelhouse - name: Upload wheels uses: actions/upload-artifact@v4 with: name: artifact-${{ github.job }}-${{ strategy.job-index }} path: ./wheelhouse/*.whl build_wheels_macos: name: Build wheel on macos-latest/${{matrix.arch}}/${{matrix.python_tag}} needs: [build_sdist] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [macos-13, macos-14] env: CIBW_ARCHS: native CIBW_TEST_SKIP: "pp*-macosx_*" CIBW_TEST_REQUIRES: pytest hypothesis CIBW_TEST_COMMAND: pytest {package}/tests CIBW_BUILD_VERBOSITY: 3 CIBW_PROJECT_REQUIRES_PYTHON: ">=3.9" steps: - uses: actions/download-artifact@v4 with: name: artifact-sdist path: dist - uses: actions/setup-python@v5 - name: Copy wheel run: cp dist/*.tar.gz rapidfuzz.tar.gz - name: Build wheels uses: pypa/cibuildwheel@v2.21.1 with: package-dir: rapidfuzz.tar.gz output-dir: wheelhouse - name: Upload wheels uses: actions/upload-artifact@v4 with: name: artifact-${{ github.job }}-${{ strategy.job-index }} path: ./wheelhouse/*.whl build_wheels_linux: name: Build wheels on ubuntu-latest/${{matrix.arch}}/${{matrix.python_tag}} needs: [build_sdist] runs-on: ubuntu-latest strategy: fail-fast: false matrix: arch: [auto, aarch64, ppc64le, s390x] python_tag: ["cp39-*", "cp310-*", "cp311-*", "cp312-*", "cp313-*", "pp39-*", "pp310-*"] exclude: # PyPy builds not supported on ppc64le / s390x - arch: ppc64le python_tag: "pp39-*" - arch: ppc64le python_tag: "pp310-*" - arch: s390x python_tag: "pp39-*" - arch: s390x python_tag: "pp310-*" env: CIBW_ARCHS_LINUX: ${{matrix.arch}} CIBW_BUILD: ${{matrix.python_tag}} 
CIBW_TEST_SKIP: "{*-manylinux_{aarch64,ppc64le,s390x},*musllinux_*}" CIBW_TEST_REQUIRES: pytest hypothesis CIBW_TEST_COMMAND: pytest {package}/tests CIBW_BUILD_VERBOSITY: 3 steps: - uses: actions/download-artifact@v4 with: name: artifact-sdist path: dist - uses: actions/setup-python@v5 - name: Copy wheel run: cp dist/*.tar.gz rapidfuzz.tar.gz - uses: docker/setup-qemu-action@v3 name: Set up QEMU - name: Build wheel uses: pypa/cibuildwheel@v2.21.1 with: package-dir: rapidfuzz.tar.gz output-dir: wheelhouse - name: Upload wheels uses: actions/upload-artifact@v4 with: name: artifact-${{ github.job }}-${{ strategy.job-index }} path: ./wheelhouse/*.whl deploy-wheels: if: github.event_name == 'release' && github.event.action == 'published' needs: [build_wheels_windows, build_wheels_macos, build_wheels_linux, build_sdist] name: deploy wheels to pypi runs-on: ubuntu-latest environment: pypi-release permissions: id-token: write steps: - uses: actions/download-artifact@v4 with: path: dist pattern: artifact-* merge-multiple: true - uses: pypa/gh-action-pypi-publish@v1.8.11 Levenshtein-0.27.0/.gitignore000066400000000000000000000040601474145074100160630ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # vscode .vscode/ _skbuild/ # Cython generated files *.cxx Levenshtein-0.27.0/.gitmodules000066400000000000000000000001511474145074100162450ustar00rootroot00000000000000[submodule "extern/rapidfuzz-cpp"] path = extern/rapidfuzz-cpp url = ../../rapidfuzz/rapidfuzz-cpp.git Levenshtein-0.27.0/.pre-commit-config.yaml000066400000000000000000000056341474145074100203640ustar00rootroot00000000000000# To use: # # pre-commit run -a # # Or: # # pre-commit install # (runs every time you commit in git) # # To update this file: # # pre-commit autoupdate # # See https://github.com/pre-commit/pre-commit exclude: | (?x)( .*\.patch ) repos: # Standard hooks - repo: https://github.com/pre-commit/pre-commit-hooks rev: "v4.6.0" hooks: - id: check-added-large-files - id: check-case-conflict - id: check-docstring-first - id: check-merge-conflict - id: check-symlinks - id: check-toml - id: check-yaml - id: debug-statements - id: end-of-file-fixer - id: mixed-line-ending - id: requirements-txt-fixer - id: trailing-whitespace # Black, the code formatter, natively supports pre-commit - repo: https://github.com/psf/black rev: "24.8.0" # Keep in sync with blacken-docs hooks: - id: black # Also code format the docs - repo: https://github.com/asottile/blacken-docs rev: "1.18.0" hooks: - id: blacken-docs additional_dependencies: - black==22.8.0 # keep in sync with black hook # Changes tabs to spaces - repo: https://github.com/Lucas-C/pre-commit-hooks rev: "v1.5.5" hooks: - id: remove-tabs - repo: https://github.com/sirosen/texthooks rev: "0.6.7" hooks: - id: fix-ligatures - id: fix-smartquotes # Checking for common mistakes - repo: https://github.com/pre-commit/pygrep-hooks rev: "v1.10.0" hooks: - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal # PyLint has native support - not always usable, but works for us - repo: https://github.com/PyCQA/pylint rev: "v3.2.7" hooks: - id: pylint files: ^pybind11 # CMake formatting - repo: https://github.com/cheshirekow/cmake-format-precommit rev: "v0.6.13" hooks: - id: cmake-format additional_dependencies: [pyyaml] types: [file] files: (\.cmake|CMakeLists.txt)(.in)?$ # Check static types with mypy #- repo: https://github.com/pre-commit/mirrors-mypy # rev: "v0.971" # hooks: # - id: mypy # args: [] # exclude: ^(tests|docs)/ # additional_dependencies: [nox, rich] - repo: https://github.com/charliermarsh/ruff-pre-commit rev: v0.6.5 hooks: - id: ruff args: ["--fix", "--show-fixes"] # Check for spelling - repo: https://github.com/codespell-project/codespell rev: "v2.3.0" hooks: - id: codespell exclude: ".*/test_.*.py" #args: ["-x", ".codespell-ignore-lines"] # Check for common shell mistakes #- repo: https://github.com/shellcheck-py/shellcheck-py # rev: "v0.9.0.6" # hooks: # - id: shellcheck # Disallow some common capitalization mistakes - repo: local hooks: - id: disallow-caps name: Disallow improper capitalization language: pygrep entry: PyBind|Numpy|Cmake|CCache|PyTest exclude: ^\.pre-commit-config.yaml$ # Clang format the codebase automatically - repo: https://github.com/pre-commit/mirrors-clang-format rev: 
"v18.1.8" hooks: - id: clang-format types_or: [c++, c] Levenshtein-0.27.0/CMakeLists.txt000066400000000000000000000035071474145074100166400ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.15...3.26) cmake_policy(SET CMP0054 NEW) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) set(SKBUILD_LINK_LIBRARIES_KEYWORD PRIVATE) set(Python_FIND_IMPLEMENTATIONS CPython PyPy) set(THREADS_PREFER_PTHREAD_FLAG ON) if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") endif() project(Levenshtein LANGUAGES C CXX) if(MSVC) add_compile_options(/W4) else() add_compile_options(-Wall -Wextra -pedantic) endif() if(CMAKE_VERSION VERSION_LESS 3.18) find_package( Python COMPONENTS Interpreter Development REQUIRED) else() set(Python_ARTIFACTS_INTERACTIVE TRUE) find_package( Python COMPONENTS Interpreter Development.Module REQUIRED) endif() if(CMAKE_VERSION VERSION_LESS 3.17) execute_process( COMMAND "${Python_EXECUTABLE}" -c "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX').split('.')[1])" OUTPUT_VARIABLE Python_SOABI OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ECHO STDOUT) message(STATUS "Corrected SOABI: ${Python_SOABI}") elseif("${Python_INTERPRETER_ID}" STREQUAL "PyPy") message(STATUS "PyPy SOABI: ${Python_SOABI}") execute_process( COMMAND "${Python_EXECUTABLE}" -c "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX').split('.')[1])" OUTPUT_VARIABLE Python_SOABI OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ECHO STDOUT) message(STATUS "Corrected SOABI: ${Python_SOABI}") endif() find_package(rapidfuzz 3.2.0 QUIET) if(rapidfuzz_FOUND) message(STATUS "Using system supplied version of rapidfuzz-cpp") else() message(STATUS "Using packaged version of rapidfuzz-cpp") add_subdirectory(extern/rapidfuzz-cpp) endif() set(LEV_BASE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src/Levenshtein) add_subdirectory(src/Levenshtein) Levenshtein-0.27.0/COPYING000066400000000000000000000432401474145074100151310ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. 
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS Appendix: How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) 19yy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) 19yy name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. Levenshtein-0.27.0/HISTORY.md000066400000000000000000000202601474145074100155560ustar00rootroot00000000000000## Changelog ### v0.27.0 #### Fixed - avoid instantiation of basic_string for unsupported types #### Changed - upgrade ``rapidfuzz-cpp`` to ``v3.2.0`` ### v0.26.1 #### Fixed - add missing mac os wheels ### v0.26.0 #### Changed - add support for Python 3.13 - drop support for Python 3.8 - switch build system to `scikit-build-core` - upgrade to `Cython==3.0.11` ### v0.25.1 #### Fixed - fix potentially incorrect results of `jaro_winkler` when using high prefix weights ### v0.25.0 #### Changed - improve type hints ### v0.24.0 #### Changed - upgrade ``rapidfuzz-cpp`` to ``v3.0.0`` - drop support for Python 3.7 ### v0.23.0 #### Changed - added keyword argument `pad` to Hamming distance. 
  This controls whether sequences of different length should be padded or lead to a `ValueError`
- upgrade to `Cython==3.0.3`

### v0.22.0
#### Changed
- add support for Python 3.12
- drop support for Python 3.6

#### Added
- add wheels for windows arm64

### v0.21.1
#### Changed
- upgrade ``rapidfuzz-cpp`` to ``v2.0.0``

### v0.21.0
#### Changed
- relax dependency requirement on ``rapidfuzz``

### v0.20.9
#### Fixed
- fix function signature of `get_requires_for_build_wheel`

### v0.20.8
#### Fixed
- type hints for `editops`/`opcodes`/`matching_blocks` did not allow any hashable sequence

### v0.20.7
#### Fixed
- type hints did not get installed

### v0.20.6
#### Fixed
- fix incorrect result normalization in `setratio` and `seqratio`

### v0.20.5
#### Fixed
- fix support for cmake versions below 3.17
- fix version requirement for `rapidfuzz-cpp` when building against a previously installed version

### v0.20.4
#### Changed
- modernize cmake build to fix most conda-forge builds

### v0.20.3
#### Changed
- Added support for Python 3.11

### v0.20.2
#### Fixed
- fix matching_blocks conversion for empty editops

#### Changed
- added in-tree build backend to install cmake and ninja only when it is not installed yet and only when wheels are available

### v0.20.1
#### Fixed
- fix broken matching_blocks conversion

### v0.20.0
#### Changed
- use `matching_blocks`/`apply`/`remove_subsequence`/`inverse` implementation from RapidFuzz

#### Fixed
- stop adding data to wheels
- fix segmentation fault on some invalid editop sequences in subtract_edit
- detect duplicated entries in editops validation

### v0.19.3
#### Added
- add musllinux wheels

### v0.19.2
#### Added
- add missing type hints

### v0.19.1
#### Added
- Add type hints

### v0.19.0
#### Changed
- implement all Python wrappers mostly with cython
- replace usage of deprecated Python APIs

#### Fixed
- fix behavior of median and median_improve

### v0.18.2
#### Changed
- Allow installation from system installed versions of `rapidfuzz-cpp`

### v0.18.1
#### Fixed
- Indel.normalized_similarity was broken in RapidFuzz v2.0.0 (see #20)

### v0.18.0
#### Fixed
* Fixed memory leak in error path of setratio
* Fixed out of bound reads due to uninitialized variable in median
  * e.g. `quickmedian(["test", "teste"], [0, 0])` caused out of bound reads

#### Changed
* Use a faster editops implementation provided by RapidFuzz
* Reduce code duplication
  * reuse implementations from rapidfuzz-cpp
* Transition to scikit-build

### v0.17.0
* Removed support for Python 3.5

### v0.16.1
* Add support for RapidFuzz v1.9.*

### v0.16.0
* Add support for Python 3.10

### v0.15.0
* Update SequenceMatcher interface to support the autojunk parameter

### v0.14.0
* Drop Python 2 support
* Fixed free of a non-heap object caused by a zero offset on a heap object
* Fixed warnings about missing type conversions
* Fix segmentation fault in subtract_edit when incorrect input types are used
* Fixed unchecked memory allocations
* Implement distance/ratio/hamming/jaro/jaro_winkler using rapidfuzz instead of providing our own implementation
* Implement Wrapper for inverse/editops/opcodes/matching_blocks/subtract_edit/apply_edit using Cython to simplify support for new Python versions

### v0.13.0
* Maintainership passed to Max Bachmann
* use faster bitparallel implementations for distance and ratio
* avoid string copies in distance, ratio and hamming
* Fix usage of deprecated Unicode APIs in distance, ratio and hamming
* Fixed incorrect window size inside Jaro and Jaro-Winkler implementation
* Fixed incorrect exception messages
* Removed unused functions and compiler specific hacks
* Split the Python and C implementations to simplify building of the C library
* Fixed multiple bugs which prevented the use as a C library, since some functions only got defined when compiling for Python
* Build and deliver python wheels for the library
* Fixed incorrect allocation size in lev_editops_matching_blocks and lev_opcodes_matching_blocks

### v0.12.1
* Fixed handling of numerous possible wraparounds in calculating the size of memory allocations; incorrect handling of which could cause denial of service or even possible remote code execution in previous versions of the library.

### v0.12.0
* Fixed a bug in StringMatcher.StringMatcher.get_matching_blocks / extract_editops for Python 3; now allow only `str` editops on both Python 2 and Python 3, for simpler and working code.
* Added documentation in the source distribution and in GIT
* Fixed the package layout: renamed the .so/.dll to _levenshtein, and made it reside inside a package, along with the StringMatcher class.
* Fixed spelling errors.

### v0.11.2
* Fixed a bug in setup.py: installation would fail on Python 3 if the locale did not specify UTF-8 charset (Felix Yan).
* Added COPYING, StringMatcher.py, gendoc.sh and NEWS in MANIFEST.in, as they were missing from source distributions.

### v0.11.1
* Added Levenshtein.h to MANIFEST.in

### v0.11.0
* Python 3 support, maintainership passed to Antti Haapala

### v0.10.2
* Made python-Levenshtein Git compatible and use setuptools for PyPI upload
* Created HISTORY.txt and made README reST compatible

### v0.10.1
* apply_edit() broken for Unicodes was fixed (thanks to Radovan Garabik)
* subtract_edit() function was added

### v0.10.0
* Hamming distance, Jaro similarity metric and Jaro-Winkler similarity metric were added
* ValueErrors raised on wrong argument types were fixed to TypeErrors

### v0.9.0
* a poor-but-fast generalized median method quickmedian() was added
* some auxiliary functions added to the C api (lev_set_median_index, lev_editops_normalize, ...)
### v0.8.2
* fixed missing `static' in the method list

### v0.8.1
* some compilation problems with non-gcc were fixed

### v0.8.0
* median_improve(), a generalized median improving function, was added
* an arbitrary length limitation imposed on greedy median() result was removed
* out of memory should be handled more gracefully (on systems w/o memory overcommitting)
* the documentation now passes doctest

### v0.7.0
* fixed greedy median() for Unicode characters > U+FFFF, it's now usable with whatever integer type wchar_t happens to be
* added missing MANIFEST
* renamed exported C functions, all public names now have lev_, LEV_ or Lev prefix; defined lev_byte, lev_wchar, and otherwise sanitized the (still unstable) C interface
* added edit-ops group of functions, with two interfaces: native, useful for string averaging, and difflib-like for interoperability
* added an example SequenceMatcher-like class StringMatcher

### v0.6.0
* a segfault in seqratio()/setratio() on invalid input has been fixed to an exception
* optimized ratio() and distance() (about 20%)
* Levenshtein.h header file was added to make it easier to actually use it as a C library

### v0.5.0
* a segfault in setratio() was fixed
* median() handles the all-empty-strings situation more gracefully

### v0.4.0
* new functions seqratio() and setratio() computing similarity between string sequences and sets
* Levenshtein optimizations (affects all routines except median())
* all Sequence objects are accepted, not just Lists

### v0.3.0
* setmedian() finding set median was added
* median() initial overhead for Unicodes was reduced

### v0.2.0
* ratio() and distance() now accept both Strings and Unicodes
* removed uratio() and udistance()
* Levenshtein.c is now compilable as a C library (with -DNO_PYTHON)
* a median() function finding approximate weighted median of a string set was added

### v0.1.0
* Initial release

Levenshtein-0.27.0/Makefile
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = docs
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

Levenshtein-0.27.0/README.md
# Levenshtein

Continuous Integration PyPI package version Python versions Documentation GitHub license

## Introduction

The Levenshtein Python C extension module contains functions for fast computation of:

* Levenshtein (edit) distance, and edit operations
* string similarity
* approximate median strings, and generally string averaging
* string sequence and set similarity

## Requirements

* Python 3.9 or later

## Installation

```bash
pip install levenshtein
```

## Documentation

The documentation for the current version can be found at [https://rapidfuzz.github.io/Levenshtein/](https://rapidfuzz.github.io/Levenshtein/)

## Support the project

If you are using Levenshtein for your work and feel like giving a bit of your own benefit back to support the project, consider sending us money through GitHub Sponsors or PayPal, which we can use to buy free time for the maintenance of this great library: to fix bugs in the software, review and integrate code contributions, improve its features and documentation, or just take a deep breath and have a cup of tea every once in a while. Thank you for your support.

Support the project through [GitHub Sponsors](https://github.com/sponsors/maxbachmann) or via [PayPal](https://www.paypal.com/donate/?hosted_button_id=VGWQBBD5CTWJU):

[![](https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif)](https://www.paypal.com/donate/?hosted_button_id=VGWQBBD5CTWJU)

## License

Levenshtein is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

See the file [COPYING](https://github.com/rapidfuzz/Levenshtein/blob/main/COPYING) for the full text of GNU General Public License version 2.
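A minimal usage sketch of the core functions (illustrative only; the full signatures are documented in the API reference under docs/levenshtein.rst further below):

```python
import Levenshtein

# edit distance: the minimum number of insertions, deletions and
# substitutions needed to turn one string into the other
assert Levenshtein.distance("kitten", "sitting") == 3

# normalized similarity in the range [0, 1]
similarity = Levenshtein.ratio("kitten", "sitting")

# the edit operations transforming the first string into the second,
# as (tag, source_position, destination_position) triples
ops = Levenshtein.editops("kitten", "sitting")
```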
Levenshtein-0.27.0/SECURITY.md
## Reporting Security Issues

If you believe you have found a security vulnerability in the project, please report it to us through coordinated disclosure.

**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

Instead, please send an email to oss@maxbachmann.de.

Please include as much of the information listed below as you can to help us better understand and resolve the issue:

* The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

Levenshtein-0.27.0/docs/
Levenshtein-0.27.0/docs/changelog.rst
.. include:: ../HISTORY.md

Levenshtein-0.27.0/docs/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------
from __future__ import annotations

project = "Levenshtein"
copyright = "2021, Max Bachmann"
author = "Max Bachmann"

# The full version, including alpha/beta/rc tags
release = "0.23.0"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

napoleon_google_docstring = False

# -- Extension configuration -------------------------------------------------

Levenshtein-0.27.0/docs/index.rst
Welcome to Levenshtein's documentation!
=======================================

A C extension module for fast computation of:

- Levenshtein (edit) distance and edit sequence manipulation
- string similarity
- approximate median strings, and generally string averaging
- string sequence and set similarity

Levenshtein has some overlap with difflib (SequenceMatcher). It supports only strings, not arbitrary sequence types, but on the other hand it's much faster. It supports both normal and Unicode strings, but can't mix them; all arguments to a function (method) have to be of the same type (or its subclasses).

.. toctree::
   :maxdepth: 2
   :caption: Installation:

   installation

.. toctree::
   :maxdepth: 2
   :caption: Usage:

   levenshtein

.. toctree::
   :maxdepth: 2
   :caption: Changelog:

   changelog

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

Levenshtein-0.27.0/docs/installation.rst
Installation
============

The recommended method to install Levenshtein is by using ``pip`` (the Python package manager).

using pip
---------

Levenshtein can be installed with ``pip``:

.. code-block:: sh

   pip install levenshtein

There are pre-built binaries (wheels) of Levenshtein for MacOS (10.9 and later), Linux x86_64 and Windows.

from git
--------

Levenshtein can be used directly from GitHub by cloning the repository, which might be useful when you want to work on it:

.. code-block:: sh

   git clone https://github.com/rapidfuzz/Levenshtein.git
   cd Levenshtein
   pip install .
Levenshtein-0.27.0/docs/levenshtein.rst
Levenshtein module
==================

distance
--------
.. autofunction:: Levenshtein.distance

ratio
-----
.. autofunction:: Levenshtein.ratio

hamming
-------
.. autofunction:: Levenshtein.hamming

jaro
----
.. autofunction:: Levenshtein.jaro

jaro_winkler
------------
.. autofunction:: Levenshtein.jaro_winkler

median
------
.. autofunction:: Levenshtein.median

median_improve
--------------
.. autofunction:: Levenshtein.median_improve

quickmedian
-----------
.. autofunction:: Levenshtein.quickmedian

setmedian
---------
.. autofunction:: Levenshtein.setmedian

seqratio
--------
.. autofunction:: Levenshtein.seqratio

setratio
--------
.. autofunction:: Levenshtein.setratio

editops
-------
.. autofunction:: Levenshtein.editops

opcodes
-------
.. autofunction:: Levenshtein.opcodes

inverse
-------
.. autofunction:: Levenshtein.inverse

apply_edit
----------
.. autofunction:: Levenshtein.apply_edit

matching_blocks
---------------
.. autofunction:: Levenshtein.matching_blocks

subtract_edit
-------------
.. autofunction:: Levenshtein.subtract_edit
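The edit-operation functions documented above compose: ``editops`` produces operations that ``apply_edit`` can replay and ``inverse`` can reverse. A short sketch of that round trip, using only the documented API (illustrative, not part of the repository):

```python
import Levenshtein

source, target = "spam", "park"
ops = Levenshtein.editops(source, target)

# replaying the editops on the source string yields the target string
assert Levenshtein.apply_edit(ops, source, target) == target

# inverting the editops turns them into operations mapping target -> source
assert Levenshtein.apply_edit(Levenshtein.inverse(ops), target, source) == source
```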
Levenshtein-0.27.0/extern/
Levenshtein-0.27.0/extern/rapidfuzz-cpp/

Levenshtein-0.27.0/make.bat
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=docs
set BUILDDIR=build

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd

Levenshtein-0.27.0/pyproject.toml
[build-system]
requires = [
    "scikit-build-core>=0.10.7",
    "Cython>=3.0.11,<3.1.0"
]
build-backend = "scikit_build_core.build"

[project]
name = "Levenshtein"
dynamic = ["version"]
dependencies = [
    "rapidfuzz >= 3.9.0, < 4.0.0"
]
requires-python = ">= 3.9"
authors = [
    {name = "Max Bachmann", email = "pypi@maxbachmann.de"},
]
description = "Python extension for computing string edit distances and similarities."
readme = "README.md"
classifiers = [
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
]

[project.urls]
Homepage = "https://github.com/rapidfuzz/Levenshtein"
Documentation = "https://rapidfuzz.github.io/Levenshtein/"
Repository = "https://github.com/rapidfuzz/Levenshtein.git"
Issues = "https://github.com/rapidfuzz/Levenshtein/issues"
Changelog = "https://github.com/rapidfuzz/Levenshtein/blob/main/HISTORY.md"

[tool.scikit-build]
sdist.include = [
    "src/Levenshtein/*.cxx",
    "src/Levenshtein/_version.py"
]
sdist.exclude = [
    ".github"
]
wheel.exclude = [
    "**.pyx",
    "**.cxx",
    "**.cpp",
    "**.hpp",
    "CMakeLists.txt",
    "generate.sh"
]

[tool.scikit-build.metadata.version]
provider = "scikit_build_core.metadata.regex"
input = "src/Levenshtein/__init__.py"

[tool.black]
line-length = 120

[tool.mypy]
files = ["src"]
python_version = "3.9"
warn_unused_configs = true
show_error_codes = true
enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"]
strict = true
disallow_untyped_defs = false

[tool.pytest.ini_options]
minversion = "6.0"
testpaths = ["tests"]
addopts = ["-ra", "--showlocals", "--strict-markers", "--strict-config"]
norecursedirs = ["_skbuild"]
xfail_strict = true
log_cli_level = "info"

[tool.pylint]
py-version = "3.9"

[tool.pylint.reports]
output-format = "colorized"

[tool.pylint.messages_control]
disable = [
    "design",
    "fixme",
    "imports",
    "line-too-long",
    "invalid-name",
    "protected-access",
    "missing-module-docstring",
]

[tool.ruff]
select = [
    "E", "F", "W",  # flake8
    "B",            # flake8-bugbear
    "I",            # isort
    "ARG",          # flake8-unused-arguments
    "C4",           # flake8-comprehensions
    "EM",           # flake8-errmsg
    "ICN",          # flake8-import-conventions
    "ISC",          # flake8-implicit-str-concat
    "G",            # flake8-logging-format
    "PGH",          # pygrep-hooks
    "PIE",          # flake8-pie
    "PL",           # pylint
    "PT",           # flake8-pytest-style
    "PTH",          # flake8-use-pathlib
    "RET",          # flake8-return
    "RUF",          # Ruff-specific
    "SIM",          # flake8-simplify
    "T20",          # flake8-print
    "UP",           # pyupgrade
    "YTT",          # flake8-2020
    "EXE",          # flake8-executable
    "NPY",          # NumPy specific rules
    "PD",           # pandas-vet
]
extend-ignore = [
    "PLR",      # Design related pylint codes
    "E501",     # Line too long
    "PT004",    # Use underscore for non-returning fixture (use usefixture instead)
    "PTH123",   # use pathlib instead of builtin open
    "PLC1901",  # simply not always correct
]
target-version = "py39"
src = ["src"]
unfixable = [
    "T20",  # Removes print statements
    "F841", # Removes unused variables
]
exclude = []
flake8-unused-arguments.ignore-variadic-names = true
isort.required-imports = ["from __future__ import annotations"]

[tool.ruff.per-file-ignores]
"tests/**" = ["T20"]
"bench/**" = ["T20"]
"_custom_build/backend.py" = ["T20"]
"setup.py" = ["T20"]

Levenshtein-0.27.0/src/
Levenshtein-0.27.0/src/Levenshtein/
Levenshtein-0.27.0/src/Levenshtein/CMakeLists.txt
function(create_cython_target _name)
  if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx)
    set(${_name}
        ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx
        PARENT_SCOPE)
  else()
    add_custom_command(
      OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${_name}.cxx"
      MAIN_DEPENDENCY
"${CMAKE_CURRENT_LIST_DIR}/${_name}.pyx" VERBATIM COMMAND Python::Interpreter -m cython "${CMAKE_CURRENT_LIST_DIR}/${_name}.pyx" --output-file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.cxx") set(${_name} ${CMAKE_CURRENT_BINARY_DIR}/${_name}.cxx PARENT_SCOPE) endif() endfunction(create_cython_target) function(rf_add_library name) if(CMAKE_VERSION VERSION_LESS 3.17) python_add_library(${name} MODULE ${ARGV}) get_property( suffix TARGET ${name} PROPERTY SUFFIX) if(NOT suffix) set(suffix "${CMAKE_SHARED_MODULE_SUFFIX}") endif() set_property(TARGET ${name} PROPERTY SUFFIX ".${Python_SOABI}${suffix}") else() python_add_library(${name} MODULE WITH_SOABI ${ARGV}) endif() endfunction(rf_add_library) create_cython_target(levenshtein_cpp) rf_add_library(levenshtein_cpp ${levenshtein_cpp} ${LEV_BASE_DIR}/Levenshtein-c/_levenshtein.cpp) target_compile_features(levenshtein_cpp PUBLIC cxx_std_17) target_include_directories(levenshtein_cpp PRIVATE ${LEV_BASE_DIR}/Levenshtein-c) target_link_libraries(levenshtein_cpp PRIVATE rapidfuzz::rapidfuzz) install(TARGETS levenshtein_cpp DESTINATION Levenshtein/) Levenshtein-0.27.0/src/Levenshtein/Levenshtein-c/000077500000000000000000000000001474145074100216525ustar00rootroot00000000000000Levenshtein-0.27.0/src/Levenshtein/Levenshtein-c/_levenshtein.cpp000066400000000000000000000312471474145074100250500ustar00rootroot00000000000000/* * Levenshtein.c * @(#) $Id: Levenshtein.c,v 1.41 2005/01/13 20:05:36 yeti Exp $ * Python extension computing Levenshtein distances, string similarities, * median strings and other goodies. * * Copyright (C) 2002-2003 David Necas (Yeti) . * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. **/ #include #include #include /* for debugging */ #include #include #include "_levenshtein.hpp" #define LEV_EPSILON 1e-14 /**************************************************************************** * * Quick (voting) medians * ****************************************************************************/ /* {{{ */ struct HQItem { uint32_t c; double s; HQItem* n; }; /* compute the sets of symbols each string contains, and the set of symbols * in any of them (symset). meanwhile, count how many different symbols * there are (used below for symlist). 
* the symset is passed as an argument to avoid its allocation and * deallocation when it's used in the caller too */ class SymMap { std::unique_ptr symmap; public: SymMap(const std::vector& strings) { symmap = std::make_unique(0x100); /* this is an ugly memory allocation avoiding hack: most hash elements * will probably contain none or one symbols only so, when p->n is equal * to symmap, it means there're no symbols yet, afters inserting the * first one, p->n becomes normally NULL and then it behaves like an * usual singly linked list */ for (size_t i = 0; i < 0x100; i++) symmap[i].n = &symmap[0]; for (size_t i = 0; i < strings.size(); i++) { visit(strings[i], [&](auto s1) { for (auto c : s1) { int key = ((int)c + ((int)c >> 7)) & 0xff; HQItem* p = symmap.get() + key; if (p->n == symmap.get()) { p->c = c; p->n = NULL; continue; } while (p->c != c && p->n != NULL) p = p->n; if (p->c != c) { p->n = new HQItem; p = p->n; p->n = NULL; p->c = c; } } }); } } ~SymMap() { for (size_t j = 0; j < 0x100; j++) { HQItem* p = &symmap[j]; if (p->n == symmap.get() || p->n == NULL) continue; p = p->n; while (p) { HQItem* q = p; p = p->n; delete q; } } } void clear() { for (size_t i = 0; i < 0x100; i++) { HQItem* p = &symmap[i]; if (p->n == symmap.get()) continue; while (p) { p->s = 0.0; p = p->n; } } } HQItem* get() { return symmap.get(); } }; std::vector lev_quick_median(const std::vector& strings, const std::vector& weights) { std::vector median; /* the resulting string */ /* first check whether the result would be an empty string * and compute resulting string length */ double ml = 0; double wl = 0; for (size_t i = 0; i < strings.size(); i++) { ml += weights[i] * strings[i].length; wl += weights[i]; } if (wl == 0.0) return median; ml = floor(ml / wl + 0.499999); median.resize((size_t)ml); if (median.empty()) return median; /* find the symbol set; * now an empty symbol set is really a failure */ SymMap symmap(strings); for (size_t j = 0; j < median.size(); j++) { /* clear the symbol probabilities */ symmap.clear(); /* let all strings vote */ for (size_t i = 0; i < strings.size(); i++) { visit(strings[i], [&](auto s1) { double weighti = weights[i]; size_t lengthi = (size_t)s1.size(); double start = (double)lengthi / ml * (double)j; double end = start + (double)lengthi / ml; size_t istart = (size_t)floor(start); size_t iend = (size_t)ceil(end); /* rounding errors can overflow the buffer */ if (iend > lengthi) iend = lengthi; /* the inner part, including the complete last character */ for (size_t k = istart + 1; k < iend; k++) { uint32_t c = static_cast(s1[k]); int key = (c + (c >> 7)) & 0xff; HQItem* p = symmap.get() + key; while (p->c != c) p = p->n; p->s += weighti; } /* the initial fraction */ { uint32_t c = static_cast(s1[istart]); int key = (c + (c >> 7)) & 0xff; HQItem* p = symmap.get() + key; while (p->c != c) p = p->n; p->s += weighti * ((double)(1 + istart) - start); } /* subtract what we counted from the last character but doesn't * actually belong here. 
* this strategy works also when istart+1 == iend (i.e., everything * happens inside a one character) */ { uint32_t c = static_cast(s1[iend - 1]); int key = (c + (c >> 7)) & 0xff; HQItem* p = symmap.get() + key; while (p->c != c) p = p->n; p->s -= weighti * ((double)iend - end); } }); } /* find the elected symbol */ { HQItem* max = NULL; for (size_t i = 0; i < 0x100; i++) { HQItem* p = symmap.get() + i; if (p->n == symmap.get()) continue; while (p) { if (!max || p->s > max->s) max = p; p = p->n; } } median[j] = max->c; } } return median; } /* }}} */ /**************************************************************************** * * Set, sequence distances * ****************************************************************************/ /* {{{ */ /* * Munkres-Blackman algorithm. */ std::vector munkres_blackman(size_t n1, size_t n2, double* dists) { size_t row = 0; /* allocate memory */ /* 1 if column is covered */ std::vector covc(n1, 0); /* row of a z* in given column (1-base indices, so we can use zero as `none')*/ std::vector zstarc(n1, 0); /* 1 if row is covered */ std::vector covr(n2, 0); /* column of a z* in given row (1-base indices, so we can use zero as `none')*/ std::vector zstarr(n2, 0); /* column of a z' in given row (1-base indices, so we can use zero as `none')*/ std::vector zprimer(n2, 0); /* step 0 (subtract minimal distance) and step 1 (find zeroes) => [2] */ auto step1 = [&]() { for (size_t j = 0; j < n1; j++) { size_t minidx = 0; double* col = dists + j; double min = *col; double* p = col + n1; for (size_t i = 1; i < n2; i++) { if (min > *p) { minidx = i; min = *p; } p += n1; } /* subtract */ p = col; for (size_t i = 0; i < n2; i++) { *p -= min; if (*p < LEV_EPSILON) *p = 0.0; p += n1; } /* star the zero, if possible */ if (!zstarc[j] && !zstarr[minidx]) { zstarc[j] = minidx + 1; zstarr[minidx] = j + 1; } else { /* otherwise try to find some other */ p = col; for (size_t i = 0; i < n2; i++) { if (i != minidx && *p == 0.0 && !zstarc[j] && !zstarr[i]) { zstarc[j] = i + 1; zstarr[i] = j + 1; break; } p += n1; } } } return 2; }; /* step 2 (cover columns containing z*) => [0, 3] */ auto step2 = [&]() { size_t nc = 0; for (size_t j = 0; j < n1; j++) if (zstarc[j]) { covc[j] = 1; nc++; } return (nc == n1) ? 
0 : 3; }; /* step 3 (find uncovered zeroes) => [3, 4, 5] */ auto step3 = [&]() { /* search uncovered matrix entries */ for (size_t j = 0; j < n1; j++) { double* p = dists + j; if (covc[j]) continue; for (size_t i = 0; i < n2; i++) { if (!covr[i] && *p == 0.0) { /* when a zero is found, prime it */ zprimer[i] = j + 1; if (zstarr[i]) { /* if there's a z* in the same row, * uncover the column, cover the row and redo */ covr[i] = 1; covc[zstarr[i] - 1] = 0; return 3; } /* if there's no z*, * we are at the end of our path an can convert z' * to z* */ row = i; return 4; } p += n1; } } return 5; }; /* step 4 (increment the number of z*) * i is the row number (we get it from step 3) => [2] */ auto step4 = [&]() { row++; do { size_t x = row; row--; size_t j = zprimer[row] - 1; /* move to z' in the same row */ zstarr[row] = j + 1; /* mark it as z* in row buffer */ row = zstarc[j]; /* move to z* in the same column */ zstarc[j] = x; /* mark the z' as being new z* */ } while (row); std::fill(std::begin(zprimer), std::end(zprimer), 0); std::fill(std::begin(covr), std::end(covr), 0); std::fill(std::begin(covc), std::end(covc), 0); return 2; }; /* step 5 (new zero manufacturer) * we can't get here, unless no zero is found at all => [3] */ auto step5 = [&]() { /* find the smallest uncovered entry */ double min = std::numeric_limits::max(); for (size_t j = 0; j < n1; j++) { double* p = dists + j; if (covc[j]) continue; for (size_t i = 0; i < n2; i++) { if (!covr[i] && min > *p) { min = *p; } p += n1; } } /* add it to all covered rows */ for (size_t i = 0; i < n2; i++) { double* p = dists + i * n1; if (!covr[i]) continue; for (size_t j = 0; j < n1; j++) *(p++) += min; } /* subtract if from all uncovered columns */ for (size_t j = 0; j < n1; j++) { double* p = dists + j; if (covc[j]) continue; for (size_t i = 0; i < n2; i++) { *p -= min; if (*p < LEV_EPSILON) *p = 0.0; p += n1; } } return 3; }; /* main */ int next_step = 1; while (next_step) { switch (next_step) { case 1: next_step = step1(); break; case 2: next_step = step2(); break; case 3: next_step = step3(); break; case 4: next_step = step4(); break; case 5: next_step = step5(); break; default: next_step = 0; break; } } for (size_t j = 0; j < n1; j++) zstarc[j]--; return zstarc; } /* }}} */ Levenshtein-0.27.0/src/Levenshtein/Levenshtein-c/_levenshtein.hpp000066400000000000000000000625741474145074100250640ustar00rootroot00000000000000#pragma once #include "Python.h" #include #include #include #include #include #include #include #include #include #include #define PYTHON_VERSION(major, minor, micro) ((major << 24) | (minor << 16) | (micro << 8)) using rapidfuzz::detail::Range; enum RF_StringType { RF_UINT8, /* uint8_t */ RF_UINT16, /* uint16_t */ RF_UINT32 /* uint32_t */ }; typedef struct _RF_String { /* members */ RF_StringType kind; void* data; int64_t length; } RF_String; #define LIST_OF_CASES() \ X_ENUM(RF_UINT8, uint8_t) \ X_ENUM(RF_UINT16, uint16_t) \ X_ENUM(RF_UINT32, uint32_t) template auto visit(const RF_String& str, Func&& f, Args&&... args) { switch (str.kind) { #define X_ENUM(kind, type) \ case kind: return f(Range((type*)str.data, (type*)str.data + str.length), std::forward(args)...); LIST_OF_CASES() #undef X_ENUM default: throw std::logic_error("Invalid string type"); } } template auto visitor(const RF_String& str1, const RF_String& str2, Func&& f, Args&&... 
args) { return visit(str2, [&](auto s2) { return visit(str1, std::forward(f), s2, std::forward(args)...); }); } static inline bool is_valid_string(PyObject* py_str) { bool is_string = false; if (PyBytes_Check(py_str)) is_string = true; else if (PyUnicode_Check(py_str)) { // PEP 623 deprecates legacy strings and therefore // deprecates e.g. PyUnicode_READY in Python 3.10 #if PY_VERSION_HEX < PYTHON_VERSION(3, 10, 0) if (PyUnicode_READY(py_str)) // cython will use the exception set by PyUnicode_READY throw std::runtime_error(""); #endif is_string = true; } return is_string; } static inline RF_String convert_string(PyObject* py_str) { if (PyBytes_Check(py_str)) return {RF_UINT8, PyBytes_AS_STRING(py_str), static_cast(PyBytes_GET_SIZE(py_str))}; else { RF_StringType kind; switch (PyUnicode_KIND(py_str)) { case PyUnicode_1BYTE_KIND: kind = RF_UINT8; break; case PyUnicode_2BYTE_KIND: kind = RF_UINT16; break; default: kind = RF_UINT32; break; } return {kind, PyUnicode_DATA(py_str), static_cast(PyUnicode_GET_LENGTH(py_str))}; } } /* Edit operation type * DON'T CHANGE! used as array indices and the bits are occasionally used * as flags */ enum LevEditType { LEV_EDIT_KEEP = 0, LEV_EDIT_REPLACE = 1, LEV_EDIT_INSERT = 2, LEV_EDIT_DELETE = 3, LEV_EDIT_LAST /* sometimes returned when an error occurs */ }; /* compute the sets of symbols each string contains, and the set of symbols * in any of them (symset). meanwhile, count how many different symbols * there are (used below for symlist). */ static inline std::vector make_symlist(const std::vector& strings) { std::vector symlist; auto is_empty_str = [](const auto& x) { return x.length == 0; }; if (std::all_of(std::begin(strings), std::end(strings), is_empty_str)) return symlist; std::set symmap; for (const auto& string : strings) visit(string, [&](auto s1) { for (auto ch : s1) symmap.insert(ch); }); /* create dense symbol table, so we can easily iterate over only characters * present in the strings */ symlist.insert(std::end(symlist), std::begin(symmap), std::end(symmap)); return symlist; } /** * lev_greedy_median: * @n: The size of @lengths, @strings, and @weights. * @lengths: The lengths of @strings. * @strings: An array of strings, that may contain NUL characters. * @weights: The string weights (they behave exactly as multiplicities, though * any positive value is allowed, not just integers). * @medlength: Where the length of the median should be stored. * * Finds a generalized median string of @strings using the greedy algorithm. * * Note it's considerably more efficient to give a string with weight 2 * than to store two identical strings in @strings (with weights 1). * * Returns: The generalized median, as a newly allocated string; its length * is stored in @medlength. 
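 *
 * For reference, the row update performed in the loop below is the textbook
 * Levenshtein recurrence, evaluated once per candidate symbol s appended to
 * the median:
 *
 *   row[k] = min(oldrow[k] + 1,                       // edit in the median
 *                row[k - 1] + 1,                      // edit in strings[i]
 *                oldrow[k - 1] + (s != s1[k - 1]))    // match / substitute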
**/ static inline std::vector lev_greedy_median(const std::vector& strings, const std::vector& weights) { /* find all symbols */ std::vector symlist = make_symlist(strings); if (symlist.empty()) return {}; /* allocate and initialize per-string matrix rows and a common work buffer */ std::vector> rows(strings.size()); size_t maxlen = (size_t)std::max_element(std::begin(strings), std::end(strings), [](const auto& a, const auto& b) { return a.length < b.length; })->length; for (size_t i = 0; i < strings.size(); i++) { size_t leni = (size_t)strings[i].length; rows[i] = std::make_unique(leni + 1); std::iota(rows[i].get(), rows[i].get() + leni + 1, 0); } size_t stoplen = 2 * maxlen + 1; auto row = std::make_unique(stoplen + 1); /* compute final cost of string of length 0 (empty string may be also * a valid answer) */ std::vector median(stoplen); /** * the total distance of the best median string of * given length. warning! mediandist[0] is total * distance for empty string, while median[] itself * is normally zero-based */ auto mediandist = std::make_unique(stoplen + 1); mediandist[0] = 0; for (size_t i = 0; i < strings.size(); i++) mediandist[0] += strings[i].length + weights[i]; /* build up the approximate median string symbol by symbol * XXX: we actually exit on break below, but on the same condition */ for (size_t len = 1; len <= stoplen; len++) { uint32_t symbol = 0; double minminsum = std::numeric_limits::max(); row[0] = len; /* iterate over all symbols we may want to add */ for (size_t j = 0; j < symlist.size(); j++) { double totaldist = 0.0; double minsum = 0.0; symbol = symlist[j]; /* sum Levenshtein distances from all the strings, with given weights */ for (size_t i = 0; i < strings.size(); i++) { visit(strings[i], [&](auto s1) { size_t* p = rows[i].get(); size_t min = len; size_t x = len; /* == row[0] */ /* compute how another row of Levenshtein matrix would look for median * string with this symbol added */ for (auto ch : s1) { size_t D = *(p++) + (symbol != ch); x++; if (x > D) x = D; if (x > *p + 1) x = *p + 1; if (x < min) min = x; } minsum += (double)min * weights[i]; totaldist += (double)x * weights[i]; }); } /* is this symbol better than all the others? */ if (minsum < minminsum) { minminsum = minsum; mediandist[len] = totaldist; median[len - 1] = symbol; } } /* stop the iteration if we no longer need to recompute the matrix rows * or when we are over maxlen and adding more characters doesn't seem * useful */ if (len == stoplen || (len > maxlen && mediandist[len] > mediandist[len - 1])) { stoplen = len; break; } /* now the best symbol is known, so recompute all matrix rows for this * one */ symbol = median[len - 1]; for (size_t i = 0; i < strings.size(); i++) visit(strings[i], [&](auto s1) { size_t* oldrow = rows[i].get(); /* compute a row of Levenshtein matrix */ for (size_t k = 1; k <= (size_t)s1.size(); k++) { size_t c1 = oldrow[k] + 1; size_t c2 = row[k - 1] + 1; size_t c3 = oldrow[k - 1] + (symbol != s1[k - 1]); row[k] = c2 > c3 ? c3 : c2; if (row[k] > c1) row[k] = c1; } memcpy(oldrow, row.get(), (s1.size() + 1) * sizeof(size_t)); }); } /* find the string with minimum total distance */ size_t bestlen = std::distance(mediandist.get(), std::min_element(mediandist.get(), mediandist.get() + stoplen)); /* return result */ median.resize(bestlen); return median; } /* * Knowing the distance matrices up to some row, finish the distance * computations. * * string1, len1 are already shortened. 
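 *
 * rows[j][0] holds the number of characters of string1 consumed before this
 * call (the offset), so a completed row still encodes distances for the full
 * original prefix even though only the remaining suffix is passed in as a
 * Range.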
*/ static inline double finish_distance_computations(const Range& string1, const std::vector& strings, const std::vector& weights, std::vector>& rows, std::unique_ptr& row) { double distsum = 0.0; /* sum of distances */ /* catch trivial case */ if (string1.empty()) { for (size_t j = 0; j < strings.size(); j++) distsum += (double)rows[j][(size_t)strings[j].length] * weights[j]; return distsum; } /* iterate through the strings and sum the distances */ for (size_t j = 0; j < strings.size(); j++) { visit(strings[j], [&](auto s2) { size_t* rowi = rows[j].get(); /* current row */ auto s1_temp = string1; /* temporary string for suffix stripping */ /* strip common suffix (prefix CAN'T be stripped) */ rapidfuzz::detail::remove_common_suffix(s1_temp, s2); /* catch trivial cases */ if (s1_temp.empty()) { distsum += (double)rowi[(size_t)s2.size()] * weights[j]; return; } /* row[0]; offset + len1 give together real len of string1 */ size_t offset = rowi[0]; if (s2.empty()) { distsum += (double)(offset + s1_temp.size()) * weights[j]; return; } /* complete the matrix */ std::copy_n(rowi, s2.size() + 1, row.get()); for (size_t i = 0; i < (size_t)s1_temp.size(); i++) { auto ch1 = s1_temp[i]; size_t* p = row.get() + 1; size_t D = i + 1 + offset; size_t x = D; for (auto ch2 : s2) { size_t c3 = --D + (ch1 != ch2); x++; if (x > c3) x = c3; D = *p; D++; if (x > D) x = D; *(p++) = x; } } size_t* end = row.get() + s2.size(); distsum += weights[j] * (double)(*end); }); } return distsum; } /** * lev_median_improve: * @len: The length of @s. * @s: The approximate generalized median string to be improved. * @n: The size of @lengths, @strings, and @weights. * @lengths: The lengths of @strings. * @strings: An array of strings, that may contain NUL characters. * @weights: The string weights (they behave exactly as multiplicities, though * any positive value is allowed, not just integers). * * Tries to make @s a better generalized median string of @strings with * small perturbations. * * It never returns a string with larger SOD than @s; in the worst case, a * string identical to @s is returned. 
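 *
 * Sketch of one improvement step as implemented below (restated from the
 * code, not additional behaviour): at every position pos the candidates are
 *   KEEP     leave median[pos] alone,
 *   REPLACE  median[pos] := s for each symbol s in symlist,
 *   INSERT   a symbol before pos (simulated through the pos - 1 slot),
 *   DELETE   drop median[pos],
 * and the variant with the lowest total distance wins; on a tie the current
 * string is kept, which is what guarantees the SOD never grows.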
* * Returns: The improved generalized median **/ static inline std::vector lev_median_improve(const RF_String& string, const std::vector& strings, const std::vector& weights) { /* find all symbols */ std::vector symlist = make_symlist(strings); if (symlist.empty()) return {}; /* allocate and initialize per-string matrix rows and a common work buffer */ std::vector> rows(strings.size()); size_t maxlen = 0; for (size_t i = 0; i < strings.size(); i++) { size_t leni = (size_t)strings[i].length; if (leni > maxlen) maxlen = leni; rows[i] = std::make_unique(leni + 1); std::iota(rows[i].get(), rows[i].get() + leni + 1, 0); } size_t stoplen = 2 * maxlen + 1; auto row = std::make_unique(stoplen + 2); /* initialize median to given string */ auto _median = std::make_unique(stoplen + 1); uint32_t* median = _median.get() + 1; /* we need -1st element for insertions at pos 0 */ size_t medlen = (size_t)string.length; visit(string, [&](auto s1) { std::copy(std::begin(s1), std::end(s1), median); }); double minminsum = finish_distance_computations(Range(median, median + medlen), strings, weights, rows, row); /* sequentially try perturbations on all positions */ for (size_t pos = 0; pos <= medlen;) { uint32_t orig_symbol, symbol; LevEditType operation; double sum; symbol = median[pos]; operation = LEV_EDIT_KEEP; /* IF pos < medlength: FOREACH symbol: try to replace the symbol * at pos, if some lower the total distance, chooste the best */ if (pos < medlen) { orig_symbol = median[pos]; for (size_t j = 0; j < symlist.size(); j++) { if (symlist[j] == orig_symbol) continue; median[pos] = symlist[j]; sum = finish_distance_computations(Range(median + pos, median + medlen), strings, weights, rows, row); if (sum < minminsum) { minminsum = sum; symbol = symlist[j]; operation = LEV_EDIT_REPLACE; } } median[pos] = orig_symbol; } /* FOREACH symbol: try to add it at pos, if some lower the total * distance, chooste the best (increase medlen) * We simulate insertion by replacing the character at pos-1 */ orig_symbol = *(median + pos - 1); for (size_t j = 0; j < symlist.size(); j++) { *(median + pos - 1) = symlist[j]; sum = finish_distance_computations(Range(median + pos - 1, median + medlen), strings, weights, rows, row); if (sum < minminsum) { minminsum = sum; symbol = symlist[j]; operation = LEV_EDIT_INSERT; } } *(median + pos - 1) = orig_symbol; /* IF pos < medlen: try to delete the symbol at pos, if it lowers * the total distance remember it (decrease medlen) */ if (pos < medlen) { sum = finish_distance_computations(Range(median + pos + 1, median + medlen), strings, weights, rows, row); if (sum < minminsum) { minminsum = sum; operation = LEV_EDIT_DELETE; } } /* actually perform the operation */ switch (operation) { case LEV_EDIT_REPLACE: median[pos] = symbol; break; case LEV_EDIT_INSERT: memmove(median + pos + 1, median + pos, (medlen - pos) * sizeof(uint32_t)); median[pos] = symbol; medlen++; break; case LEV_EDIT_DELETE: memmove(median + pos, median + pos + 1, (medlen - pos - 1) * sizeof(uint32_t)); medlen--; break; default: break; } assert(medlen <= stoplen); /* now the result is known, so recompute all matrix rows and move on */ if (operation != LEV_EDIT_DELETE) { symbol = median[pos]; row[0] = pos + 1; for (size_t i = 0; i < strings.size(); i++) { visit(strings[i], [&](auto s1) { size_t* oldrow = rows[i].get(); /* compute a row of Levenshtein matrix */ for (size_t k = 1; k <= (size_t)s1.size(); k++) { size_t c1 = oldrow[k] + 1; size_t c2 = row[k - 1] + 1; size_t c3 = oldrow[k - 1] + (symbol != s1[k - 1]); row[k] = c2 
> c3 ? c3 : c2; if (row[k] > c1) row[k] = c1; } std::copy_n(row.get(), s1.size() + 1, oldrow); }); } pos++; } } return std::vector(median, median + medlen); } std::vector lev_quick_median(const std::vector& strings, const std::vector& weights); /** * lev_set_median: * @n: The size of @lengths, @strings, and @weights. * @lengths: The lengths of @strings. * @strings: An array of strings, that may contain NUL characters. * @weights: The string weights (they behave exactly as multiplicities, though * any positive value is allowed, not just integers). * * Finds the median string of a string set @strings. * * Returns: The set median **/ static inline std::vector lev_set_median(const std::vector& strings, const std::vector& weights) { size_t minidx = 0; double mindist = std::numeric_limits::max(); std::vector distances(strings.size() * (strings.size() - 1) / 2, 0xff); for (size_t i = 0; i < strings.size(); i++) { visit(strings[i], [&](auto s1) { /* deduction guides are broken here on msvc */ rapidfuzz::CachedLevenshtein scorer(s1); double dist = 0.0; /* below diagonal */ size_t j = 0; for (; j < i && dist < mindist; j++) { size_t dindex = (i - 1) * (i - 2) / 2 + j; long int d; if (distances[dindex] >= 0) d = distances[dindex]; else d = (size_t)visit(strings[j], [&](auto s2) { return scorer.distance(s2); }); dist += weights[j] * (double)d; } j++; /* no need to compare item with itself */ /* above diagonal */ for (; j < strings.size() && dist < mindist; j++) { size_t dindex = (j - 1) * (j - 2) / 2 + i; distances[dindex] = visit(strings[j], [&](auto s2) { return scorer.distance(s2); }); dist += weights[j] * (double)distances[dindex]; } if (dist < mindist) { mindist = dist; minidx = i; } }); } return visit(strings[minidx], [&](auto s1) { return std::vector(std::begin(s1), std::end(s1)); }); } static inline bool is_equal(const RF_String& a, const RF_String& b) { return visitor(a, b, [](auto s1, auto s2) { return s1 == s2; }); } /** * lev_edit_seq_distance: * @n1: The length of @lengths1 and @strings1. * @lengths1: The lengths of strings in @strings1. * @strings1: An array of strings that may contain NUL characters. * @n2: The length of @lengths2 and @strings2. * @lengths2: The lengths of strings in @strings2. * @strings2: An array of strings that may contain NUL characters. * * Finds the distance between string sequences @strings1 and @strings2. * * In other words, this is a double-Levenshtein algorithm. * * The cost of string replace operation is based on string similarity: it's * zero for identical strings and 2 for completely unsimilar strings. * * Returns: The distance of the two sequences. 
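 *
 * Worked example of the substitution cost (illustrative only): for the items
 * "spam" and "park" the indel distance is 4 and l = 4 + 4 = 8, so replacing
 * one with the other costs 2.0 / 8 * 4 = 1.0, halfway between 0 (identical
 * items) and 2 (items with nothing in common).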
**/ static inline double lev_edit_seq_distance(const std::vector& strings1, const std::vector& strings2) { if (strings1.size() > strings2.size()) return lev_edit_seq_distance(strings2, strings1); auto strings1_start = std::begin(strings1); auto strings1_end = std::end(strings1); auto strings2_start = std::begin(strings2); auto strings2_end = std::end(strings2); /* strip common prefix */ while (strings1_start != strings1_end && strings2_start != strings2_end && is_equal(*strings1_start, *strings2_start)) { strings1_start++; strings2_start++; } /* strip common suffix */ while (strings1_start != strings1_end && strings2_start != strings2_end && is_equal(*(strings1_end - 1), *(strings2_end - 1))) { strings1_end--; strings2_end--; } /* catch trivial cases */ if (strings1_start == strings1_end) return (double)std::distance(strings2_start, strings2_end); if (strings2_start == strings2_end) return (double)std::distance(strings1_start, strings1_end); /* initialize first row */ size_t n1 = std::distance(strings1_start, strings1_end); size_t n2 = std::distance(strings2_start, strings2_end); auto row = std::make_unique(n2 + 1); double* last = row.get() + n2; double* end = row.get() + n2 + 1; std::iota(row.get(), end, 0.0); /* go through the matrix and compute the costs. yes, this is an extremely * obfuscated version, but also extremely memory-conservative and relatively * fast. */ for (size_t i = 0; i < n1; i++) { double* p = row.get() + 1; auto strings2_it = strings2_start; double D = (double)i; double x = (double)i + 1.0; visit(strings1[i], [&](auto s1) { /* deduction guides are broken here on msvc */ rapidfuzz::CachedIndel scorer(s1); while (p != end) { size_t l = strings1[i].length + strings2_it->length; double q; if (l == 0) q = D; else { size_t d = visit(*strings2_it, [&](auto s2) { return scorer.distance(s2); }); strings2_it++; q = D + 2.0 / (double)l * (double)d; } x += 1.0; if (x > q) x = q; D = *p; if (x > D + 1.0) x = D + 1.0; *(p++) = x; } }); } return *last; } std::vector munkres_blackman(size_t n1, size_t n2, double* dists); /** * lev_set_distance: * @n1: The length of @lengths1 and @strings1. * @lengths1: The lengths of strings in @strings1. * @strings1: An array of strings that may contain NUL characters. * @n2: The length of @lengths2 and @strings2. * @lengths2: The lengths of strings in @strings2. * @strings2: An array of strings that may contain NUL characters. * * Finds the distance between string sets @strings1 and @strings2. * * The difference from lev_edit_seq_distance() is that order doesn't matter. * The optimal association of @strings1 and @strings2 is found first and * the similarity is computed for that. * * Uses sequential Munkres-Blackman algorithm. * * Returns: The distance of the two sets. 
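 *
 * Shape of the computation below (restated from the code): fill an n1 x n2
 * matrix with normalized indel distances, let munkres_blackman() pick the
 * cheapest one-to-one assignment map, and return
 *
 *   (n2 - n1) + 2 * sum_j normalized_distance(strings1[j], strings2[map[j]])
 *
 * where n1 <= n2 is ensured by swapping the arguments first.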
**/ static inline double lev_set_distance(const std::vector& strings1, const std::vector& strings2) { /* catch trivial cases */ if (strings1.empty()) return (double)strings2.size(); if (strings2.empty()) return (double)strings1.size(); /* make the number of columns (n1) smaller than the number of rows */ if (strings1.size() > strings2.size()) return lev_set_distance(strings2, strings1); /* compute distances from each to each */ if (SIZE_MAX / strings1.size() <= strings2.size()) throw std::bad_alloc(); auto dists = std::make_unique(strings1.size() * strings2.size()); double* r = dists.get(); for (const auto& str2 : strings2) visit(str2, [&](auto s1) { /* deduction guides are broken here on msvc */ rapidfuzz::CachedIndel scorer(s1); for (const auto& str1 : strings1) *(r++) = visit(str1, [&](auto s2) { return scorer.normalized_distance(s2); }); }); /* find the optimal mapping between the two sets */ auto map = munkres_blackman(strings1.size(), strings2.size(), dists.get()); /* sum the set distance */ double sum = (double)(strings2.size() - strings1.size()); for (size_t j = 0; j < strings1.size(); j++) { size_t i = map[j]; sum += 2.0 * visitor(strings1[j], strings2[i], [](auto s1, auto s2) { return rapidfuzz::indel_normalized_distance(s1, s2); }); } return sum; } Levenshtein-0.27.0/src/Levenshtein/StringMatcher.py000066400000000000000000000044671474145074100223050ustar00rootroot00000000000000from __future__ import annotations from warnings import warn from Levenshtein import distance, editops, matching_blocks, opcodes, ratio class StringMatcher: """A SequenceMatcher-like class built on the top of Levenshtein""" def _reset_cache(self): self._ratio = self._distance = None self._opcodes = self._editops = self._matching_blocks = None def __init__(self, isjunk=None, seq1="", seq2="", autojunk=False): if isjunk: warn("isjunk NOT implemented, it will be ignored", stacklevel=1) if autojunk: warn("autojunk NOT implemented, it will be ignored", stacklevel=1) self._str1, self._str2 = seq1, seq2 self._reset_cache() def set_seqs(self, seq1, seq2): self._str1, self._str2 = seq1, seq2 self._reset_cache() def set_seq1(self, seq1): self._str1 = seq1 self._reset_cache() def set_seq2(self, seq2): self._str2 = seq2 self._reset_cache() def get_opcodes(self): if not self._opcodes: if self._editops: self._opcodes = opcodes(self._editops, self._str1, self._str2) else: self._opcodes = opcodes(self._str1, self._str2) return self._opcodes def get_editops(self): if not self._editops: if self._opcodes: self._editops = editops(self._opcodes, self._str1, self._str2) else: self._editops = editops(self._str1, self._str2) return self._editops def get_matching_blocks(self): if not self._matching_blocks: self._matching_blocks = matching_blocks(self.get_opcodes(), self._str1, self._str2) return self._matching_blocks def ratio(self): if not self._ratio: self._ratio = ratio(self._str1, self._str2) return self._ratio def quick_ratio(self): # This is usually quick enough :o) if not self._ratio: self._ratio = ratio(self._str1, self._str2) return self._ratio def real_quick_ratio(self): len1, len2 = len(self._str1), len(self._str2) return 2.0 * min(len1, len2) / (len1 + len2) def distance(self): if not self._distance: self._distance = distance(self._str1, self._str2) return self._distance Levenshtein-0.27.0/src/Levenshtein/__init__.py000066400000000000000000000430151474145074100212620ustar00rootroot00000000000000""" A C extension module for fast computation of: - Levenshtein (edit) distance and edit sequence manipulation - string 
similarity - approximate median strings, and generally string averaging - string sequence and set similarity Levenshtein has a some overlap with difflib (SequenceMatcher). It supports only strings, not arbitrary sequence types, but on the other hand it's much faster. It supports both normal and Unicode strings, but can't mix them, all arguments to a function (method) have to be of the same type (or its subclasses). """ from __future__ import annotations __author__: str = "Max Bachmann" __license__: str = "GPL" __version__: str = "0.27.0" import rapidfuzz.distance.Hamming as _Hamming import rapidfuzz.distance.Indel as _Indel import rapidfuzz.distance.Jaro as _Jaro import rapidfuzz.distance.JaroWinkler as _JaroWinkler import rapidfuzz.distance.Levenshtein as _Levenshtein from rapidfuzz.distance import ( Editops as _Editops, ) from rapidfuzz.distance import ( Opcodes as _Opcodes, ) from Levenshtein.levenshtein_cpp import ( median, median_improve, quickmedian, seqratio, setmedian, setratio, ) __all__ = [ "quickmedian", "median", "median_improve", "setmedian", "setratio", "seqratio", "distance", "ratio", "hamming", "jaro", "jaro_winkler", "editops", "opcodes", "matching_blocks", "apply_edit", "subtract_edit", "inverse", ] def distance(s1, s2, *, weights=(1, 1, 1), processor=None, score_cutoff=None, score_hint=None): """ Calculates the minimum number of insertions, deletions, and substitutions required to change one sequence into the other according to Levenshtein with custom costs for insertion, deletion and substitution Parameters ---------- s1 : Sequence[Hashable] First string to compare. s2 : Sequence[Hashable] Second string to compare. weights : Tuple[int, int, int] or None, optional The weights for the three operations in the form (insertion, deletion, substitution). Default is (1, 1, 1), which gives all three operations a weight of 1. processor: callable, optional Optional callable that is used to preprocess the strings before comparing them. Default is None, which deactivates this behaviour. score_cutoff : int, optional Maximum distance between s1 and s2, that is considered as a result. If the distance is bigger than score_cutoff, score_cutoff + 1 is returned instead. Default is None, which deactivates this behaviour. score_hint : int, optional Expected distance between s1 and s2. This is used to select a faster implementation. Default is None, which deactivates this behaviour. Returns ------- distance : int distance between s1 and s2 Raises ------ ValueError If unsupported weights are provided a ValueError is thrown Examples -------- Find the Levenshtein distance between two strings: >>> from Levenshtein import distance >>> distance("lewenstein", "levenshtein") 2 Setting a maximum distance allows the implementation to select a more efficient implementation: >>> distance("lewenstein", "levenshtein", score_cutoff=1) 2 It is possible to select different weights by passing a `weight` tuple. >>> distance("lewenstein", "levenshtein", weights=(1,1,2)) 3 """ return _Levenshtein.distance( s1, s2, weights=weights, processor=processor, score_cutoff=score_cutoff, score_hint=score_hint, ) def ratio(s1, s2, *, processor=None, score_cutoff=None): """ Calculates a normalized indel similarity in the range [0, 1]. The indel distance calculates the minimum number of insertions and deletions required to change one sequence into the other. This is calculated as ``1 - (distance / (len1 + len2))`` Parameters ---------- s1 : Sequence[Hashable] First string to compare. 
s2 : Sequence[Hashable] Second string to compare. processor: callable, optional Optional callable that is used to preprocess the strings before comparing them. Default is None, which deactivates this behaviour. score_cutoff : float, optional Optional argument for a score threshold as a float between 0 and 1.0. For norm_sim < score_cutoff 0 is returned instead. Default is 0, which deactivates this behaviour. Returns ------- norm_sim : float normalized similarity between s1 and s2 as a float between 0 and 1.0 Examples -------- Find the normalized Indel similarity between two strings: >>> from Levenshtein import ratio >>> ratio("lewenstein", "levenshtein") 0.85714285714285 Setting a score_cutoff allows the implementation to select a more efficient implementation: >>> ratio("lewenstein", "levenshtein", score_cutoff=0.9) 0.0 When a different processor is used s1 and s2 do not have to be strings >>> ratio(["lewenstein"], ["levenshtein"], processor=lambda s: s[0]) 0.8571428571428572 """ return _Indel.normalized_similarity(s1, s2, processor=processor, score_cutoff=score_cutoff) def hamming(s1, s2, *, pad=True, processor=None, score_cutoff=None): """ Calculates the Hamming distance between two strings. The hamming distance is defined as the number of positions where the two strings differ. It describes the minimum amount of substitutions required to transform s1 into s2. Parameters ---------- s1 : Sequence[Hashable] First string to compare. s2 : Sequence[Hashable] Second string to compare. pad : bool, optional should strings be padded if there is a length difference. If pad is False and strings have a different length a ValueError is thrown instead. Default is True. processor: callable, optional Optional callable that is used to preprocess the strings before comparing them. Default is None, which deactivates this behaviour. score_cutoff : int or None, optional Maximum distance between s1 and s2, that is considered as a result. If the distance is bigger than score_cutoff, score_cutoff + 1 is returned instead. Default is None, which deactivates this behaviour. Returns ------- distance : int distance between s1 and s2 Raises ------ ValueError If s1 and s2 have a different length """ return _Hamming.distance(s1, s2, pad=pad, processor=processor, score_cutoff=score_cutoff) def jaro(s1, s2, *, processor=None, score_cutoff=None) -> float: """ Calculates the jaro similarity Parameters ---------- s1 : Sequence[Hashable] First string to compare. s2 : Sequence[Hashable] Second string to compare. processor: callable, optional Optional callable that is used to preprocess the strings before comparing them. Default is None, which deactivates this behaviour. score_cutoff : float, optional Optional argument for a score threshold as a float between 0 and 1.0. For ratio < score_cutoff 0 is returned instead. Default is None, which deactivates this behaviour. Returns ------- similarity : float similarity between s1 and s2 as a float between 0 and 1.0 """ return _Jaro.similarity(s1, s2, processor=processor, score_cutoff=score_cutoff) def jaro_winkler(s1, s2, *, prefix_weight=0.1, processor=None, score_cutoff=None) -> float: """ Calculates the jaro winkler similarity Parameters ---------- s1 : Sequence[Hashable] First string to compare. s2 : Sequence[Hashable] Second string to compare. prefix_weight : float, optional Weight used for the common prefix of the two strings. Has to be between 0 and 0.25. Default is 0.1. 
processor: callable, optional Optional callable that is used to preprocess the strings before comparing them. Default is None, which deactivates this behaviour. score_cutoff : float, optional Optional argument for a score threshold as a float between 0 and 1.0. For ratio < score_cutoff 0 is returned instead. Default is None, which deactivates this behaviour. Returns ------- similarity : float similarity between s1 and s2 as a float between 0 and 1.0 Raises ------ ValueError If prefix_weight is invalid """ return _JaroWinkler.similarity( s1, s2, prefix_weight=prefix_weight, processor=processor, score_cutoff=score_cutoff, ) # assign attributes to function. This allows rapidfuzz to call them more efficiently # we can't directly copy the functions + replace the docstrings, since this leads to # crashes on PyPy distance._RF_OriginalScorer = distance ratio._RF_OriginalScorer = ratio hamming._RF_OriginalScorer = hamming jaro._RF_OriginalScorer = jaro jaro_winkler._RF_OriginalScorer = jaro_winkler distance._RF_ScorerPy = _Levenshtein.distance._RF_ScorerPy ratio._RF_ScorerPy = _Indel.normalized_similarity._RF_ScorerPy hamming._RF_ScorerPy = _Hamming.distance._RF_ScorerPy jaro._RF_ScorerPy = _Jaro.similarity._RF_ScorerPy jaro_winkler._RF_ScorerPy = _JaroWinkler.similarity._RF_ScorerPy if hasattr(_Levenshtein.distance, "_RF_Scorer"): distance._RF_Scorer = _Levenshtein.distance._RF_Scorer if hasattr(_Indel.normalized_similarity, "_RF_Scorer"): ratio._RF_Scorer = _Indel.normalized_similarity._RF_Scorer if hasattr(_Hamming.distance, "_RF_Scorer"): hamming._RF_Scorer = _Hamming.distance._RF_Scorer if hasattr(_Jaro.similarity, "_RF_Scorer"): jaro._RF_Scorer = _Jaro.similarity._RF_Scorer if hasattr(_JaroWinkler.similarity, "_RF_Scorer"): jaro_winkler._RF_Scorer = _JaroWinkler.similarity._RF_Scorer def editops(*args): """ Find sequence of edit operations transforming one string to another. editops(source_string, destination_string) editops(edit_operations, source_length, destination_length) The result is a list of triples (operation, spos, dpos), where operation is one of 'equal', 'replace', 'insert', or 'delete'; spos and dpos are position of characters in the first (source) and the second (destination) strings. These are operations on single characters. In fact the returned list doesn't contain the 'equal', but all the related functions accept both lists with and without 'equal's. Examples -------- >>> editops('spam', 'park') [('delete', 0, 0), ('insert', 3, 2), ('replace', 3, 3)] The alternate form editops(opcodes, source_string, destination_string) can be used for conversion from opcodes (5-tuples) to editops (you can pass strings or their lengths, it doesn't matter). """ # convert: we were called (bops, s1, s2) if len(args) == 3: arg1, arg2, arg3 = args len1 = arg2 if isinstance(arg2, int) else len(arg2) len2 = arg3 if isinstance(arg3, int) else len(arg3) return _Editops(arg1, len1, len2).as_list() # find editops: we were called (s1, s2) arg1, arg2 = args return _Levenshtein.editops(arg1, arg2).as_list() def opcodes(*args): """ Find sequence of edit operations transforming one string to another. opcodes(source_string, destination_string) opcodes(edit_operations, source_length, destination_length) The result is a list of 5-tuples with the same meaning as in SequenceMatcher's get_opcodes() output. But since the algorithms differ, the actual sequences from Levenshtein and SequenceMatcher may differ too. Examples -------- >>> for x in opcodes('spam', 'park'): ... print(x) ... 
('delete', 0, 1, 0, 0) ('equal', 1, 3, 0, 2) ('insert', 3, 3, 2, 3) ('replace', 3, 4, 3, 4) The alternate form opcodes(editops, source_string, destination_string) can be used for conversion from editops (triples) to opcodes (you can pass strings or their lengths, it doesn't matter). """ # convert: we were called (ops, s1, s2) if len(args) == 3: arg1, arg2, arg3 = args len1 = arg2 if isinstance(arg2, int) else len(arg2) len2 = arg3 if isinstance(arg3, int) else len(arg3) return _Opcodes(arg1, len1, len2).as_list() # find editops: we were called (s1, s2) arg1, arg2 = args return _Levenshtein.opcodes(arg1, arg2).as_list() def matching_blocks(edit_operations, source_string, destination_string): """ Find identical blocks in two strings. Parameters ---------- edit_operations : list[] editops or opcodes created for the source and destination string source_string : str | int source string or the length of the source string destination_string : str | int destination string or the length of the destination string Returns ------- matching_blocks : list[] List of triples with the same meaning as in SequenceMatcher's get_matching_blocks() output. Examples -------- >>> a, b = 'spam', 'park' >>> matching_blocks(editops(a, b), a, b) [(1, 0, 2), (4, 4, 0)] >>> matching_blocks(editops(a, b), len(a), len(b)) [(1, 0, 2), (4, 4, 0)] The last zero-length block is not an error, but it's there for compatibility with difflib which always emits it. One can join the matching blocks to get two identical strings: >>> a, b = 'dog kennels', 'mattresses' >>> mb = matching_blocks(editops(a,b), a, b) >>> ''.join([a[x[0]:x[0]+x[2]] for x in mb]) 'ees' >>> ''.join([b[x[1]:x[1]+x[2]] for x in mb]) 'ees' """ len1 = source_string if isinstance(source_string, int) else len(source_string) len2 = destination_string if isinstance(destination_string, int) else len(destination_string) if not edit_operations or len(edit_operations[0]) == 3: return _Editops(edit_operations, len1, len2).as_matching_blocks() return _Opcodes(edit_operations, len1, len2).as_matching_blocks() def apply_edit(edit_operations, source_string, destination_string): """ Apply a sequence of edit operations to a string. apply_edit(edit_operations, source_string, destination_string) In the case of editops, the sequence can be arbitrary ordered subset of the edit sequence transforming source_string to destination_string. Examples -------- >>> e = editops('man', 'scotsman') >>> apply_edit(e, 'man', 'scotsman') 'scotsman' >>> apply_edit(e[:3], 'man', 'scotsman') 'scoman' The other form of edit operations, opcodes, is not very suitable for such a tricks, because it has to always span over complete strings, subsets can be created by carefully replacing blocks with 'equal' blocks, or by enlarging 'equal' block at the expense of other blocks and adjusting the other blocks accordingly. >>> a, b = 'spam and eggs', 'foo and bar' >>> e = opcodes(a, b) >>> apply_edit(inverse(e), b, a) 'spam and eggs' """ if len(edit_operations) == 0: return source_string len1 = len(source_string) len2 = len(destination_string) if len(edit_operations[0]) == 3: return _Editops(edit_operations, len1, len2).apply(source_string, destination_string) return _Opcodes(edit_operations, len1, len2).apply(source_string, destination_string) def subtract_edit(edit_operations, subsequence): """ Subtract an edit subsequence from a sequence. 
subtract_edit(edit_operations, subsequence) The result is equivalent to editops(apply_edit(subsequence, s1, s2), s2), except that is constructed directly from the edit operations. That is, if you apply it to the result of subsequence application, you get the same final string as from application complete edit_operations. It may be not identical, though (in amibuous cases, like insertion of a character next to the same character). The subtracted subsequence must be an ordered subset of edit_operations. Note this function does not accept difflib-style opcodes as no one in his right mind wants to create subsequences from them. Examples -------- >>> e = editops('man', 'scotsman') >>> e1 = e[:3] >>> bastard = apply_edit(e1, 'man', 'scotsman') >>> bastard 'scoman' >>> apply_edit(subtract_edit(e, e1), bastard, 'scotsman') 'scotsman' """ str_len = 2**32 return ( _Editops(edit_operations, str_len, str_len) .remove_subsequence(_Editops(subsequence, str_len, str_len)) .as_list() ) def inverse(edit_operations): """ Invert the sense of an edit operation sequence. In other words, it returns a list of edit operations transforming the second (destination) string to the first (source). It can be used with both editops and opcodes. Parameters ---------- edit_operations : list[] edit operations to invert Returns ------- edit_operations : list[] inverted edit operations Examples -------- >>> editops('spam', 'park') [('delete', 0, 0), ('insert', 3, 2), ('replace', 3, 3)] >>> inverse(editops('spam', 'park')) [('insert', 0, 0), ('delete', 2, 3), ('replace', 3, 3)] """ if len(edit_operations) == 0: return [] if len(edit_operations[0]) == 3: len1 = edit_operations[-1][1] + 1 len2 = edit_operations[-1][2] + 1 return _Editops(edit_operations, len1, len2).inverse().as_list() len1 = edit_operations[-1][2] len2 = edit_operations[-1][4] return _Opcodes(edit_operations, len1, len2).inverse().as_list() Levenshtein-0.27.0/src/Levenshtein/__init__.pyi000066400000000000000000000055461474145074100214420ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Hashable, Sequence from typing import overload __author__: str __license__: str __version__: str _EditopsList = list[tuple[str, int, int]] _OpcodesList = list[tuple[str, int, int, int, int]] _MatchingBlocks = list[tuple[int, int, int]] _AnyEditops = _EditopsList | _OpcodesList def inverse(edit_operations: list) -> list: ... @overload def editops(s1: Sequence[Hashable], s2: Sequence[Hashable]) -> _EditopsList: ... @overload def editops( ops: _AnyEditops, s1: Sequence[Hashable] | int, s2: Sequence[Hashable] | int, ) -> _EditopsList: ... @overload def opcodes(s1: Sequence[Hashable], s2: Sequence[Hashable]) -> _OpcodesList: ... @overload def opcodes( ops: _AnyEditops, s1: Sequence[Hashable] | int, s2: Sequence[Hashable] | int, ) -> _OpcodesList: ... def matching_blocks( edit_operations: _AnyEditops, source_string: Sequence[Hashable] | int, destination_string: Sequence[Hashable] | int, ) -> _MatchingBlocks: ... def subtract_edit(edit_operations: _EditopsList, subsequence: _EditopsList) -> _EditopsList: ... def apply_edit(edit_operations: _AnyEditops, source_string: str, destination_string: str) -> str: ... def median(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... def quickmedian(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... def median_improve( string: str | bytes, strlist: list[str | bytes], wlist: list[float] | None = None, ) -> str: ... 
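# Usage sketch for the median family (illustrative only; mirrors the
# docstrings in __init__.py, with weights acting as multiplicities):
#
#   from Levenshtein import median, median_improve, quickmedian
#   words = ["test", "text", "tent"]
#   rough = quickmedian(words)             # fast, approximate
#   better = median_improve(rough, words)  # one perturbation pass
#   best = median(words, [1.0, 2.0, 1.0])  # greedy, optionally weighted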
def setmedian(strlist: list[str | bytes], wlist: list[float] | None = None) -> str: ... def setratio(strlist1: list[str | bytes], strlist2: list[str | bytes]) -> float: ... def seqratio(strlist1: list[str | bytes], strlist2: list[str | bytes]) -> float: ... def distance( s1: Sequence[Hashable], s2: Sequence[Hashable], *, weights: tuple[int, int, int] | None = (1, 1, 1), processor: Callable[..., Sequence[Hashable]] | None = None, score_cutoff: float | None = None, score_hint: float | None = None, ) -> int: ... def ratio( s1: Sequence[Hashable], s2: Sequence[Hashable], *, processor: Callable[..., Sequence[Hashable]] | None = None, score_cutoff: float | None = None, ) -> float: ... def hamming( s1: Sequence[Hashable], s2: Sequence[Hashable], *, pad: bool = True, processor: Callable[..., Sequence[Hashable]] | None = None, score_cutoff: float | None = None, ) -> int: ... def jaro( s1: Sequence[Hashable], s2: Sequence[Hashable], *, processor: Callable[..., Sequence[Hashable]] | None = None, score_cutoff: float | None = None, ) -> float: ... def jaro_winkler( s1: Sequence[Hashable], s2: Sequence[Hashable], *, prefix_weight: float | None = 0.1, processor: Callable[..., Sequence[Hashable]] | None = None, score_cutoff: float | None = None, ) -> float: ... Levenshtein-0.27.0/src/Levenshtein/generate.sh000066400000000000000000000003201474145074100212670ustar00rootroot00000000000000#!/bin/sh curdir="${0%/*}" generate_cython() { python -m cython -I "$curdir" --cplus "$curdir"/"$1".pyx -o "$curdir"/"$1".cxx || exit 1 echo "Generated $curdir/$1.cxx" } generate_cython levenshtein_cpp Levenshtein-0.27.0/src/Levenshtein/levenshtein_cpp.pyx000066400000000000000000000176321474145074100231070ustar00rootroot00000000000000# distutils: language=c++ # cython: language_level=3 # cython: binding=True from libc.stdint cimport uint32_t from libcpp.vector cimport vector from libcpp cimport bool from libcpp.utility cimport move cdef extern from *: int PyUnicode_4BYTE_KIND object PyUnicode_FromKindAndData(int kind, const void *buffer, Py_ssize_t size) cdef extern from "_levenshtein.hpp": cdef vector[uint32_t] lev_greedy_median(const vector[RF_String]& strings, const vector[double]& weights) except + cdef vector[uint32_t] lev_median_improve(const RF_String& string, const vector[RF_String]& strings, const vector[double]& weights) except + cdef vector[uint32_t] lev_quick_median(const vector[RF_String]& strings, const vector[double]& weights) except + cdef vector[uint32_t] lev_set_median(const vector[RF_String]& strings, const vector[double]& weights) except + cdef double lev_set_distance(const vector[RF_String]& strings1, const vector[RF_String]& strings2) except + cdef double lev_edit_seq_distance(const vector[RF_String]& strings1, const vector[RF_String]& strings2) except + ctypedef struct RF_String: pass cdef bool is_valid_string(object) cdef RF_String convert_string(object) cdef inline RF_String conv_sequence(seq) except *: if is_valid_string(seq): return convert_string(seq) raise TypeError("Expected string or bytes") cdef vector[double] extract_weightlist(wlist, size_t n) except *: cdef size_t i cdef double weight cdef vector[double] weights if wlist is None: weights.resize(n, 1.0) else: weights.resize(n) for i, w in enumerate(wlist): weight = w if w < 0: raise ValueError(f"weight {weight} is negative") weights[i] = w return weights cdef vector[RF_String] extract_stringlist(strings) except *: cdef vector[RF_String] strlist for string in strings: strlist.push_back(move(conv_sequence(string))) return move(strlist) def 
median(strlist, wlist = None, *): """ Find an approximate generalized median string using greedy algorithm. You can optionally pass a weight for each string as the second argument. The weights are interpreted as item multiplicities, although any non-negative real numbers are accepted. Use them to improve computation speed when strings often appear multiple times in the sequence. Examples -------- >>> median(['SpSm', 'mpamm', 'Spam', 'Spa', 'Sua', 'hSam']) 'Spam' >>> fixme = ['Levnhtein', 'Leveshein', 'Leenshten', ... 'Leveshtei', 'Lenshtein', 'Lvenstein', ... 'Levenhtin', 'evenshtei'] >>> median(fixme) 'Levenshtein' Hm. Even a computer program can spell Levenshtein better than me. """ if wlist is not None and len(strlist) != len(wlist): raise ValueError("strlist has a different length than wlist") weights = extract_weightlist(wlist, len(strlist)) strings = extract_stringlist(strlist) median = lev_greedy_median(strings, weights) return PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, median.data(), median.size()) def quickmedian(strlist, wlist = None, *): """ Find a very approximate generalized median string, but fast. See median() for argument description. This method is somewhere between setmedian() and picking a random string from the set; both speedwise and quality-wise. Examples -------- >>> fixme = ['Levnhtein', 'Leveshein', 'Leenshten', ... 'Leveshtei', 'Lenshtein', 'Lvenstein', ... 'Levenhtin', 'evenshtei'] >>> quickmedian(fixme) 'Levnshein' """ if wlist is not None and len(strlist) != len(wlist): raise ValueError("strlist has a different length than wlist") weights = extract_weightlist(wlist, len(strlist)) strings = extract_stringlist(strlist) median = lev_quick_median(strings, weights) return PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, median.data(), median.size()) def median_improve(string, strlist, wlist = None, *): """ Improve an approximate generalized median string by perturbations. The first argument is the estimated generalized median string you want to improve, the others are the same as in median(). It returns a string with total distance less or equal to that of the given string. Note this is much slower than median(). Also note it performs only one improvement step, calling median_improve() again on the result may improve it further, though this is unlikely to happen unless the given string was not very similar to the actual generalized median. Examples -------- >>> fixme = ['Levnhtein', 'Leveshein', 'Leenshten', ... 'Leveshtei', 'Lenshtein', 'Lvenstein', ... 'Levenhtin', 'evenshtei'] >>> median_improve('spam', fixme) 'enhtein' >>> median_improve(median_improve('spam', fixme), fixme) 'Levenshtein' It takes some work to change spam to Levenshtein. """ if wlist is not None and len(strlist) != len(wlist): raise ValueError("strlist has a different length than wlist") weights = extract_weightlist(wlist, len(strlist)) query = conv_sequence(string) strings = extract_stringlist(strlist) median = lev_median_improve(query, strings, weights) return PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, median.data(), median.size()) def setmedian(strlist, wlist = None, *): """ Find set median of a string set (passed as a sequence). See median() for argument description. The returned string is always one of the strings in the sequence. Examples -------- >>> setmedian(['ehee', 'cceaes', 'chees', 'chreesc', ... 'chees', 'cheesee', 'cseese', 'chetese']) 'chees' You haven't asked me about Limburger, sir. 
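    Weights are interpreted as multiplicities, so (for illustration) a weight
    of two behaves like listing the same string twice:

    >>> setmedian(['ehee', 'chees'], [1, 2])
    'chees'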
""" if wlist is not None and len(strlist) != len(wlist): raise ValueError("strlist has a different length than wlist") weights = extract_weightlist(wlist, len(strlist)) strings = extract_stringlist(strlist) median = lev_set_median(strings, weights) return PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, median.data(), median.size()) def setratio(strlist1, strlist2, *): """ Compute similarity ratio of two strings sets (passed as sequences). The best match between any strings in the first set and the second set (passed as sequences) is attempted. I.e., the order doesn't matter here. Examples -------- >>> setratio(['newspaper', 'litter bin', 'tinny', 'antelope'], ... ['caribou', 'sausage', 'gorn', 'woody']) 0.281845 No, even reordering doesn't help the tinny words to match the woody ones. """ strings1 = extract_stringlist(strlist1) strings2 = extract_stringlist(strlist2) lensum = strings1.size() + strings2.size() if lensum == 0: return 1.0 if strings1.empty(): dist = strings2.size() elif strings2.empty(): dist = strings1.size() else: dist = lev_set_distance(strings1, strings2) return (lensum - dist) / lensum def seqratio(strlist1, strlist2, *): """ Compute similarity ratio of two sequences of strings. This is like ratio(), but for string sequences. A kind of ratio() is used to to measure the cost of item change operation for the strings. Examples -------- >>> seqratio(['newspaper', 'litter bin', 'tinny', 'antelope'], ... ['caribou', 'sausage', 'gorn', 'woody']) 0.215178 """ strings1 = extract_stringlist(strlist1) strings2 = extract_stringlist(strlist2) lensum = strings1.size() + strings2.size() if lensum == 0: return 1.0 if strings1.empty(): dist = strings2.size() elif strings2.empty(): dist = strings1.size() else: dist = lev_edit_seq_distance(strings1, strings2) return (lensum - dist) / lensum Levenshtein-0.27.0/src/Levenshtein/py.typed000066400000000000000000000000001474145074100206330ustar00rootroot00000000000000Levenshtein-0.27.0/tests/000077500000000000000000000000001474145074100152355ustar00rootroot00000000000000Levenshtein-0.27.0/tests/test_levenshtein_distance.py000066400000000000000000000015731474145074100230520ustar00rootroot00000000000000from __future__ import annotations import Levenshtein def test_empty_string(): """ when both strings are empty this is a perfect match """ assert Levenshtein.distance(b"", b"") == 0 assert Levenshtein.distance("", "") == 0 def test_simple(): """ some very simple tests using supported string types bytes/unicode to catch relatively obvious implementation errors """ assert Levenshtein.distance(b"ABCD", b"AF") == 3 assert Levenshtein.distance("ABCD", "AF") == 3 assert Levenshtein.distance(b"ABCD", b"ABCD") == 0 assert Levenshtein.distance("ABCD", "ABCD") == 0 def test_simple_unicode_tests(): """ some very simple tests using unicode to catch relatively obvious implementation errors """ s1 = "ÁÄ" s2 = "ABCD" assert Levenshtein.distance(s1, s2) == 4 assert Levenshtein.distance(s1, s1) == 0 Levenshtein-0.27.0/tests/test_matching_blocks.py000066400000000000000000000014351474145074100220000ustar00rootroot00000000000000from __future__ import annotations from rapidfuzz.distance import MatchingBlock from Levenshtein import editops, matching_blocks def test_simple(): a, b = "spam", "park" assert matching_blocks(editops(a, b), a, b) == [ MatchingBlock(1, 0, 2), MatchingBlock(4, 4, 0), ] assert matching_blocks(editops(a, b), len(a), len(b)) == [ MatchingBlock(1, 0, 2), MatchingBlock(4, 4, 0), ] assert matching_blocks(editops("", ""), 0, 0) == [MatchingBlock(0, 0, 
0)] assert matching_blocks(editops("", "a"), 0, 1) == [MatchingBlock(0, 1, 0)] assert matching_blocks(editops("a", ""), 1, 0) == [MatchingBlock(1, 0, 0)] assert matching_blocks(editops("a", "a"), 1, 1) == [ MatchingBlock(0, 0, 1), MatchingBlock(1, 1, 0), ] Levenshtein-0.27.0/tests/test_median.py000066400000000000000000000024251474145074100201060ustar00rootroot00000000000000from __future__ import annotations import Levenshtein def test_weight_zero(): """ Strings with zero weights should be ignored """ assert Levenshtein.quickmedian(["tes", "teste"], [0, 0]) == "" assert Levenshtein.quickmedian(["tes", "teste"], [1, 0]) == "tes" assert Levenshtein.quickmedian(["tes", "teste"], [0, 1]) == "teste" def test_documented(): """ run tests from documentation """ assert Levenshtein.median(["SpSm", "mpamm", "Spam", "Spa", "Sua", "hSam"]) == "Spam" fixme = [ "Levnhtein", "Leveshein", "Leenshten", "Leveshtei", "Lenshtein", "Lvenstein", "Levenhtin", "evenshtei", ] assert Levenshtein.median(fixme) == "Levenshtein" assert Levenshtein.quickmedian(fixme) == "Levnshein" assert Levenshtein.median_improve("spam", fixme) == "enhtein" assert Levenshtein.median_improve(Levenshtein.median_improve("spam", fixme), fixme) == "Levenshtein" assert ( Levenshtein.setmedian( [ "ehee", "cceaes", "chees", "chreesc", "chees", "cheesee", "cseese", "chetese", ] ) == "chees" ) Levenshtein-0.27.0/tests/test_seq_ratio.py000066400000000000000000000006711474145074100206400ustar00rootroot00000000000000from __future__ import annotations import Levenshtein def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) def test_documented(): """ run tests from documentation """ ratio = Levenshtein.seqratio( ["newspaper", "litter bin", "tinny", "antelope"], ["caribou", "sausage", "gorn", "woody"], ) assert isclose(ratio, 0.21517857142857144) Levenshtein-0.27.0/tests/test_set_ratio.py000066400000000000000000000006701474145074100206420ustar00rootroot00000000000000from __future__ import annotations import Levenshtein def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) def test_documented(): """ run tests from documentation """ ratio = Levenshtein.setratio( ["newspaper", "litter bin", "tinny", "antelope"], ["caribou", "sausage", "gorn", "woody"], ) assert isclose(ratio, 0.2818452380952381) Levenshtein-0.27.0/tools/000077500000000000000000000000001474145074100152335ustar00rootroot00000000000000Levenshtein-0.27.0/tools/backtrace000077500000000000000000000000171474145074100170760ustar00rootroot00000000000000backtrace quit Levenshtein-0.27.0/tools/sdist.patch000066400000000000000000000004621474145074100174040ustar00rootroot00000000000000diff --git a/pyproject.toml b/pyproject.toml index 0a8c033..cf967b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,6 @@ [build-system] requires = [ - "scikit-build-core>=0.10.7", - "Cython>=3.0.11,<3.1.0" + "scikit-build-core>=0.10.7" ] build-backend = "scikit_build_core.build" Levenshtein-0.27.0/tools/seg_wrapper.sh000077500000000000000000000003031474145074100201040ustar00rootroot00000000000000#!/bin/bash SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" ulimit -c unlimited "$@" if [[ $? -eq 139 ]]; then coredumpctl gdb -1 -A "--batch -x $SCRIPTPATH/backtrace" fi
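# Usage sketch (illustrative; assumes a systemd-coredump setup so that
# coredumpctl can find the core dump of the crashed child process):
#   tools/seg_wrapper.sh python -m pytest tests
# Exit status 139 (128 + SIGSEGV) triggers the gdb backtrace above.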