pax_global_header00006660000000000000000000000064145532120260014512gustar00rootroot0000000000000052 comment=c98229be5fab21bb5c6df0d1c5e8a52f461f7e1e fpylll-0.6.1/000077500000000000000000000000001455321202600130205ustar00rootroot00000000000000fpylll-0.6.1/.github/000077500000000000000000000000001455321202600143605ustar00rootroot00000000000000fpylll-0.6.1/.github/workflows/000077500000000000000000000000001455321202600164155ustar00rootroot00000000000000fpylll-0.6.1/.github/workflows/check-style.yml000066400000000000000000000007341455321202600213570ustar00rootroot00000000000000name: Check Style on: push: pull_request: jobs: build: runs-on: ubuntu-latest steps: - name: Setup Python uses: actions/setup-python@v2 with: python-version: '3.x' - name: Check out uses: actions/checkout@v2 - name: Dependencies run: pip install flake8 - name: Flake8 run: flake8 --max-line-length=120 --max-complexity=16 --ignore=E22,E241,E741,E203 src tests fpylll-0.6.1/.github/workflows/docker.yml000066400000000000000000000016651455321202600204170ustar00rootroot00000000000000name: Docker on: push: branches: master tags: '*' jobs: main: runs-on: ubuntu-latest steps: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 - name: Docker meta id: docker_meta uses: crazy-max/ghaction-docker-meta@v1 with: images: fplll/fpylll # list of Docker images to use as base name for tags - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push id: docker_build uses: docker/build-push-action@v2 with: push: true tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} file: ./Dockerfile - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} fpylll-0.6.1/.github/workflows/release.yml000066400000000000000000000026421455321202600205640ustar00rootroot00000000000000name: Release on: push: tags: - '*' jobs: build: name: Upload Release Asset 
runs-on: ubuntu-latest steps: - name: Set up Python uses: actions/setup-python@v2 - name: Checkout code uses: actions/checkout@v2 - name: Build run: | pip install Cython pip install setuptools python setup.py sdist - name: Create Release id: create_release uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: ${{ github.ref }} draft: false prerelease: false - name: Get Name of Artifact run: | ARTIFACT_PATHNAME=$(ls dist/fpylll-*.tar.gz | head -n 1) ARTIFACT_NAME=$(basename $ARTIFACT_PATHNAME) echo "ARTIFACT_NAME=${ARTIFACT_NAME}" >> $GITHUB_ENV echo "ARTIFACT_PATHNAME=${ARTIFACT_PATHNAME}" >> $GITHUB_ENV - name: Upload Release Asset id: upload-release-asset uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.create_release.outputs.upload_url }} asset_path: ${{ env.ARTIFACT_PATHNAME }} asset_name: ${{ env.ARTIFACT_NAME }} asset_content_type: application/gzip fpylll-0.6.1/.github/workflows/sdist.yml000066400000000000000000000033241455321202600202700ustar00rootroot00000000000000name: Source Distribution Test on: push: pull_request: schedule: - cron: '0 0 * * 0' # weekly env: JOBS: 2 DEPS: libgmp-dev libmpfr-dev libqd-dev libtool autoconf python3-pip python3-dev python3-flake8 jobs: build: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.10", "3.11", "3.12"] have_qd: [yes, no] have_numpy: [yes, no] env: HAVE_QD: ${{ matrix.have_qd }} HAVE_NUMPY: ${{ matrix.have_numpy }} steps: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Check out uses: actions/checkout@v2 - name: Install prerequisites run: | sudo DEBIAN_FRONTEND=noninteractive apt-get update sudo DEBIAN_FRONTEND=noninteractive apt-get install $DEPS - name: Dependencies run: | git clone https://github.com/fplll/fplll cd fplll || exit ./autogen.sh ./configure --disable-static 
--prefix=/usr --with-max-enum-dim=64 --with-max-parallel-enum-dim=20 make -j $JOBS sudo make install cd .. pip install Cython pip install -r requirements.txt pip install -r suggestions.txt cd .. rm -rf ./fplll - name: Compile run: | python setup.py sdist cd dist tar xvfz fpylll-*.tar.gz cd $(ls -1 *.tar.gz | sed s/\.tar\.gz//) python setup.py build_ext -j $JOBS python setup.py install - name: Test run: PY_IGNORE_IMPORTMISMATCH=1 python -m pytest fpylll-0.6.1/.github/workflows/tests.yml000066400000000000000000000032171455321202600203050ustar00rootroot00000000000000name: Tests on: push: pull_request: workflow_dispatch: schedule: - cron: '0 0 * * 0' # weekly env: JOBS: 2 DEPS: libgmp-dev libmpfr-dev libqd-dev libtool autoconf python3-pip python3-dev python3-flake8 jobs: build: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.10", "3.11", "3.12"] have_qd: [yes, no] have_ld: [yes, no] have_numpy: [yes, no] env: HAVE_QD: ${{ matrix.have_qd }} HAVE_LONG_DOUBLE: ${{ matrix.have_ld }} HAVE_NUMPY: ${{ matrix.have_numpy }} steps: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Check out uses: actions/checkout@v2 - name: Install prerequisites run: | sudo DEBIAN_FRONTEND=noninteractive apt-get update sudo DEBIAN_FRONTEND=noninteractive apt-get install $DEPS - name: Dependencies run: | git clone https://github.com/fplll/fplll cd fplll || exit ./autogen.sh ./configure --disable-static --prefix=/usr --with-max-enum-dim=64 --with-max-parallel-enum-dim=20 make -j $JOBS sudo make install cd .. pip install Cython pip install -r requirements.txt pip install -r suggestions.txt cd .. 
rm -rf ./fplll - name: Compile run: | python setup.py build_ext -j $JOBS python setup.py install - name: Test run: PY_IGNORE_IMPORTMISMATCH=1 python -m pytest fpylll-0.6.1/.gitignore000066400000000000000000000015031455321202600150070ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation _build/ # PyBuilder target/ /.vagrant/ /Vagrantfile /src/fpylll/config.pxi /.ipynb_checkpoints/*.ipynb /MANIFEST /valgrind-python.supp /.pytest_cache/ /activate /fpylll-env /fpylll-fplllfpylll-0.6.1/Dockerfile000066400000000000000000000013571455321202600150200ustar00rootroot00000000000000## -*- docker-image-name: "fplll/fpylll" -*- FROM fplll/fplll MAINTAINER Martin Albrecht ARG BRANCH=master ARG JOBS=2 ARG CXXFLAGS="-O2 -march=x86-64" ARG CFLAGS="-O2 -march=x86-64" SHELL ["/bin/bash", "-c"] ENTRYPOINT /usr/local/bin/ipython RUN apt update && \ apt install -y python3-pip python3-dev zlib1g-dev libjpeg-dev && \ apt clean RUN rm /usr/lib/python3.11/EXTERNALLY-MANAGED RUN git clone --branch $BRANCH https://github.com/fplll/fpylll RUN pip3 install -r fpylll/requirements.txt RUN pip3 install -r fpylll/suggestions.txt RUN cd fpylll && \ CFLAGS=$CFLAGS CXXFLAGS=$CXXFLAGS python3 setup.py build -j $JOBS && \ python3 setup.py -q install && \ cd .. 
&& \ rm -rf fpylll fpylll-0.6.1/LICENSE000066400000000000000000000431771455321202600140410ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. {description} Copyright (C) {year} {fullname} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. {signature of Ty Coon}, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. fpylll-0.6.1/MANIFEST.in000066400000000000000000000002661455321202600145620ustar00rootroot00000000000000global-include README.rst requirements.txt suggestions.txt global-include setup.py *.pyx graft src include tests/test_* include tests/tools.py include LICENSE prune build prune dist fpylll-0.6.1/README.rst000066400000000000000000000264341455321202600145200ustar00rootroot00000000000000fpylll ====== A Python wrapper for `fplll `__. .. image:: https://github.com/fplll/fpylll/workflows/Tests/badge.svg :target: https://github.com/fplll/fpylll/actions?query=workflow%3ATests .. 
image:: https://badge.fury.io/py/fpylll.svg :target: https://badge.fury.io/py/fpylll .. image:: https://readthedocs.org/projects/fpylll/badge/?version=latest :target: http://fpylll.readthedocs.io/en/latest/?badge=latest .. code-block:: python >>> from fpylll import * >>> A = IntegerMatrix(50, 50) >>> A.randomize("ntrulike", bits=50, q=127) >>> A[0].norm() 3564748886669202.5 >>> M = GSO.Mat(A) >>> M.update_gso() >>> M.get_mu(1,0) 0.815748944429783 >>> L = LLL.Reduction(M) >>> L() >>> M.get_mu(1,0) 0.41812865497076024 >>> A[0].norm() 24.06241883103193 The basic BKZ algorithm can be implemented in about 60 pretty readable lines of Python code (cf. `simple_bkz.py `__). For a quick tour of the library, you can check out the `tutorial `__. How to cite ----------- .. code-block:: @unpublished{fpylll, author = {The {FPLLL} development team}, title = {{fpylll}, a {Python} wrapper for the {fplll} lattice reduction library, {Version}: 0.6.1}, year = 2023, note = {Available at \url{https://github.com/fplll/fpylll}}, url = {https://github.com/fplll/fpylll} } Requirements ------------ **fpylll** relies on the following C/C++ libraries: - `GMP `__ or `MPIR `__ for arbitrary precision integer arithmetic. - `MPFR `__ for arbitrary precision floating point arithmetic. - `QD `__ for double double and quad double arithmetic (optional). - `fplll `__ for pretty much everything. **fpylll** also relies on - `Cython `__ for linking Python and C/C++. - `cysignals `__ for signal handling such as interrupting C++ code. - `py.test `__ for testing Python. - `flake8 `__ for linting. We also suggest - `virtualenv `__ to build and install fpylll in - `IPython `__ for interacting with Python - `Numpy `__ for numerical computations (e.g. with Gram-Schmidt values) Online ------ **fpylll** ships with Sage. Thus, it is available via `SageMathCell `__ and `CoCalc `__ (select a Jupyter notebook with a Sage kernel). 
Getting Started --------------- **Note:** fpylll is also available via `PyPI `__ and `Conda-Forge `__ for `Conda `__. In what follows, we explain manual installation. We recommend `virtualenv `__ for isolating Python build environments and `virtualenvwrapper `__ to manage virtual environments. We indicate active virtualenvs by the prefix ``(fpylll)``. **Automatic install** 1. Run bootstrap.sh .. code-block:: bash $ ./bootstrap.sh $ source ./activate **Manual install** 1. Create a new virtualenv and activate it: .. code-block:: bash $ virtualenv env $ ln -s ./env/bin/activate ./ $ source ./activate 2. Install the required libraries - `GMP `__ or `MPIR `__ and `MPFR `__ - if not available already. You may also want to install `QD `__. 3. Install fplll: .. code-block:: bash $ (fpylll) ./install-dependencies.sh $VIRTUAL_ENV Some OSX users report that they required ``export CXXFLAGS="-stdlib=libc++ -mmacosx-version-min=10.7"`` and ``export CXX=clang++`` (after installing a recent clang with `brew `__) since the default GCC installed by Apple does not have full C++11 support. 4. Then, execute: .. code-block:: bash $ (fpylll) pip install -r requirements.txt to install the required Python packages (see above). 5. If you are so inclined, run: .. code-block:: bash $ (fpylll) pip install -r suggestions.txt to install suggested Python packages as well (optional). 6. Build the Python extension: .. code-block:: bash $ (fpylll) export PKG_CONFIG_PATH="$VIRTUAL_ENV/lib/pkgconfig:$PKG_CONFIG_PATH" $ (fpylll) python setup.py build_ext $ (fpylll) python setup.py install 7. To run **fpylll**, you will need to: .. code-block:: bash $ (fpylll) export LD_LIBRARY_PATH="$VIRTUAL_ENV/lib" so that Python can find fplll and friends. Note that you can also patch ``activate`` to set ``LD_LIBRRY_PATH``. For this, add: .. 
code-block:: bash ### LD_LIBRARY_HACK _OLD_LD_LIBRARY_PATH="$LD_LIBRARY_PATH" LD_LIBRARY_PATH="$VIRTUAL_ENV/lib:$LD_LIBRARY_PATH" export LD_LIBRARY_PATH ### END_LD_LIBRARY_HACK ### PKG_CONFIG_HACK _OLD_PKG_CONFIG_PATH="$PKG_CONFIG_PATH" PKG_CONFIG_PATH="$VIRTUAL_ENV/lib/pkgconfig:$PKG_CONFIG_PATH" export PKG_CONFIG_PATH ### END_PKG_CONFIG_HACK towards the end and: .. code-block:: bash ### LD_LIBRARY_HACK if ! [ -z ${_OLD_LD_LIBRARY_PATH+x} ] ; then LD_LIBRARY_PATH="$_OLD_LD_LIBRARY_PATH" export LD_LIBRARY_PATH unset _OLD_LD_LIBRARY_PATH fi ### END_LD_LIBRARY_HACK ### PKG_CONFIG_HACK if ! [ -z ${_OLD_PKG_CONFIG_PATH+x} ] ; then PKG_CONFIG_PATH="$_OLD_PKG_CONFIG_PATH" export PKG_CONFIG_PATH unset _OLD_PKG_CONFIG_PATH fi ### END_PKG_CONFIG_HACK in the ``deactivate`` function in the ``activate`` script. **Running fpylll** 1. To (re)activate the virtual environment, simply run: .. code-block:: bash $ source ./activate 2. Start Python: .. code-block:: bash $ (fpylll) ipython **Manual update of fpylll and fplll inside Sagemath 9.0+** The instructions are very similar to the manual ones above. 1. Activate the sage-sh virtualenv: .. code-block:: bash $ sage -sh 2. Install the required libraries - `GMP `__ or `MPIR `__ and `MPFR `__ - if not available already. You may also want to install `QD `__. 3. Install fplll: .. code-block:: bash $ (sage-sh) ./install-dependencies.sh $SAGE_LOCAL Some OSX users report that they required ``export CXXFLAGS="-stdlib=libc++ -mmacosx-version-min=10.7"`` and ``export CXX=clang++`` (after installing a recent clang with `brew `__) since the default GCC installed by Apple does not have full C++11 support. 4. Then, execute: .. code-block:: bash $ (sage-sh) pip3 install -r requirements.txt to install the required Python packages (see above). 5. If you are so inclined, run: .. code-block:: bash $ (sage-sh) pip3 install -r suggestions.txt to install suggested Python packages as well (optional). 6. Build the Python extension: .. 
code-block:: bash $ (sage-sh) export PKG_CONFIG_PATH="$SAGE_LOCAL/lib/pkgconfig:$PKG_CONFIG_PATH" $ (sage-sh) python3 setup.py build_ext $ (sage-sh) python3 setup.py install $ (sage-sh) exit 7. Verify the upgrade went well: .. code-block:: bash $ sage sage: import fpylll sage: print(fpylll.__version__) The output should match the value of `__version__` in `src/fpylll/__init__.py `__. Multicore Support ----------------- **fpylll** supports parallelisation on multiple cores. For all C++ support to drop the `GIL `_ is enabled, allowing the use of threads to parallelise. Fplll is thread safe as long as each thread works on a separate object such as ``IntegerMatrix`` or ``MatGSO``. Also, **fpylll** does not actually drop the GIL in all calls to C++ functions yet. In many scenarios using `multiprocessing `_, which sidesteps the GIL and thread safety issues by using processes instead of threads, will be the better choice. The example below calls ``LLL.reduction`` on 128 matrices of dimension 30 on four worker processes. .. code-block:: python from fpylll import IntegerMatrix, LLL from multiprocessing import Pool d, workers, tasks = 30, 4, 128 def run_it(p, f, A, prefix=""): """Print status during parallel execution.""" import sys r = [] for i, retval in enumerate(p.imap_unordered(f, A, 1)): r.append(retval) sys.stderr.write('\r{0} done: {1:.2%}'.format(prefix, float(i)/len(A))) sys.stderr.flush() sys.stderr.write('\r{0} done {1:.2%}\n'.format(prefix, float(i+1)/len(A))) return r A = [IntegerMatrix.random(d, "uniform", bits=30) for _ in range(tasks)] A = run_it(Pool(workers), LLL.reduction, A) To test threading simply replace the line ``from multiprocessing import Pool`` with ``from multiprocessing.pool import ThreadPool as Pool``. For calling ``BKZ.reduction`` this way, which expects a second parameter with options, using `functools.partial `_ is a good choice. Contributing ------------ **fpylll** welcomes contributions, cf. the list of `open issues `_. 
To contribute, clone this repository, commit your code on a separate branch and send a pull request. Please write tests for your code. You can run them by calling:: $ (fpylll) PY_IGNORE_IMPORTMISMATCH=1 py.test from the top-level directory which runs all tests in ``tests/test_*.py``. We run `flake8 `_ on every commit automatically. In particular, we run:: $ (fpylll) flake8 --max-line-length=120 --max-complexity=16 --ignore=E22,E241 src Note that **fpylll** supports Python 3. In particular, tests are run using Python 3.10, 3.11 and 3.12. See `.tests.yml `_ for details on automated testing. Attribution & License --------------------- **fpylll** is maintained by Martin Albrecht. The following people have contributed to **fpylll** + Eamonn Postlethwaite + E M Bray + Fernando Virdia + Guillaume Bonnoron + Jeroen Demeyer + Jérôme Benoit + Konstantinos Draziotis + Leo Ducas + Martin Albrecht + Michael Walter + Omer Katz We copied a decent bit of code over from Sage, mostly from its fpLLL interface. **fpylll** is licensed under the GPLv2+. This project was supported through the European Union PROMETHEUS project (Horizon 2020 Research and Innovation Program, grant 780701), EPSRC grant EP/P009417/1 and EPSRC grant EP/S020330/1. fpylll-0.6.1/bootstrap.sh000077500000000000000000000042061455321202600153760ustar00rootroot00000000000000#!/usr/bin/env bash jobs="-j 4 " if [ "$1" = "-j" ]; then jobs="-j $2 " fi # Create Virtual Environment if [ "$PYTHON" = "" ]; then PYTHON=python; export PYTHON; fi PIP="$PYTHON -m pip" PYVER=$($PYTHON --version | cut -d' ' -f2) echo "Usage:" echo " ./bootstrap.sh [ -j <#jobs> ] (uses system's python)" echo " PYTHON=python2 ./bootstrap.sh (uses python2)" echo " PYTHON=python3 ./bootstrap.sh (uses python3)" echo " " echo "Using python version: $PYVER" echo "Using $jobs" sleep 1 rm -rf fpylll-env $PYTHON -m virtualenv fpylll-env if [ ! -d fpylll-env ]; then echo "Failed to create virtual environment in 'fpylll-env' !"
echo "Is '$PYTHON -m virtualenv' working?" echo "Try '$PYTHON -m pip install virtualenv' otherwise." exit 1 fi cat <>fpylll-env/bin/activate ### LD_LIBRARY_HACK _OLD_LD_LIBRARY_PATH="\$LD_LIBRARY_PATH" LD_LIBRARY_PATH="\$VIRTUAL_ENV/lib:\$LD_LIBRARY_PATH" export LD_LIBRARY_PATH ### END_LD_LIBRARY_HACK EOF ln -s fpylll-env/bin/activate source ./activate $PIP install -U pip $PIP install Cython $PIP install cysignals # Install FPLLL cat <>fpylll-env/bin/activate CFLAGS="\$CFLAGS -O3 -march=native -Wp,-U_FORTIFY_SOURCE" CXXFLAGS="\$CXXFLAGS -O3 -march=native -Wp,-U_FORTIFY_SOURCE" export CFLAGS export CXXFLAGS EOF deactivate source ./activate git clone https://github.com/fplll/fplll fpylll-fplll cd fpylll-fplll || exit ./autogen.sh ./configure --prefix="$VIRTUAL_ENV" $CONFIGURE_FLAGS make clean make $jobs retval=$? if [ $retval -ne 0 ]; then echo "Making fplll failed." echo "Check the logs above - they'll contain more information." exit 2 # 2 is the exit value if building fplll fails as a result of make $jobs. fi make install if [ $retval -ne 0 ]; then echo "Make install failed for fplll." echo "Check the logs above - they'll contain more information." exit 3 # 3 is the exit value if installing fplll failed. fi cd .. $PIP install -r requirements.txt $PIP install -r suggestions.txt $PYTHON setup.py clean $PYTHON setup.py build $jobs || $PYTHON setup.py build_ext $PYTHON setup.py install echo "Don't forget to activate environment each time:" echo " source ./activate" fpylll-0.6.1/docs/000077500000000000000000000000001455321202600137505ustar00rootroot00000000000000fpylll-0.6.1/docs/Makefile000066400000000000000000000163611455321202600154170ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. 
SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all 
changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/fpyLLL.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/fpyLLL.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." 
devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/fpyLLL" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/fpyLLL" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. 
The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." fpylll-0.6.1/docs/conf.py000066400000000000000000000233271455321202600152560ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # fpylll documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 20:50:11 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex try: from itertools import ifilter as filter except ImportError: pass # python 3 from ast import parse with open(os.path.join('..', 'src', 'fpylll', '__init__.py')) as f: __version__ = parse(next(filter(lambda line: line.startswith('__version__'), f))).body[0].value.s # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'fpylll' copyright = u'2015-2019, fpylll team' author = u'fpylll team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = "math" # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: html_theme = 'default' else: html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'fpyllldoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'fpylll.tex', u'fpylll Documentation', u'Martin R. Albrecht', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'fpylll', u'fpylll Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'fpylll', u'fpylll Documentation', author, 'fpylll', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Monkey-patch inspect with Cython support # See http://opendreamkit.org/2017/06/09/CythonSphinx/ def isfunction(obj): return hasattr(type(obj), "__code__") import inspect inspect.isfunction = isfunction fpylll-0.6.1/docs/example-custom-pruning.rst000066400000000000000000000036711455321202600211340ustar00rootroot00000000000000.. _example-custom-pruning: .. role:: math(raw) :format: html latex .. .. role:: raw-latex(raw) :format: latex .. :orphan: Linear Pruning ============== If we want to use pruning we can use the default pruning of fplll or to use our custom pruning. For instance, say that for some reason we want to use linear pruning. Then, we shall define the new linear pruning strategy as follows. :: >>> def linear_pruning_strategy(block_size, level): ... if level > block_size - 1: ... raise ValueError ... if block_size < 5: ... raise ValueError ... from fpylll import BKZ ... from fpylll.fplll.pruner import PruningParams ... from fpylll.fplll.bkz_param import Strategy ... preprocessing = 3 ... 
strategies1 = [Strategy(i) for i in range(6)] ... for b in range(6, block_size+1): ... if block_size == b: ... pr = PruningParams.LinearPruningParams(block_size, level) ... s = Strategy(b, [preprocessing], [pr]) ... else: ... s = Strategy(b, [preprocessing]) ... strategies1.append(s) ... param = BKZ.Param(block_size = block_size, strategies = strategies1) ... return param So, now we can define a new strategy that uses linear pruning :: >>> LP = linear_pruning_strategy(10, 6) Now, we shall compute the BKZ reduction of a large matrix using linear pruning. :: >>> from fpylll import IntegerMatrix, BKZ, FPLLL >>> A = IntegerMatrix(70, 71) >>> FPLLL.set_random_seed(2013) >>> A.randomize("intrel", bits=100) >>> bkz_reduced = BKZ.reduction(A, LP) Now, ``bkz_reduced`` is the BKZ reduced matrix of **A** using linear pruning with blocksize 10 and level 6. If we want to use the default strategy of fplll (which is faster than the previous linear pruning strategy) we use ``BKZ.DEFAULT_STRATEGY``, :: >>> param = BKZ.Param(block_size=10, strategies=BKZ.DEFAULT_STRATEGY) >>> bkz_reduced_2 = BKZ.reduction(A, param) and :: >>> bkz_reduced == bkz_reduced_2 True fpylll-0.6.1/docs/example-gauss-circle-problem.rst000066400000000000000000000054661455321202600221650ustar00rootroot00000000000000.. _example-gauss-circle-problem: .. role:: math(raw) :format: html latex .. :orphan: .. role:: raw-latex(raw) :format: latex .. Gauss Circle Problem ==================== Using enumeration we can study the Gauss circle problem [1]_, i.e. the problem of finding the number `N_R=|\{(x,y)\in {\mathbb{Z}}^2 : x^2+y^2\leq R^2 \}|`. Gauss proved that `N_R=\pi R^2 + Err(R),` with `|Err(R)|\leq 2\sqrt{2}\pi R`. 
The following code will compute all the points in the set `\{{(x,y)\in \mathbb{Z}}^2 : x^2+y^2\leq R^2\}`: :: >>> from fpylll.fplll.gso import MatGSO >>> from fpylll.fplll.integer_matrix import IntegerMatrix >>> from fpylll import FPLLL >>> from fpylll import Enumeration, EvaluatorStrategy >>> FPLLL.set_random_seed(1337) >>> def gauss(radius, dim, nr): ... A = IntegerMatrix.identity(dim) #define the latttice Z^dim ... M = MatGSO(A) ... _ = M.update_gso() ... enum = Enumeration(M, nr_solutions = nr) ... e1 = enum.enumerate(0, dim, radius**2, 0) ... return [tuple(dim*[0])] + [v for d,v in e1] + [tuple([-x for x in v]) for d,v in e1] For instance `N_2` is given by :: >>> g = gauss(2, 2, 100) >>> len(g) 13 >>> g [(0, 0), (0.0, 1.0), (1.0, 0.0), (-1.0, 1.0), (1.0, 1.0), (0.0, 2.0), (2.0, 0.0), (-0.0, -1.0), (-1.0, -0.0), (1.0, -1.0), (-1.0, -1.0), (-0.0, -2.0), (-2.0, -0.0)] For `{\rm dim} = 2` is enough to choose the parameter `nr = \lceil \pi R^2+2\sqrt{2}\pi R\rceil.` For `R=80` we get :: >>> from math import pi, ceil, sqrt >>> R = 80 >>> nr = ceil(pi*R**2 + 2*sqrt(2)*pi*R) >>> len(gauss(R,2,nr)) 20081 The parameter `nr_solutions` is by default `1.` If we set say, `{\rm{nr\_solutions}}= nr = \lceil \pi R^2+2\sqrt{2}\pi R\rceil,` then the enumerate function will return, say the set `\mathcal{A}_R=\{{\bf x}_1,...,{\bf x}_{n}\}` `(n>> # computation of N_R >>> from fpylll.fplll.gso import MatGSO >>> from fpylll.fplll.integer_matrix import IntegerMatrix >>> from fpylll import Enumeration, EvaluatorStrategy >>> from math import pi, ceil, sqrt >>> def n(radius): ... dim = 2 ... nr = ceil(pi*radius**2 + 2*sqrt(2)*pi*radius) ... A = IntegerMatrix.idenity(dim) ... M = MatGSO(A) ... _ = M.update_gso() ... enum = Enumeration(M,nr_solutions = nr) ... e1 = enum.enumerate(0, dim, radius**2, 0) ... return 2*len(e1)+1 .. 
[1] https://en.wikipedia.org/wiki/Gauss_circle_problem fpylll-0.6.1/docs/example-linear-diophantine-equations.rst000066400000000000000000000046761455321202600237300ustar00rootroot00000000000000.. _example-linear-diophantine-equations: .. role:: math(raw) :format: html latex .. .. role:: raw-latex(raw) :format: latex .. :orphan: Small solutions to a linear diophantine equation ================================================ Say we want find a small solution to a linear diophantine equation `\sum_{i=1}^{n}a_ix_i=a_0`. In general Euclidean algorithm will provide a solution in polynomial time. This method does not provide small solution in general. We can use the lattice from [1]_ to attack this problem. Let A be the basis: .. math:: A = \begin{bmatrix} 1 & 0 & 0 & \cdots & 0 & 0 & N_2a_1 \\ 0 & 1 & 0 & \cdots & 0 & 0 & N_2a_2 \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 0 & 0 & 0 & \cdots & 1& 0 &N_2a_n \\ 0 & 0 & 0 & \cdots & 0 & N_1 &-N_2a_0 \end{bmatrix} where `N_1`, `N_2` are some positive integers. Say `(x_1,x_2,...,x_n,x_{n+1},x_{n+2})` is a row of the LLL-reduced matrix of A. If `x_{n+1}=N_1, x_{n+2}=0,` then `(x_1,...,x_n)` is a solution of the linear equation. Say `{\bf a} = (1124, 1799, 1151, 1979, 1799, 1625, 1077, 1666, 1438, 1739)`, `a_0=22833`, `N_1=100`, `N_2=1000`.:: :: >>> from fpylll import IntegerMatrix, LLL >>> N1 = 100 >>> N2 = 10000 >>> a = [1124, 1799, 1151, 1979, 1799, 1625, 1077, 1666, 1438, 1739] >>> a0 = 22833 >>> n = len(a) >>> M = IntegerMatrix(n+1, n+2) >>> for i in range(len(a)): ... M[i, -1] = a[i]*N2 ... M[i, i] = 1 ... 
>>> M[-1, -2] = N1 >>> M[-1, -1] = -a0 * N2 We can now apply LLL:: >>> L = LLL.reduction(M); print(L) [ 0 -1 0 0 1 0 0 0 0 0 0 0 ] [ 0 1 0 0 0 0 1 0 -2 0 0 0 ] [ -1 0 -1 -1 0 0 1 0 1 1 0 0 ] [ -1 -1 0 1 -1 0 1 1 0 0 0 0 ] [ 1 -1 0 0 -1 1 1 -1 1 0 0 0 ] [ 0 0 0 0 0 1 -2 1 -2 1 0 0 ] [ 0 0 0 0 0 -2 0 -1 1 2 0 0 ] [ -1 -1 2 0 0 1 -1 -1 0 1 0 0 ] [ -2 1 -2 3 0 2 0 -3 -1 0 0 0 ] [ 1 2 0 1 2 3 1 1 1 2 100 0 ] [ 1 0 0 1 0 0 0 -1 -1 0 0 -10000 ] So a small solution is `{\bf v} = ( 1, 2, 0, 1, 2, 3, 1, 1, 1, 2 ),` with norm :: >>> L.submatrix(0, 0, n, n)[-1].norm() # doctest: +ELLIPSIS 5.099019513... .. [1] K. Aardal, C. Hurkens, A. Lenstra, Solving a linear Diophantine equation with lower and upper bounds on the variables. Integer programming and combinatorial optimization LNCS 1412, p.229–242, 1998. fpylll-0.6.1/docs/index.rst000066400000000000000000000003271455321202600156130ustar00rootroot00000000000000Overview ======== .. include:: ../README.rst Modules ======= .. toctree:: :maxdepth: 2 modules Indices and Tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` fpylll-0.6.1/docs/modules.rst000066400000000000000000000036531455321202600161610ustar00rootroot00000000000000fplll Modules ============= The modules in this category in some way represent classes or functions from fplll. They are typically implemented in Cython. Integer Matrices ---------------- .. automodule:: fpylll.fplll.integer_matrix :members: :undoc-members: :special-members: __init__, __call__, __copy__, __getitem__, __setitem__, Gram Schmidt Orthogonalization ------------------------------ .. automodule:: fpylll.fplll.gso :members: :undoc-members: :special-members: __init__, __call__ LLL Wrapper ----------- .. automodule:: fpylll.fplll.wrapper :members: :undoc-members: :special-members: __init__, __call__ LLL --- .. automodule:: fpylll.fplll.lll :members: :undoc-members: :special-members: __init__, __call__ BKZ --- .. 
automodule:: fpylll.fplll.bkz :members: :undoc-members: :special-members: __init__, __call__ SVP and CVP ----------- .. automodule:: fpylll.fplll.svpcvp :members: :undoc-members: :special-members: __init__, __call__ Pruning ------- .. automodule:: fpylll.fplll.pruner :members: :undoc-members: :special-members: __init__, __call__ Enumeration ----------- .. automodule:: fpylll.fplll.enumeration :members: :undoc-members: :special-members: __init__, __call__ Utilities --------- .. automodule:: fpylll.util :members: :undoc-members: :special-members: __init__, __call__ Python Algorithms ================= The modules in this category extend the functionality of fplll in some way by implementing algorithms in Python. Simple BKZ ---------- .. automodule:: fpylll.algorithms.simple_bkz :special-members: __init__, __call__ :members: :undoc-members: Simple Dual BKZ --------------- .. automodule:: fpylll.algorithms.simple_dbkz :special-members: __init__, __call__ :members: :undoc-members: BKZ --- .. automodule:: fpylll.algorithms.bkz :special-members: __init__, __call__ :members: :undoc-members: fpylll-0.6.1/docs/tutorial.rst000066400000000000000000000153711455321202600163540ustar00rootroot00000000000000.. role:: math(raw) :format: html latex .. :orphan: .. role:: raw-latex(raw) :format: latex .. .. _tutorial: Tutorial ======== Matrix generators ----------------- :: >>> from fpylll import IntegerMatrix, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix(9, 10) >>> A.randomize("intrel", bits=10) Matrix :math:`A` is a (random) knapsack type matrix. That is of the form :math:`[ {\bf a} | I_n]`, where :math:`{\bf a}` is a column vector of dimension :math:`n`, and :math:`I_n` the :math:`n`-dimensional identity matrix. 
Giving :: >>> print(A) [ 50 1 0 0 0 0 0 0 0 0 ] [ 556 0 1 0 0 0 0 0 0 0 ] [ 5 0 0 1 0 0 0 0 0 0 ] [ 899 0 0 0 1 0 0 0 0 0 ] [ 383 0 0 0 0 1 0 0 0 0 ] [ 846 0 0 0 0 0 1 0 0 0 ] [ 771 0 0 0 0 0 0 1 0 0 ] [ 511 0 0 0 0 0 0 0 1 0 ] [ 734 0 0 0 0 0 0 0 0 1 ] Also, the following types of matrices are supported, :: >>> from fpylll import FPLLL >>> from copy import copy >>> b = 10 >>> p = 521 # prime >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix(6,6) >>> B = copy(A) >>> C = copy(A) >>> D = copy(A) >>> A.randomize("uniform", bits=b) >>> B.randomize("ntrulike", bits=b, q=p) >>> C.randomize("ntrulike2", bits=b, q=p) >>> D.randomize("qary", bits=b, k=3) For instance:: >>> print(D) [ 1 0 0 858 790 620 ] [ 0 1 0 72 832 133 ] [ 0 0 1 263 121 724 ] [ 0 0 0 877 0 0 ] [ 0 0 0 0 877 0 ] [ 0 0 0 0 0 877 ] For a user defined matrix we use the method ``from_matrix``:: >>> A = IntegerMatrix.from_matrix([[1,2,3,4],[30,4,4,5],[1,-2,3,4],[0,0,1,0]]) Gram-Schmidt tools ------------------- To compute the Gram-Schimdt form of the matrix :math:`{\bf A}`, we use the GSO class:: >>> from fpylll import GSO >>> A = IntegerMatrix.from_matrix([[1,2,3,4],[30,4,4,5],[1,-2,3,4],[0,0,1,0]]) >>> M = GSO.Mat(A) To write a vector :math:`v` as a linear combination of the GS-basis of RowSp(:math:`A`):: >>> A = IntegerMatrix.from_matrix([[1,2,3,4],[30,4,4,5],[1,-2,3,4],[0,0,1,0]]) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> v = (1,2,5,5) >>> v_from_canonical = M.from_canonical(v) >>> print(v_from_canonical) # doctest: +ELLIPSIS (1.3333333333333..., -0.01301973960520..., 0.1949374454466..., 1.2521739130434...) >>> v_back_to_canonical = tuple([int(round(v_)) for v_ in M.to_canonical(v_from_canonical)]) >>> print(v_back_to_canonical) (1, 2, 5, 5) >>> # the dimension of the GS-matrix : ... 
print(M.d) 4 We can then compute the inner product :math:`r_{i,j} = \langle {\bf b}_i, {\bf b}^{*}_j \rangle` and the coefficient :math:`\mu_{i,j} = \langle {\bf b}_i, {\bf b}^*_j \rangle / ||{\bf b}^*_j||^2` (for any :math:`i,j`, here :math:`i=2`, :math:`j=3`) :: >>> i = 3; j = 2; >>> print(M.get_r(i,j)) # doctest: +ELLIPSIS 0.810079798... >>> print(M.get_mu(i,j)) # doctest: +ELLIPSIS 0.0584569876... To compute the determinant of :math:`{\bf A}`, compute either its :math:`\sqrt[n]{~}` or its :math:`\log` :: >>> start_row = 0 >>> stop_row = -1 >>> root_det_A = M.get_root_det(start_row, stop_row) >>> log_det_A = M.get_log_det(start_row, stop_row) >>> print(root_det_A) # root_det_A = det(A)^(1/n) doctest: +ELLIPSIS 21.44761058... >>> print(log_det_A) # log_det_A = exp(det(A)) in base e doctest: +ELLIPSIS 12.26245297... Lattice reduction tools ------------------------ To compute the LLL reduced matrix of :math:`{\bf A}` :: >>> from fpylll import LLL >>> FPLLL.set_random_seed(1337) >>> A.randomize("qary", bits=10, k=3) >>> A_original = copy(A) >>> A_lll = LLL.reduction(A) >>> print(A_lll) [ -1 9 -5 -3 ] [ 12 -2 7 -17 ] [ -18 3 16 -1 ] [ 4 17 20 12 ] To test if a matrix is LLL-reduced :: >>> print(LLL.is_reduced(A_original)) # a uniform matrix is usually not LLL-reduced False >>> print(LLL.is_reduced(A_lll)) True For the BKZ reduction of :math:`{\bf A}` with blocksize say 3 (without pruning), :: >>> from fpylll import BKZ >>> block_size = 3 >>> FPLLL.set_random_seed(1337) >>> A.randomize("qary", bits=10, k=3) >>> A_bkz = BKZ.reduction(A, BKZ.Param(block_size)) >>> print(A_bkz) [ -1 9 -5 -3 ] [ 12 -2 7 -17 ] [ -18 3 16 -1 ] [ 4 17 20 12 ] If we want to use pruning we can use the default pruning of fplll [GNR10]_. 
:: >>> from fpylll import BKZ >>> param = BKZ.Param(block_size = block_size, strategies = BKZ.DEFAULT_STRATEGY) >>> bkz_reduced = BKZ.reduction(A, param) SVP and CVP tools ----------------- To use Babai's Nearest Plane algorithm on the target vector :math:`v` with basis :math:`{\bf A}`, use it from the GSO tool detailed above :: >>> FPLLL.set_random_seed(1337) >>> A = LLL.reduction(IntegerMatrix.random(5, "qary", bits=10, k=3)) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> w = M.babai([1, 17, -3, -75, 102]) >>> A.multiply_left(w) (-4, 16, -5, -78, 97) To compute the norm of a shortest vector of the lattice generated by the rows of the matrix :math:`{\bf A}` we use the ``shortest_vector`` method of the SVP class, and measure the first row of the resulting matrix :math:`{\bf A}` :: >>> from fpylll import SVP >>> SVP.shortest_vector(A) (2, -2, 7, 4, -1) >>> print(A[0]) (2, -2, 7, 4, -1) >>> A[0].norm() 8.602325267042627 For the Closest Vector Problem, fplll (and so fpylll) uses enumeration:: >>> from fpylll import CVP >>> A = IntegerMatrix.from_matrix([[1,2,3,4],[30,4,4,5],[1,-2,3,4]]) >>> t = (1, 2, 5, 5) >>> v0 = CVP.closest_vector(A, t) >>> v0 (1, 2, 3, 4) In fact the following code was executed:: >>> from fpylll.fplll.gso import MatGSO >>> from fpylll.fplll.enumeration import Enumeration >>> M = MatGSO(A) >>> _ = M.update_gso() >>> E = Enumeration(M) >>> _, v2 = E.enumerate(0, A.nrows, 5, 40, M.from_canonical(t))[0] >>> v3 = IntegerMatrix.from_iterable(1, A.nrows, map(lambda x: int(x), v2)) >>> v1 = v3*A >>> print(v1) [ 1 2 3 4 ] Further examples ---------------- More specific examples can be found in: * :doc:`example-gauss-circle-problem` * :doc:`example-linear-diophantine-equations` * :doc:`example-custom-pruning` References ---------- .. [MV] D. Micciancio, P. Voulgaris, Faster exponential time algorithms for the shortest vector problem. In: SODA 2010, pp. 1468--1480 (2010). .. [GNR10] Nicolas Gama, Phong Q. Nguyen, and Oded Regev. 2010. 
Lattice enumeration using extreme pruning. In Proceedings of the 29th Annual international conference on Theory and Applications of Cryptographic Techniques (EUROCRYPT'10), Henri Gilbert (Ed.). Springer-Verlag, Berlin, Heidelberg, 257-278. DOI=http://dx.doi.org/10.1007/978-3-642-13190-5_13 fpylll-0.6.1/install-dependencies.sh000077500000000000000000000011461455321202600174530ustar00rootroot00000000000000#!/bin/bash if [ "$TRAVIS_BRANCH" != "" ]; then FPLLL_BRANCH=$TRAVIS_BRANCH; CONFIGURE_FLAGS="--disable-static --with-max-enum-dim=64 --with-max-parallel-enum-dim=64" fi if [ "$FPLLL_BRANCH" = "" ]; then FPLLL_BRANCH=master CONFIGURE_FLAGS="--disable-static" fi; cloned=$(git clone https://github.com/fplll/fplll -b "$FPLLL_BRANCH") if [ "$cloned" != "0" ]; then git clone https://github.com/fplll/fplll fi cd fplll || exit ./autogen.sh if [ "$1" != "" ]; then ./configure --prefix="$1" $CONFIGURE_FLAGS else ./configure $CONFIGURE_FLAGS fi make make install cd .. rm -rf fplll fpylll-0.6.1/pyproject.toml000066400000000000000000000002001455321202600157240ustar00rootroot00000000000000[build-system] requires = ["setuptools", "Cython", "cysignals"] build-backend = "setuptools.build_meta" fpylll-0.6.1/pytest.ini000066400000000000000000000003041455321202600150460ustar00rootroot00000000000000[pytest] addopts = -v --doctest-glob "*.pyx" --doctest-glob "*.rst" --doctest-modules --ignore=docs/conf.py testpaths = tests docs src/fpylll norecursedirs = build src/fpylll/mpfr src/fpylll/qd fpylll-0.6.1/readthedocs-conda.yml000066400000000000000000000001031455321202600171040ustar00rootroot00000000000000channels: - conda-forge dependencies: - gmp - mpfr - fplll fpylll-0.6.1/readthedocs.yml000066400000000000000000000001261455321202600160270ustar00rootroot00000000000000conda: file: readthedocs-conda.yml python: version: 2.7 pip_install: true fpylll-0.6.1/requirements.txt000066400000000000000000000000561455321202600163050ustar00rootroot00000000000000setuptools Cython>=3.0 pytest cysignals 
black fpylll-0.6.1/setup.py000077500000000000000000000210241455321202600145340ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import os import subprocess import sys import io if "READTHEDOCS" in os.environ: # When building with readthedocs, install the dependencies too. # See https://github.com/rtfd/readthedocs.org/issues/2776 for reqs in ["requirements.txt", "suggestions.txt"]: if os.path.isfile(reqs): subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", reqs]) try: from itertools import ifilter as filter except ImportError: pass # python 3 from os import path from ast import parse try: from setuptools.command.build_ext import build_ext as _build_ext from setuptools.core import setup from setuptools.extension import Extension as _Extension aux_setup_kwds = {"install_requires": ["cysignals"]} except ImportError: from distutils.command.build_ext import build_ext as _build_ext from distutils.core import setup from distutils.extension import Extension as _Extension aux_setup_kwds = {} from copy import copy try: FileNotFoundError except NameError: FileNotFoundError = OSError # Python 2 workaround class Extension(_Extension, object): """ distutils.extension.Extension subclass supporting additional keywords: * fplll: compile and link with flags from fplll as defined below in build_ext.fplll below * other: flags for compiling and linking other extension modules (without fplll flags) as defined below in build_ext.other """ def __init__(self, name, sources, **kwargs): self.fplll = kwargs.pop("fplll", False) self.other = kwargs.pop("other", False) super(Extension, self).__init__(name, sources, **kwargs) class build_ext(_build_ext, object): # CONFIG VARIABLES cythonize_dir = "build" fplll = None other = None def_varnames = ["HAVE_QD", "HAVE_LONG_DOUBLE", "HAVE_NUMPY"] config_pxi_path = os.path.join(".", "src", "fpylll", "config.pxi") def finalize_options(self): super(build_ext, self).finalize_options() def_vars = 
self._generate_config_pxi() include_dirs = [os.path.join(sys.prefix, "include")] library_dirs = [os.path.join(sys.exec_prefix, "lib")] cxxflags = list(filter(None, os.environ.get("CXXFLAGS", "").split())) if self.fplll is None: self.fplll = { "include_dirs": include_dirs, "library_dirs": library_dirs, "language": "c++", "libraries": ["gmp", "mpfr", "fplll"], "extra_compile_args": ["-std=c++11"] + cxxflags, "extra_link_args": ["-std=c++11"], } if def_vars["HAVE_QD"]: self.fplll["libraries"].append("qd") if self.other is None: self.other = { "include_dirs": include_dirs, "library_dirs": library_dirs, "libraries": ["gmp"], } if "READTHEDOCS" in os.environ: # ReadTheDocs uses fplll from Conda, which was built with the old # C++ ABI. self.fplll["extra_compile_args"].append("-D_GLIBCXX_USE_CXX11_ABI=0") if def_vars["HAVE_NUMPY"]: import numpy numpy_args = copy(self.fplll) numpy_args["include_dirs"].append(numpy.get_include()) self.extensions.append( Extension("fpylll.numpy", ["src/fpylll/numpy.pyx"], **numpy_args) ) for ext in self.extensions: if ext.fplll: for key, value in self.fplll.items(): setattr(ext, key, value) elif ext.other: for key, value in self.other.items(): setattr(ext, key, value) def run(self): import Cython.Build self.extensions = Cython.Build.cythonize( self.extensions, include_path=["src"], build_dir=self.cythonize_dir, compiler_directives={"binding": True, "embedsignature": True, "language_level": 2}, ) super(build_ext, self).run() def _generate_config_pxi(self): def_vars = {} config_pxi = [] for defvar in self.def_varnames: # We can optionally read values for these variables for the # environment; this is mostly used to force different values for # testing value = os.environ.get(defvar) if value is not None: value = value.lower() in ["1", "true", "yes"] else: value = getattr(self, "_get_" + defvar.lower())() config_pxi.append("DEF {0}={1}".format(defvar, value)) def_vars[defvar] = value config_pxi = "\n".join(config_pxi) + "\n" try: cur_config_pxi = 
open(self.config_pxi_path, "r").read() except IOError: cur_config_pxi = "" if cur_config_pxi != config_pxi: # check if we need to write with open(self.config_pxi_path, "w") as fw: fw.write(config_pxi) return def_vars def _get_have_qd(self): if "CONDA_PREFIX" in os.environ: os.environ["PKG_CONFIG_PATH"] = ":".join( [ os.path.join(os.environ["CONDA_PREFIX"], "lib", "pkgconfig"), os.environ.get("PKG_CONFIG_PATH", ""), ] ) if "VIRTUAL_ENV" in os.environ: os.environ["PKG_CONFIG_PATH"] = ":".join( [ os.path.join(os.environ["VIRTUAL_ENV"], "lib", "pkgconfig"), os.environ.get("PKG_CONFIG_PATH", ""), ] ) try: libs = subprocess.check_output(["pkg-config", "fplll", "--libs"]) if b"-lqd" in libs: return True except (subprocess.CalledProcessError, FileNotFoundError): pass return False def _get_have_numpy(self): try: import numpy return True except ImportError: pass return False def _get_have_long_double(self): # Ideally this would check the fplll headers explicitly for the # the FPLLL_WITH_LONG_DOUBLE define, but for now it suffices to # say that long double support is disabled on Cygwin return not sys.platform.startswith("cygwin") # EXTENSIONS extensions = [ Extension("fpylll.gmp.pylong", ["src/fpylll/gmp/pylong.pyx"], other=True), Extension("fpylll.fplll.integer_matrix", ["src/fpylll/fplll/integer_matrix.pyx"], fplll=True), Extension("fpylll.fplll.gso", ["src/fpylll/fplll/gso.pyx"], fplll=True), Extension("fpylll.fplll.lll", ["src/fpylll/fplll/lll.pyx"], fplll=True), Extension("fpylll.fplll.wrapper", ["src/fpylll/fplll/wrapper.pyx"], fplll=True), Extension("fpylll.fplll.bkz_param", ["src/fpylll/fplll/bkz_param.pyx"], fplll=True), Extension("fpylll.fplll.bkz", ["src/fpylll/fplll/bkz.pyx"], fplll=True), Extension("fpylll.fplll.enumeration", ["src/fpylll/fplll/enumeration.pyx"], fplll=True), Extension("fpylll.fplll.svpcvp", ["src/fpylll/fplll/svpcvp.pyx"], fplll=True), Extension("fpylll.fplll.pruner", ["src/fpylll/fplll/pruner.pyx"], fplll=True), Extension("fpylll.util", 
["src/fpylll/util.pyx"], fplll=True), Extension("fpylll.io", ["src/fpylll/io.pyx"], fplll=True), Extension("fpylll.config", ["src/fpylll/config.pyx"], fplll=True), ] # VERSION with open(path.join("src", "fpylll", "__init__.py")) as f: __version__ = ( parse(next(filter(lambda line: line.startswith("__version__"), f))).body[0].value.s ) # FIRE def readme_to_long_description(): """ Python wants long descriptions to be plain ASCII. Our contributors have names that are not plain ASCII. Thus, we cut off the list of contributors when reading the long description. """ long_description = io.open("README.rst", encoding="utf-8").read() cut = long_description.index("Attribution & License") return str(long_description[:cut]) setup( name="fpylll", description="A Python interface for https://github.com/fplll/fplll", author=u"Martin R. Albrecht", author_email="fplll-devel@googlegroups.com", url="https://github.com/fplll/fpylll", version=__version__, ext_modules=extensions, package_dir={"": "src"}, packages=["fpylll", "fpylll.gmp", "fpylll.fplll", "fpylll.algorithms", "fpylll.tools"], license="GNU General Public License, version 2 or later", long_description=readme_to_long_description(), cmdclass={"build_ext": build_ext}, **aux_setup_kwds ) fpylll-0.6.1/src/000077500000000000000000000000001455321202600136075ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/000077500000000000000000000000001455321202600151115ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/__init__.py000066400000000000000000000007471455321202600172320ustar00rootroot00000000000000# flake8: noqa from __future__ import absolute_import from .fplll.integer_matrix import IntegerMatrix from .fplll.gso import GSO from .fplll.lll import LLL from .fplll.enumeration import Enumeration, EnumerationError, EvaluatorStrategy from .fplll.bkz import BKZ from .fplll.bkz_param import load_strategies_json from .fplll.svpcvp import SVP from .fplll.svpcvp import CVP from .fplll.pruner import Pruning from .util import ReductionError 
from .util import FPLLL __version__ = "0.6.1" fpylll-0.6.1/src/fpylll/algorithms/000077500000000000000000000000001455321202600172625ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/algorithms/__init__.py000066400000000000000000000000001455321202600213610ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/algorithms/babai.py000066400000000000000000000034031455321202600206720ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Babai's Nearest Plane algorithm .. moduleauthor:: Martin R. Albrecht """ from fpylll import IntegerMatrix, LLL from math import ceil def babai(B, t): """ Run Babai's Nearest Plane algorithm by running LLL. :param B: Input lattice basis. :param target: Target point (∈ ZZ^n) :returns coordinates of the solution vector: This implementation is more numerically stable compared to the one offered by `MatGSO.babai()`. On the other hand, this implementation will only accept tatgets with Integer coefficients. EXAMPLE :: >>> from fpylll import * >>> n = 10 >>> B = IntegerMatrix(n, n + 1) >>> B.randomize("intrel", bits=100) >>> v_opt = B.multiply_left([1,0,1,0,1,1,0,0,1,1]) >>> s = v_opt[0] # s = , where a is vector of knapsack values. 
>>> t = [s] + (n * [0]) >>> v = CVP.babai(B, t) >>> v[0] == t[0] True >>> v[1:] (1, 0, 1, 0, 1, 1, 0, 0, 1, 1) >>> _ = LLL.reduction(B) >>> v == CVP.closest_vector(B, t) True """ A = IntegerMatrix(B.nrows + 1, B.ncols + 1) for i in range(B.nrows): for j in range(B.ncols): A[i, j] = B[i, j] # make sure the input is LLL reduced before reading the norm of the last vector LLL.reduction(A) # zero vector at the end A.swap_rows(0, B.nrows) for j in range(B.ncols): A[-1, j] = t[j] A[-1, -1] = ceil(A[-2].norm()) LLL.reduction(A) # now call LLL to run Babai v = [0] * len(t) if A[-1, -1] > 0: for i in range(len(t)): v[i] = t[i] - A[-1][i] else: for i in range(len(t)): v[i] = t[i] + A[-1][i] return tuple(v) fpylll-0.6.1/src/fpylll/algorithms/bkz.py000066400000000000000000000263041455321202600204270ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Block Korkine Zolotarev algorithm in Python. .. moduleauthor:: Martin R. Albrecht This module reimplements fplll's BKZ algorithm in Python. It has feature parity with the C++ implementation in fplll's core. Additionally, this implementation collects some additional statistics. Hence, it should provide a good basis for implementing variants of this algorithm. """ from __future__ import absolute_import try: from time import process_time # Python 3 except ImportError: from time import clock as process_time # Python 2 from fpylll import IntegerMatrix, GSO, LLL from fpylll import BKZ from fpylll import Enumeration from fpylll import EnumerationError from fpylll.util import adjust_radius_to_gh_bound from fpylll.tools.bkz_stats import dummy_tracer, normalize_tracer, Tracer class BKZReduction(object): """ An implementation of the BKZ algorithm in Python. This class has feature parity with the C++ implementation in fplll's core. Additionally, this implementation collects some additional statistics. Hence, it should provide a good basis for implementing variants of this algorithm. 
""" def __init__(self, A): """Construct a new instance of the BKZ algorithm. :param A: an integer matrix, a GSO object or an LLL object """ if isinstance(A, GSO.Mat): L = None M = A A = M.B elif isinstance(A, LLL.Reduction): L = A M = L.M A = M.B elif isinstance(A, IntegerMatrix): L = None M = None A = A else: raise TypeError("Matrix must be IntegerMatrix but got type '%s'" % type(A)) if M is None and L is None: # run LLL first, but only if a matrix was passed LLL.reduction(A) self.A = A if M is None: self.M = GSO.Mat(A, flags=GSO.ROW_EXPO) else: self.M = M if L is None: self.lll_obj = LLL.Reduction(self.M, flags=LLL.DEFAULT) else: self.lll_obj = L def __call__(self, params, min_row=0, max_row=-1, tracer=False): """Run the BKZ algorithm with parameters `param`. :param params: BKZ parameters :param min_row: start processing in this row :param max_row: stop processing in this row (exclusive) :param tracer: see ``normalize_tracer`` for accepted values TESTS:: >>> from fpylll import * >>> A = IntegerMatrix.random(60, "qary", k=30, q=127) >>> from fpylll.algorithms.bkz import BKZReduction >>> bkz = BKZReduction(A) >>> _ = bkz(BKZ.EasyParam(10), tracer=True); bkz.trace is None False >>> _ = bkz(BKZ.EasyParam(10), tracer=False); bkz.trace is None True """ tracer = normalize_tracer(tracer) try: label = params["name"] except KeyError: label = "bkz" if not isinstance(tracer, Tracer): tracer = tracer( self, root_label=label, verbosity=params.flags & BKZ.VERBOSE, start_clocks=True, max_depth=2, ) if params.flags & BKZ.AUTO_ABORT: auto_abort = BKZ.AutoAbort(self.M, self.A.nrows) cputime_start = process_time() with tracer.context("lll"): self.lll_obj() i = 0 while True: with tracer.context("tour", i, dump_gso=params.flags & BKZ.DUMP_GSO): clean = self.tour(params, min_row, max_row, tracer) i += 1 if clean or params.block_size >= self.A.nrows: break if (params.flags & BKZ.AUTO_ABORT) and auto_abort.test_abort(): break if (params.flags & BKZ.MAX_LOOPS) and i >= params.max_loops: 
break if (params.flags & BKZ.MAX_TIME) and process_time() - cputime_start >= params.max_time: break tracer.exit() try: self.trace = tracer.trace except AttributeError: self.trace = None return clean def tour(self, params, min_row=0, max_row=-1, tracer=dummy_tracer): """One BKZ loop over all indices. :param params: BKZ parameters :param min_row: start index ≥ 0 :param max_row: last index ≤ n :returns: ``True`` if no change was made and ``False`` otherwise """ if max_row == -1: max_row = self.A.nrows clean = True for kappa in range(min_row, max_row - 1): block_size = min(params.block_size, max_row - kappa) clean &= self.svp_reduction(kappa, block_size, params, tracer) self.lll_obj.size_reduction(max(0, max_row - 1), max_row, max(0, max_row - 2)) return clean def svp_preprocessing(self, kappa, block_size, params, tracer): """Perform preprocessing for calling the SVP oracle :param kappa: current index :param params: BKZ parameters :param block_size: block size :param tracer: object for maintaining statistics :returns: ``True`` if no change was made and ``False`` otherwise .. note:: ``block_size`` may be smaller than ``params.block_size`` for the last blocks. """ clean = True lll_start = kappa if params.flags & BKZ.BOUNDED_LLL else 0 with tracer.context("lll"): self.lll_obj(lll_start, lll_start, kappa + block_size) if self.lll_obj.nswaps > 0: clean = False return clean def svp_call(self, kappa, block_size, params, tracer=dummy_tracer): """Call SVP oracle :param kappa: current index :param params: BKZ parameters :param block_size: block size :param tracer: object for maintaining statistics :returns: Coordinates of SVP solution or ``None`` if none was found. .. note:: ``block_size`` may be smaller than ``params.block_size`` for the last blocks. 
""" max_dist, expo = self.M.get_r_exp(kappa, kappa) delta_max_dist = self.lll_obj.delta * max_dist if params.flags & BKZ.GH_BND: root_det = self.M.get_root_det(kappa, kappa + block_size) max_dist, expo = adjust_radius_to_gh_bound( max_dist, expo, block_size, root_det, params.gh_factor ) try: enum_obj = Enumeration(self.M) with tracer.context("enumeration", enum_obj=enum_obj, probability=1.0): max_dist, solution = enum_obj.enumerate(kappa, kappa + block_size, max_dist, expo)[ 0 ] except EnumerationError as msg: if params.flags & BKZ.GH_BND: return None else: raise EnumerationError(msg) if max_dist >= delta_max_dist * (1 << expo): return None else: return solution def svp_postprocessing(self, kappa, block_size, solution, tracer=dummy_tracer): """Insert SVP solution into basis. Note that this does not run LLL; instead, it resolves the linear dependencies internally. :param solution: coordinates of an SVP solution :param kappa: current index :param block_size: block size :param tracer: object for maintaining statistics :returns: ``True`` if no change was made and ``False`` otherwise .. note :: postprocessing does not necessarily leave the GSO in a safe state. You may need to call ``update_gso()`` afterwards. 
""" if solution is None: return True # d = self.M.d # self.M.create_row() # with self.M.row_ops(d, d+1): # for i in range(block_size): # self.M.row_addmul(d, kappa + i, solution[i]) # self.M.move_row(d, kappa) # with tracer.context("lll"): # self.lll_obj(kappa, kappa, kappa + block_size + 1) # self.M.move_row(kappa + block_size, d) # self.M.remove_last_row() j_nz = None for i in range(block_size)[::-1]: if abs(solution[i]) == 1: j_nz = i break if len([x for x in solution if x]) == 1: self.M.move_row(kappa + j_nz, kappa) elif j_nz is not None: with self.M.row_ops(kappa + j_nz, kappa + j_nz + 1): for i in range(block_size): if solution[i] and i != j_nz: self.M.row_addmul(kappa + j_nz, kappa + i, solution[j_nz] * solution[i]) self.M.move_row(kappa + j_nz, kappa) else: solution = list(solution) for i in range(block_size): if solution[i] < 0: solution[i] = -solution[i] self.M.negate_row(kappa + i) with self.M.row_ops(kappa, kappa + block_size): offset = 1 while offset < block_size: k = block_size - 1 while k - offset >= 0: if solution[k] or solution[k - offset]: if solution[k] < solution[k - offset]: solution[k], solution[k - offset] = ( solution[k - offset], solution[k], ) self.M.swap_rows(kappa + k - offset, kappa + k) while solution[k - offset]: while solution[k - offset] <= solution[k]: solution[k] = solution[k] - solution[k - offset] self.M.row_addmul(kappa + k - offset, kappa + k, 1) solution[k], solution[k - offset] = ( solution[k - offset], solution[k], ) self.M.swap_rows(kappa + k - offset, kappa + k) k -= 2 * offset offset *= 2 self.M.move_row(kappa + block_size - 1, kappa) return False def svp_reduction(self, kappa, block_size, params, tracer=dummy_tracer): """Find shortest vector in projected lattice of dimension ``block_size`` and insert into current basis. 
:param kappa: current index :param params: BKZ parameters :param block_size: block size :param tracer: object for maintaining statistics :returns: ``True`` if no change was made and ``False`` otherwise """ clean = True with tracer.context("preprocessing"): clean_pre = self.svp_preprocessing(kappa, block_size, params, tracer) clean &= clean_pre solution = self.svp_call(kappa, block_size, params, tracer) with tracer.context("postprocessing"): clean_post = self.svp_postprocessing(kappa, block_size, solution, tracer) clean &= clean_post self.lll_obj.size_reduction(0, kappa + 1) return clean fpylll-0.6.1/src/fpylll/algorithms/bkz2.py000066400000000000000000000116531455321202600205120ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ BKZ 2.0 algorithm in Python. .. moduleauthor:: Martin R. Albrecht """ from fpylll import BKZ, Enumeration, EnumerationError from fpylll.algorithms.bkz import BKZReduction as BKZBase from fpylll.tools.bkz_stats import dummy_tracer from fpylll.util import gaussian_heuristic, randint class BKZReduction(BKZBase): def __init__(self, A): """Create new BKZ object. :param A: an integer matrix, a GSO object or an LLL object """ BKZBase.__init__(self, A) def get_pruning(self, kappa, block_size, params, tracer=dummy_tracer): strategy = params.strategies[block_size] radius, re = self.M.get_r_exp(kappa, kappa) radius *= self.lll_obj.delta r = [self.M.get_r_exp(i, i) for i in range(kappa, kappa+block_size)] gh_radius = gaussian_heuristic([x for x, _ in r]) ge = float(sum([y for _, y in r])) / len(r) if (params.flags & BKZ.GH_BND and block_size > 30): radius = min(radius, gh_radius * 2**(ge-re) * params.gh_factor) return radius, re, strategy.get_pruning(radius, gh_radius * 2**(ge-re)) def randomize_block(self, min_row, max_row, tracer=dummy_tracer, density=0): """Randomize basis between from ``min_row`` and ``max_row`` (exclusive) 1. permute rows 2. 
apply lower triangular matrix with coefficients in -1,0,1 :param min_row: start in this row :param max_row: stop at this row (exclusive) :param tracer: object for maintaining statistics :param density: number of non-zero coefficients in lower triangular transformation matrix """ if max_row - min_row < 2: return # there is nothing to do # 1. permute rows niter = 4 * (max_row-min_row) # some guestimate with self.M.row_ops(min_row, max_row): for i in range(niter): b = a = randint(min_row, max_row-1) while b == a: b = randint(min_row, max_row-1) self.M.move_row(b, a) # 2. triangular transformation matrix with coefficients in -1,0,1 with self.M.row_ops(min_row, max_row): for a in range(min_row, max_row-2): for i in range(density): b = randint(a+1, max_row-1) s = randint(0, 1) self.M.row_addmul(a, b, 2*s-1) return def svp_preprocessing(self, kappa, block_size, params, tracer=dummy_tracer): clean = True clean &= BKZBase.svp_preprocessing(self, kappa, block_size, params, tracer) for preproc in params.strategies[block_size].preprocessing_block_sizes: prepar = params.__class__(block_size=preproc, strategies=params.strategies, flags=BKZ.GH_BND) clean &= self.tour(prepar, kappa, kappa + block_size, tracer=tracer) return clean def svp_reduction(self, kappa, block_size, params, tracer=dummy_tracer): """ :param kappa: :param block_size: :param params: :param tracer: """ self.lll_obj.size_reduction(0, kappa+1) old_first, old_first_expo = self.M.get_r_exp(kappa, kappa) remaining_probability, rerandomize = 1.0, False while remaining_probability > 1. 
- params.min_success_probability: with tracer.context("preprocessing"): if rerandomize: with tracer.context("randomization"): self.randomize_block(kappa+1, kappa+block_size, density=params.rerandomization_density, tracer=tracer) with tracer.context("reduction"): self.svp_preprocessing(kappa, block_size, params, tracer=tracer) with tracer.context("pruner"): radius, re, pruning = self.get_pruning(kappa, block_size, params, tracer) try: enum_obj = Enumeration(self.M) with tracer.context("enumeration", enum_obj=enum_obj, probability=pruning.expectation, full=block_size==params.block_size): max_dist, solution = enum_obj.enumerate(kappa, kappa + block_size, radius, re, pruning=pruning.coefficients)[0] with tracer.context("postprocessing"): self.svp_postprocessing(kappa, block_size, solution, tracer=tracer) rerandomize = False except EnumerationError: rerandomize = True remaining_probability *= (1 - pruning.expectation) self.lll_obj.size_reduction(0, kappa+1) new_first, new_first_expo = self.M.get_r_exp(kappa, kappa) clean = old_first <= new_first * 2**(new_first_expo - old_first_expo) return clean fpylll-0.6.1/src/fpylll/algorithms/simple_bkz.py000066400000000000000000000064011455321202600217740ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ A minimal implementation of the Block Korkine Zolotarev algorithm in Python. .. moduleauthor:: Martin R. Albrecht """ from __future__ import absolute_import from fpylll import IntegerMatrix, GSO, LLL, BKZ from fpylll import Enumeration class BKZReduction: def __init__(self, A): """Construct a new BKZ reduction instance. :param A: Integer matrix to reduce. """ if not isinstance(A, IntegerMatrix): raise TypeError("Matrix must be IntegerMatrix but got type '%s'"%type(A)) # run LLL first wrapper = LLL.Wrapper(A) wrapper() self.A = A self.m = GSO.Mat(A, flags=GSO.ROW_EXPO) self.lll_obj = LLL.Reduction(self.m) def __call__(self, block_size): """Perform BKZ reduction with given``block_size``. 
Nothing is returned, the matrix ``A`` given during construction is modified in-place. :param block_size: an integer > 2 """ self.m.discover_all_rows() auto_abort = BKZ.AutoAbort(self.m, self.A.nrows) while True: clean = self.bkz_loop(block_size, 0, self.A.nrows) if clean: break if auto_abort.test_abort(): break def bkz_loop(self, block_size, min_row, max_row): """Perform one BKZ loop, often also called a "BKZ tour". :param block_size: an integer > 2 :param min_row: algorithm starts in this row (inclusive) :param max_row: algorithm stops at this row (exclusive) """ clean = True for kappa in range(min_row, max_row-1): bs = min(block_size, max_row - kappa) clean &= self.svp_reduction(kappa, bs) return clean def svp_reduction(self, kappa, block_size): """Call the SVP oracle and insert found vector into basis. :param kappa: row index :param block_size: an integer > 2 """ clean = True self.lll_obj(0, kappa, kappa + block_size) if self.lll_obj.nswaps > 0: clean = False max_dist, expo = self.m.get_r_exp(kappa, kappa) delta_max_dist = self.lll_obj.delta * max_dist max_dist, solution = Enumeration(self.m).enumerate(kappa, kappa + block_size, max_dist, expo, pruning=None)[0] if max_dist >= delta_max_dist * (1< 0: clean = False max_dist, expo = self.m.get_r_exp(kappa + block_size - 1, kappa + block_size - 1) max_dist = 1.0/max_dist expo *= -1.0 delta_max_dist = self.lll_obj.delta * max_dist max_dist, solution = Enumeration(self.m).enumerate(kappa, kappa + block_size, max_dist, expo, pruning=None, dual=True)[0] if max_dist >= delta_max_dist: return clean with self.m.row_ops(kappa, kappa+block_size): pairs = list(enumerate(solution, start=kappa)) [self.m.negate_row(pair[0]) for pair in pairs if pair[1] < 0] pairs = map(lambda x: (x[0], abs(x[1])), pairs) # GCD should be tree based but for proof of concept implementation, this will do row, x = reduce(self.euclid, pairs) if x != 1: raise RuntimeError("Euclid failed!") self.m.move_row(row, kappa + block_size - 1) 
self.lll_obj(kappa, kappa, kappa + block_size) return False fpylll-0.6.1/src/fpylll/config.pyx000066400000000000000000000027201455321202600171210ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from .fplll.fplll cimport default_strategy as default_strategy_c from .fplll.fplll cimport default_strategy_path as default_strategy_path_c from .fplll.fplll cimport FPLLL_MAJOR_VERSION as FPLLL_MAJOR_VERSION_c from .fplll.fplll cimport FPLLL_MINOR_VERSION as FPLLL_MINOR_VERSION_c from .fplll.fplll cimport FPLLL_MICRO_VERSION as FPLLL_MICRO_VERSION_c from .fplll.fplll cimport FPLLL_MAX_ENUM_DIM as FPLLL_MAX_ENUM_DIM_c from .fplll.fplll cimport FPLLL_HAVE_RECURSIVE_ENUM as FPLLL_HAVE_RECURSIVE_ENUM_c from .fplll.fplll cimport FPLLL_MAX_PARALLEL_ENUM_DIM as FPLLL_MAX_PARALLEL_ENUM_DIM_c IF HAVE_LONG_DOUBLE: have_long_double = True float_types = ("d", "ld") ELSE: have_long_double = False float_types = ("d",) IF HAVE_QD: have_qd = True float_types = float_types + ("dpe", "dd", "qd", "mpfr") ELSE: have_qd = False float_types = float_types + ("dpe", "mpfr") int_types = ("long", "mpz") default_strategy = default_strategy_c().c_str() default_strategy_path = default_strategy_path_c().c_str() major_version = FPLLL_MAJOR_VERSION_c minor_version = FPLLL_MINOR_VERSION_c micro_version = FPLLL_MICRO_VERSION_c version = "{0}.{1}.{2}".format(major_version, minor_version, micro_version) max_enum_dim = FPLLL_MAX_ENUM_DIM_c have_recursive_enum = FPLLL_HAVE_RECURSIVE_ENUM_c max_parallel_enum_dim = FPLLL_MAX_PARALLEL_ENUM_DIM_c fpylll-0.6.1/src/fpylll/fplll/000077500000000000000000000000001455321202600162225ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/fplll/__init__.py000066400000000000000000000000001455321202600203210ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/fplll/bkz.pxd000066400000000000000000000010471455321202600175270ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .decl cimport bkz_auto_abort_core_t, fplll_mat_gso_data_type_t from 
.decl cimport bkz_reduction_core_t from .gso cimport MatGSO from .bkz_param cimport BKZParam from .lll cimport LLLReduction cdef class BKZAutoAbort: cdef fplll_mat_gso_data_type_t _type cdef bkz_auto_abort_core_t _core cdef MatGSO M cdef class BKZReduction: cdef fplll_mat_gso_data_type_t _type cdef bkz_reduction_core_t _core cdef readonly MatGSO M cdef readonly LLLReduction lll_obj cdef readonly BKZParam param fpylll-0.6.1/src/fpylll/fplll/bkz.pyx000066400000000000000000001455161455321202600175660ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Block Korkine Zolotarev algorithm. .. moduleauthor:: Martin R. Albrecht """ include "fpylll/config.pxi" from cysignals.signals cimport sig_on, sig_off IF HAVE_QD: from .decl cimport mat_gso_mpz_dd, mat_gso_mpz_qd from .decl cimport mat_gso_long_dd, mat_gso_long_qd from .decl cimport dd_t, qd_t from .decl cimport d_t IF HAVE_LONG_DOUBLE: from .decl cimport ld_t from .decl cimport mat_gso_mpz_d, mat_gso_mpz_ld, mat_gso_mpz_dpe, mat_gso_mpz_mpfr, vector_fp_nr_t, fp_nr_t from .decl cimport mat_gso_long_d, mat_gso_long_ld, mat_gso_long_dpe, mat_gso_long_mpfr from .decl cimport mat_gso_gso_t, mat_gso_gram_t from .fplll cimport BKZAutoAbort as BKZAutoAbort_c from .fplll cimport BKZReduction as BKZReduction_c from .fplll cimport BKZ_MAX_LOOPS, BKZ_MAX_TIME, BKZ_DUMP_GSO, BKZ_DEFAULT from .fplll cimport BKZ_VERBOSE, BKZ_NO_LLL, BKZ_BOUNDED_LLL, BKZ_GH_BND, BKZ_AUTO_ABORT from .fplll cimport BKZ_DEF_AUTO_ABORT_SCALE, BKZ_DEF_AUTO_ABORT_MAX_NO_DEC from .fplll cimport BKZ_DEF_GH_FACTOR, BKZ_DEF_MIN_SUCCESS_PROBABILITY from .fplll cimport BKZ_DEF_RERANDOMIZATION_DENSITY from .fplll cimport BKZ_SD_VARIANT, BKZ_SLD_RED from .fplll cimport MatGSOInterface as MatGSOInterface_c from .fplll cimport FP_NR, Z_NR from .fplll cimport FloatType from .fplll cimport RED_BKZ_LOOPS_LIMIT, RED_BKZ_TIME_LIMIT from .fplll cimport bkz_reduction as bkz_reduction_c from .fplll cimport dpe_t from .fplll cimport get_red_status_str from .fplll cimport 
ZT_MPZ from fpylll.gmp.mpz cimport mpz_t from fpylll.mpfr.mpfr cimport mpfr_t from fpylll.util cimport check_delta, check_precision, check_float_type from fpylll.util import ReductionError from .integer_matrix cimport IntegerMatrix from fpylll.config import default_strategy, default_strategy_path from .bkz_param import BKZEasyParam cdef class BKZAutoAbort: """ Utility class for aborting BKZ when slope does not improve any longer. """ def __init__(self, MatGSO M, int num_rows, int start_row=0): """ Create new auto abort object. :param M: GSO matrix :param num_rows: number of rows :param start_row: start at this row """ if M._type == mat_gso_mpz_d: self._type = mat_gso_mpz_d self._core.mpz_d = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[d_t]]((M._core.mpz_d)[0], num_rows, start_row) elif M._type == mat_gso_long_d: self._type = mat_gso_long_d self._core.long_d = new BKZAutoAbort_c[Z_NR[long], FP_NR[d_t]]((M._core.long_d)[0], num_rows, start_row) elif M._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: self._type = mat_gso_mpz_ld self._core.mpz_ld = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[ld_t]]((M._core.mpz_ld)[0], num_rows, start_row) elif M._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: self._type = mat_gso_long_ld self._core.long_ld = new BKZAutoAbort_c[Z_NR[long], FP_NR[longdouble]]((M._core.long_ld)[0], num_rows, start_row) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif M._type == mat_gso_mpz_dpe: self._type = mat_gso_mpz_dpe self._core.mpz_dpe = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[dpe_t]]((M._core.mpz_dpe)[0], num_rows, start_row) elif M._type == mat_gso_long_dpe: self._type = mat_gso_long_dpe self._core.long_dpe = new BKZAutoAbort_c[Z_NR[long], FP_NR[dpe_t]]((M._core.long_dpe)[0], num_rows, start_row) elif M._type == mat_gso_mpz_mpfr: self._type = mat_gso_mpz_mpfr self._core.mpz_mpfr = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[mpfr_t]]((M._core.mpz_mpfr)[0], num_rows, start_row) elif M._type == mat_gso_long_mpfr: self._type = mat_gso_long_mpfr 
self._core.long_mpfr = new BKZAutoAbort_c[Z_NR[long], FP_NR[mpfr_t]]((M._core.long_mpfr)[0], num_rows, start_row) else: IF HAVE_QD: if M._type == mat_gso_mpz_dd: self._type = mat_gso_mpz_dd self._core.mpz_dd = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[dd_t]]((M._core.mpz_dd)[0], num_rows, start_row) elif M._type == mat_gso_long_dd: self._type = mat_gso_long_dd self._core.long_dd = new BKZAutoAbort_c[Z_NR[long], FP_NR[dd_t]]((M._core.long_dd)[0], num_rows, start_row) elif M._type == mat_gso_mpz_qd: self._type = mat_gso_mpz_qd self._core.mpz_qd = new BKZAutoAbort_c[Z_NR[mpz_t], FP_NR[qd_t]]((M._core.mpz_qd)[0], num_rows, start_row) elif M._type == mat_gso_long_qd: self._type = mat_gso_long_qd self._core.long_qd = new BKZAutoAbort_c[Z_NR[long], FP_NR[qd_t]]((M._core.long_qd)[0], num_rows, start_row) else: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) self.M = M def test_abort(self, scale=1.0, int max_no_dec=5): """ Test if new slope fails to be smaller than `scale * old_slope` for `max_no_dec` iterations. 
:param scale: target decrease :param int max_no_dec: number of rounds allowed to be stuck """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.test_abort(scale, max_no_dec) elif self._type == mat_gso_long_d: return self._core.long_d.test_abort(scale, max_no_dec) elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: return self._core.mpz_ld.test_abort(scale, max_no_dec) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: return self._core.long_ld.test_abort(scale, max_no_dec) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.test_abort(scale, max_no_dec) elif self._type == mat_gso_long_dpe: return self._core.long_dpe.test_abort(scale, max_no_dec) elif self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.test_abort(scale, max_no_dec) elif self._type == mat_gso_long_mpfr: return self._core.long_mpfr.test_abort(scale, max_no_dec) else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.test_abort(scale, max_no_dec) elif self._type == mat_gso_long_dd: return self._core.long_dd.test_abort(scale, max_no_dec) elif self._type == mat_gso_mpz_qd: return self._core.mpz_qd.test_abort(scale, max_no_dec) elif self._type == mat_gso_long_qd: return self._core.long_qd.test_abort(scale, max_no_dec) raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) cdef class BKZReduction: def __init__(self, MatGSO M, LLLReduction lll_obj, BKZParam param): """Construct new BKZ object. 
:param M: GSO object :param lll_obj: LLL object called as a subroutine :param param: parameters """ self.M = M self.lll_obj = lll_obj self.param = param self._type = M._type if M._type == mat_gso_mpz_d: self._type = mat_gso_mpz_d self._core.mpz_d = new BKZReduction_c[Z_NR[mpz_t], FP_NR[double]]((self.M._core.mpz_d)[0], self.lll_obj._core.mpz_d[0], param.o[0]) elif M._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: self._type = mat_gso_mpz_ld self._core.mpz_ld = new BKZReduction_c[Z_NR[mpz_t], FP_NR[ld_t]]((self.M._core.mpz_ld)[0], self.lll_obj._core.mpz_ld[0], param.o[0]) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif M._type == mat_gso_mpz_dpe: self._type = mat_gso_mpz_dpe self._core.mpz_dpe = new BKZReduction_c[Z_NR[mpz_t], FP_NR[dpe_t]]((self.M._core.mpz_dpe)[0], self.lll_obj._core.mpz_dpe[0], param.o[0]) elif M._type == mat_gso_mpz_mpfr: self._type = mat_gso_mpz_mpfr self._core.mpz_mpfr = new BKZReduction_c[Z_NR[mpz_t], FP_NR[mpfr_t]]((self.M._core.mpz_mpfr)[0], self.lll_obj._core.mpz_mpfr[0], param.o[0]) elif M._type == mat_gso_long_d: self._type = mat_gso_long_d self._core.long_d = new BKZReduction_c[Z_NR[long], FP_NR[double]]((self.M._core.long_d)[0], self.lll_obj._core.long_d[0], param.o[0]) elif M._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: self._type = mat_gso_long_ld self._core.long_ld = new BKZReduction_c[Z_NR[long], FP_NR[ld_t]]((self.M._core.long_ld)[0], self.lll_obj._core.long_ld[0], param.o[0]) ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif M._type == mat_gso_long_dpe: self._type = mat_gso_long_dpe self._core.long_dpe = new BKZReduction_c[Z_NR[long], FP_NR[dpe_t]]((self.M._core.long_dpe)[0], self.lll_obj._core.long_dpe[0], param.o[0]) elif M._type == mat_gso_long_mpfr: self._type = mat_gso_long_mpfr self._core.long_mpfr = new BKZReduction_c[Z_NR[long], FP_NR[mpfr_t]]((self.M._core.long_mpfr)[0], self.lll_obj._core.long_mpfr[0], param.o[0]) else: IF HAVE_QD: if M._type == mat_gso_mpz_dd: 
self._type = mat_gso_mpz_dd self._core.mpz_dd = new BKZReduction_c[Z_NR[mpz_t], FP_NR[dd_t]]((self.M._core.mpz_dd)[0], self.lll_obj._core.mpz_dd[0], param.o[0]) elif M._type == mat_gso_mpz_qd: self._type = mat_gso_mpz_qd self._core.mpz_qd = new BKZReduction_c[Z_NR[mpz_t], FP_NR[qd_t]]((self.M._core.mpz_qd)[0], self.lll_obj._core.mpz_qd[0], param.o[0]) elif M._type == mat_gso_long_dd: self._type = mat_gso_long_dd self._core.long_dd = new BKZReduction_c[Z_NR[long], FP_NR[dd_t]]((self.M._core.long_dd)[0], self.lll_obj._core.long_dd[0], param.o[0]) elif M._type == mat_gso_long_qd: self._type = mat_gso_long_qd self._core.long_qd = new BKZReduction_c[Z_NR[long], FP_NR[qd_t]]((self.M._core.long_qd)[0], self.lll_obj._core.long_qd[0], param.o[0]) else: raise RuntimeError("MatGSO object '%s' has no core."%M) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%M) def __dealloc__(self): if self._type == mat_gso_mpz_d: del self._core.mpz_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: del self._core.mpz_ld if self._type == mat_gso_mpz_dpe: del self._core.mpz_dpe IF HAVE_QD: if self._type == mat_gso_mpz_dd: del self._core.mpz_dd if self._type == mat_gso_mpz_qd: del self._core.mpz_qd if self._type == mat_gso_mpz_mpfr: del self._core.mpz_mpfr if self._type == mat_gso_long_d: del self._core.long_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: del self._core.long_ld if self._type == mat_gso_long_dpe: del self._core.long_dpe IF HAVE_QD: if self._type == mat_gso_long_dd: del self._core.long_dd if self._type == mat_gso_long_qd: del self._core.long_qd if self._type == mat_gso_long_mpfr: del self._core.long_mpfr def __reduce__(self): """ Make sure attempts at pickling raise an error until proper pickling is implemented. """ raise NotImplementedError def __call__(self): """ Call BKZ, SD-BKZ or slide reduction. .. note :: To enable the latter, set flags ``BKZ.SLD_RED`` or ``BKZ.SD_VARIANT`` when calling the constructor of this class. 
""" if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.bkz() sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.bkz() sig_off() ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.bkz() sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.bkz() sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.bkz() sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.bkz() sig_off() ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.bkz() sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.bkz() sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.bkz() sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.bkz() sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.bkz() sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.bkz() sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def svp_preprocessing(self, int kappa, int block_size, BKZParam param): """Preprocess before calling (Dual-)SVP oracle. 
:param kappa: index :param block_size: block size :param param: reduction parameters """ if kappa < 0 or kappa >= self.M.d: raise ValueError("kappa %d out of bounds (0, %d)"%(kappa, self.M.d)) if block_size < 2 or block_size > self.M.d: raise ValueError("block size %d out of bounds (2, %d)"%(block_size, self.M.d)) r = True if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() ELSE: raise RuntimeError("BKZAutoAbort object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = 
self._core.long_qd.svp_preprocessing(kappa, block_size, param.o[0]) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def svp_postprocessing(self, int kappa, int block_size, tuple solution): """Insert solution into basis after SVP oracle call :param kappa: index :param block_size: block size :param solution: solution to insert """ cdef vector_fp_nr_t solution_ cdef fp_nr_t t if kappa < 0 or kappa >= self.M.d: raise ValueError("kappa %d out of bounds (0, %d)"%(kappa, self.M.d)) if block_size < 2 or block_size > self.M.d: raise ValueError("block size %d out of bounds (2, %d)"%(block_size, self.M.d)) r = True if self._type == mat_gso_mpz_d: for s in solution: t.d = float(s) solution_.d.push_back(t.d) sig_on() r = self._core.mpz_d.svp_postprocessing(kappa, block_size, solution_.d) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: for s in solution: t.ld = float(s) solution_.ld.push_back(t.ld) sig_on() r = self._core.mpz_ld.svp_postprocessing(kappa, block_size, solution_.ld) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: for s in solution: t.dpe = float(s) solution_.dpe.push_back(t.dpe) sig_on() r = self._core.mpz_dpe.svp_postprocessing(kappa, block_size, solution_.dpe) sig_off() elif self._type == mat_gso_mpz_mpfr: for s in solution: t.mpfr = float(s) solution_.mpfr.push_back(t.mpfr) sig_on() r= self._core.mpz_mpfr.svp_postprocessing(kappa, block_size, solution_.mpfr) sig_off() elif self._type == mat_gso_long_d: for s in solution: t.d = float(s) solution_.d.push_back(t.d) sig_on() r = self._core.long_d.svp_postprocessing(kappa, block_size, solution_.d) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: for s in solution: t.ld = float(s) solution_.ld.push_back(t.ld) sig_on() r = self._core.long_ld.svp_postprocessing(kappa, block_size, solution_.ld) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no 
core."%self) elif self._type == mat_gso_long_dpe: for s in solution: t.dpe = float(s) solution_.dpe.push_back(t.dpe) sig_on() r = self._core.long_dpe.svp_postprocessing(kappa, block_size, solution_.dpe) sig_off() elif self._type == mat_gso_long_mpfr: for s in solution: t.mpfr = float(s) solution_.mpfr.push_back(t.mpfr) sig_on() r= self._core.long_mpfr.svp_postprocessing(kappa, block_size, solution_.mpfr) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: for s in solution: t.dd = float(s) solution_.dd.push_back(t.dd) sig_on() r = self._core.mpz_dd.svp_postprocessing(kappa, block_size, solution_.dd) sig_off() elif self._type == mat_gso_mpz_qd: for s in solution: t.qd = float(s) solution_.qd.push_back(t.qd) sig_on() r = self._core.mpz_qd.svp_postprocessing(kappa, block_size, solution_.qd) sig_off() elif self._type == mat_gso_long_dd: for s in solution: t.dd = float(s) solution_.dd.push_back(t.dd) sig_on() r = self._core.long_dd.svp_postprocessing(kappa, block_size, solution_.dd) sig_off() elif self._type == mat_gso_long_qd: for s in solution: t.qd = float(s) solution_.qd.push_back(t.qd) sig_on() r = self._core.long_qd.svp_postprocessing(kappa, block_size, solution_.qd) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def svp_reduction(self, int kappa, int block_size, BKZParam param, dual=False): """Run (Dual-)SVP reduction (incl. 
pre and postprocessing) :param kappa: index :param block_size: block size :param param: reduction parameters :param dual: dual or primal reduction """ if kappa < 0 or kappa >= self.M.d: raise ValueError("kappa %d out of bounds (0, %d)"%(kappa, self.M.d)) if block_size < 2 or block_size > self.M.d: raise ValueError("block size %d out of bounds (2, %d)"%(block_size, self.M.d)) r = True if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.svp_reduction(kappa, block_size, param.o[0], int(dual)) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.svp_reduction(kappa, block_size, param.o[0], int(dual)) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.svp_reduction(kappa, block_size, param.o[0], dual) 
sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.svp_reduction(kappa, block_size, param.o[0], dual) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def tour(self, int loop, BKZParam param, int min_row, int max_row): """One BKZ tour. :param loop: loop index :param param: reduction parameters :param min_row: start row :param max_row: maximum row to consider (exclusive) :returns: tuple ``(clean, max_kappa)`` where ``clean == True`` if no changes were made, and ``max_kappa`` is the maximum index for which no changes were made. """ if min_row < 0 or min_row >= self.M.d: raise ValueError("min row %d out of bounds (0, %d)"%(min_row, self.M.d)) if max_row < min_row or max_row > self.M.d: raise ValueError("max row %d out of bounds (%d, %d)"%(max_row, min_row, self.M.d)) r = True cdef int kappa_max = 0 if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == 
mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.tour(loop, kappa_max, param.o[0], min_row, max_row) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r), kappa_max def sd_tour(self, int loop, BKZParam param, int min_row, int max_row): """One Dual-BKZ tour. :param loop: loop index :param param: reduction parameters :param min_row: start row :param max_row: maximum row to consider (exclusive) :returns: ``True`` if no changes were made, ``False`` otherwise. 
""" if min_row < 0 or min_row >= self.M.d: raise ValueError("min row %d out of bounds (0, %d)"%(min_row, self.M.d)) if max_row < min_row or max_row > self.M.d: raise ValueError("max row %d out of bounds (%d, %d)"%(max_row, min_row, self.M.d)) r = True if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.sd_tour(loop, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.sd_tour(loop, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.sd_tour(loop, param.o[0], min_row, max_row) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.sd_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.sd_tour(loop, param.o[0], min_row, max_row) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def 
slide_tour(self, int loop, BKZParam param, int min_row, int max_row): """One slide reduction tour. :param loop: loop index :param param: reduction parameters :param min_row: start row :param max_row: maximum row to consider (exclusive) :returns: ``True`` if no changes were made, ``False`` otherwise. .. note :: You must run ``lll_obj()`` before calling this function, otherwise this function will produce an error. """ if min_row < 0 or min_row >= self.M.d: raise ValueError("min row %d out of bounds (0, %d)"%(min_row, self.M.d)) if max_row < min_row or max_row > self.M.d: raise ValueError("max row %d out of bounds (%d, %d)"%(max_row, min_row, self.M.d)) r = True if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.slide_tour(loop, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.slide_tour(loop, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.slide_tour(loop, param.o[0], min_row, max_row) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif 
self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.slide_tour(loop, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.slide_tour(loop, param.o[0], min_row, max_row) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r) def hkz(self, BKZParam param, int min_row, int max_row): """HKZ reduction between ``min_row`` and ``max_row``. :param param: reduction parameters :param min_row: start row :param max_row: maximum row to consider (exclusive) :returns: ``True`` if no changes were made, ``False`` otherwise. """ if min_row < 0 or min_row >= self.M.d: raise ValueError("min row %d out of bounds (0, %d)"%(min_row, self.M.d)) if max_row < min_row or max_row > self.M.d: raise ValueError("max row %d out of bounds (%d, %d)"%(max_row, min_row, self.M.d)) r = True cdef int kappa_max = 0 if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r= self._core.mpz_mpfr.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() r = 
self._core.long_dpe.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r= self._core.long_mpfr.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.hkz(kappa_max, param.o[0], min_row, max_row) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) return bool(r), kappa_max def rerandomize_block(self, int min_row, int max_row, int density): """Rerandomize block between ``min_row`` and ``max_row`` with a transform of ``density`` :param min_row: :param max_row: :param density: """ if self._type == mat_gso_mpz_d: sig_on() self._core.mpz_d.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() self._core.mpz_ld.rerandomize_block(min_row, max_row, density) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() self._core.mpz_dpe.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() self._core.mpz_mpfr.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_long_d: sig_on() self._core.long_d.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() self._core.long_ld.rerandomize_block(min_row, max_row, density) sig_off() ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() self._core.long_dpe.rerandomize_block(min_row, 
max_row, density) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() self._core.long_mpfr.rerandomize_block(min_row, max_row, density) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() self._core.mpz_dd.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() self._core.mpz_qd.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_long_dd: sig_on() self._core.long_dd.rerandomize_block(min_row, max_row, density) sig_off() elif self._type == mat_gso_long_qd: sig_on() self._core.long_qd.rerandomize_block(min_row, max_row, density) sig_off() else: raise RuntimeError("BKZReduction object '%s' has no core."%self) @property def status(self): """ Status of this reduction. """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.status elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: return self._core.mpz_ld.status ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.status elif self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.status elif self._type == mat_gso_long_d: return self._core.long_d.status elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: return self._core.long_ld.status ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: return self._core.long_dpe.status elif self._type == mat_gso_long_mpfr: return self._core.long_mpfr.status else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.status elif self._type == mat_gso_mpz_qd: return self._core.mpz_qd.status elif self._type == mat_gso_long_dd: return self._core.long_dd.status elif self._type == mat_gso_long_qd: return self._core.long_qd.status else: raise RuntimeError("BKZReduction object '%s' has no core."%self) @property def nodes(self): """ Total number of enumeration nodes visited during this reduction. 
""" if self._type == mat_gso_mpz_d: return self._core.mpz_d.nodes elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: return self._core.mpz_ld.nodes ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.nodes elif self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.nodes elif self._type == mat_gso_long_d: return self._core.long_d.nodes elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: return self._core.long_ld.nodes ELSE: raise RuntimeError("BKZReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: return self._core.long_dpe.nodes elif self._type == mat_gso_long_mpfr: return self._core.long_mpfr.nodes else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.nodes elif self._type == mat_gso_mpz_qd: return self._core.mpz_qd.nodes elif self._type == mat_gso_long_dd: return self._core.long_dd.nodes elif self._type == mat_gso_long_qd: return self._core.long_qd.nodes else: raise RuntimeError("BKZReduction object '%s' has no core."%self) def bkz_reduction(IntegerMatrix B, BKZParam o, float_type=None, int precision=0): """ Run BKZ reduction. :param IntegerMatrix B: Integer matrix, modified in place. 
:param BKZParam o: BKZ parameters :param float_type: either ``None``: for automatic choice or an entry of `fpylll.config.float_types` :param precision: bit precision to use if ``float_type`` is ``'mpfr'`` :returns: modified matrix ``B`` """ check_precision(precision) cdef FloatType float_type_ = check_float_type(float_type) cdef int r = 0 if B._type != ZT_MPZ: raise NotImplementedError("C++ BKZ is not implemented over longs, try the Python version.") with nogil: sig_on() r = bkz_reduction_c(B._core.mpz, NULL, o.o[0], float_type_, precision) sig_off() if r and r not in (RED_BKZ_LOOPS_LIMIT, RED_BKZ_TIME_LIMIT): raise ReductionError( str(get_red_status_str(r)) ) return B class BKZ: DEFAULT = BKZ_DEFAULT VERBOSE = BKZ_VERBOSE NO_LLL = BKZ_NO_LLL BOUNDED_LLL = BKZ_BOUNDED_LLL GH_BND = BKZ_GH_BND AUTO_ABORT = BKZ_AUTO_ABORT MAX_LOOPS = BKZ_MAX_LOOPS MAX_TIME = BKZ_MAX_TIME DUMP_GSO = BKZ_DUMP_GSO SD_VARIANT = BKZ_SD_VARIANT SLD_RED = BKZ_SLD_RED Param = BKZParam EasyParam = staticmethod(BKZEasyParam) AutoAbort = BKZAutoAbort reduction = staticmethod(bkz_reduction) Reduction = BKZReduction DEFAULT_AUTO_ABORT_SCALE = BKZ_DEF_AUTO_ABORT_SCALE DEFAULT_AUTO_ABORT_MAX_NO_DEC = BKZ_DEF_AUTO_ABORT_MAX_NO_DEC DEFAULT_GH_FACTOR = BKZ_DEF_GH_FACTOR DEFAULT_MIN_SUCCESS_PROBABILITY = BKZ_DEF_MIN_SUCCESS_PROBABILITY DEFAULT_RERANDOMIZATION_DENSITY = BKZ_DEF_RERANDOMIZATION_DENSITY DEFAULT_STRATEGY = default_strategy DEFAULT_STRATEGY_PATH = default_strategy_path fpylll-0.6.1/src/fpylll/fplll/bkz_param.pxd000066400000000000000000000013251455321202600207060ustar00rootroot00000000000000# -*- coding: utf-8 -*- from libcpp.vector cimport vector from .decl cimport bkz_auto_abort_core_t, fplll_mat_gso_data_type_t from .gso cimport MatGSO from .fplll cimport BKZParam as BKZParam_c from .fplll cimport PruningParams as PruningParams_c from .fplll cimport Strategy as Strategy_c from .fplll cimport PrunerMetric cdef class Strategy: cdef Strategy_c _core cdef dict aux @staticmethod cdef Strategy 
from_cxx(Strategy_c & s) @staticmethod cdef to_cxx(Strategy_c& self, Strategy s) cdef class BKZParam: # BKZParam_c doesn't actually store strategies, store them here cdef vector[Strategy_c] strategies_c cdef BKZParam_c *o cdef readonly tuple strategies cdef dict aux fpylll-0.6.1/src/fpylll/fplll/bkz_param.pyx000066400000000000000000000506751455321202600207470ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Parameters for Block Korkine Zolotarev algorithm. .. moduleauthor:: Martin R. Albrecht """ include "fpylll/config.pxi" from cysignals.signals cimport sig_on, sig_off from .fplll cimport BKZParam as BKZParam_c from .fplll cimport BKZ_MAX_LOOPS, BKZ_MAX_TIME, BKZ_DUMP_GSO, BKZ_DEFAULT from .fplll cimport BKZ_VERBOSE, BKZ_NO_LLL, BKZ_BOUNDED_LLL, BKZ_GH_BND, BKZ_AUTO_ABORT from .fplll cimport BKZ_DEF_AUTO_ABORT_SCALE, BKZ_DEF_AUTO_ABORT_MAX_NO_DEC from .fplll cimport BKZ_DEF_GH_FACTOR, BKZ_DEF_MIN_SUCCESS_PROBABILITY from .fplll cimport BKZ_DEF_RERANDOMIZATION_DENSITY from .fplll cimport PRUNER_METRIC_PROBABILITY_OF_SHORTEST from .fplll cimport PRUNER_METRIC_EXPECTED_SOLUTIONS from .fplll cimport LLL_DEF_DELTA from .fplll cimport PruningParams as PruningParams_c from .fplll cimport Strategy as Strategy_c from .fplll cimport load_strategies_json as load_strategies_json_c from .fplll cimport strategy_full_path from fpylll.util cimport check_delta, check_pruner_metric from cython.operator cimport dereference as deref, preincrement as inc from fpylll.config import default_strategy_path, default_strategy from .pruner cimport PruningParams from collections import OrderedDict from os import path, environ import json cdef class Strategy: """ A strategy is a collection of pruning coefficients for a variety of radii and preprocessing block sizes. 
""" def __init__(self, block_size, preprocessing_block_sizes=tuple(), pruning_parameters=tuple(), **kwds): """ :param block_size: block size of this strategy :param preprocessing_block_sizes: preprocessing block sizes :param pruning_parameters: a list of pruning parameters All other keyword arguments are stored as auxiliary parameters in the ``aux`` attribute. """ if block_size < 0: raise ValueError("Block size must be ≥ 0") self._core.block_size = block_size for p in pruning_parameters: if not isinstance(p, PruningParams): p = PruningParams(*p) self._core.pruning_parameters.push_back((p)._core) if len(pruning_parameters) == 0: p = PruningParams(1.0, [1.0 for _ in range(self.block_size)], 1.0) self._core.pruning_parameters.push_back((p)._core) for p in preprocessing_block_sizes: if p<=2: raise ValueError("Preprocessing block_size must be > 2, got %s", p) if p >= block_size: raise ValueError("Preprocessing block_size must be < block size, got %s", p) self._core.preprocessing_block_sizes.push_back(p) self.aux = {} for k,v in kwds.items(): self.aux[k] = v def get_pruning(self, radius, gh): """ :param radius: target radius :param gh: gaussian heuristic radius """ gh_factor = radius/gh closest_dist = 2**80 best = None for pruning in self.pruning_parameters: if abs(pruning.gh_factor - gh_factor) < closest_dist: best = pruning closest_dist = abs(pruning.gh_factor - gh_factor) assert(best is not None) return best def dict(self): """ >>> from fpylll import load_strategies_json, BKZ >>> print(load_strategies_json(BKZ.DEFAULT_STRATEGY)[50].dict()) # doctest: +ELLIPSIS OrderedDict... 
>>> print(load_strategies_json(BKZ.DEFAULT_STRATEGY)[50]) Strategy< 50, (26), 0.52-0.55, {}> """ d = OrderedDict() d["block_size"] = self.block_size d["preprocessing_block_sizes"] = self.preprocessing_block_sizes d["pruning_parameters"] = tuple([(p.gh_factor, p.coefficients, p.expectation, p.metric, p.detailed_cost) for p in self.pruning_parameters]) for k,v in self.aux.items(): d[k] = v return d def __getitem__(self, what): """ >>> from fpylll.fplll.bkz_param import Strategy >>> s = Strategy(20, [10], [], foo=True) >>> s["foo"] True """ return self.aux[what] def __str__(self): preproc = ",".join([str(p) for p in self.preprocessing_block_sizes]) pruning = [p.expectation for p in self.pruning_parameters] if pruning: pruning = min(pruning), max(pruning) else: pruning = 1.0, 1.0 return "Strategy<%3d, (%s), %4.2f-%4.2f, %s>"%(self.block_size, preproc, pruning[0], pruning[1], self.aux) def __reduce__(self): """ >>> from fpylll import Pruning >>> from fpylll.fplll.bkz_param import Strategy >>> import pickle >>> p = Pruning.PruningParams(1.0, [1.0, 0.75, 0.5, 0.25], 0.5) >>> print(pickle.loads(pickle.dumps(Strategy(20, [10], [p])))) Strategy< 20, (10), 0.50-0.50, {}> """ return unpickle_Strategy, (self.__class__, tuple(self.dict().items())) @staticmethod cdef Strategy from_cxx(Strategy_c& s): cdef Strategy self = Strategy(0) self._core = s return self @staticmethod cdef to_cxx(Strategy_c& self, Strategy s): for p in s.pruning_parameters: self.pruning_parameters.push_back((p)._core) for p in s.preprocessing_block_sizes: self.preprocessing_block_sizes.push_back(p) self.block_size = s.block_size @property def block_size(self): """ >>> from fpylll import Pruning >>> from fpylll.fplll.bkz_param import Strategy >>> s = Strategy(20, [10], [Pruning.PruningParams(1.0, [1.0, 0.75, 0.5, 0.25], 0.5)]) >>> s.block_size 20 """ return self._core.block_size @property def preprocessing_block_sizes(self): """ >>> from fpylll import Pruning >>> from fpylll.fplll.bkz_param import Strategy 
>>> s = Strategy(20, [10], [Pruning.PruningParams(1.0, [1.0, 0.75, 0.5, 0.25], 0.5)]) >>> s.preprocessing_block_sizes (10,) """ cdef list preprocessing_block_sizes = [] cdef vector[size_t].iterator it = self._core.preprocessing_block_sizes.begin() while it != self._core.preprocessing_block_sizes.end(): preprocessing_block_sizes.append(deref(it)) inc(it) return tuple(preprocessing_block_sizes) @property def pruning_parameters(self): """ >>> from fpylll import Pruning >>> from fpylll.fplll.bkz_param import Strategy >>> s = Strategy(20, [10], [Pruning.PruningParams(1.0, [1.0, 0.75, 0.5, 0.25], 0.5)]) >>> print(s.pruning_parameters[0]) PruningParams<1.000000, (1.00,...,0.25), 0.5000> """ cdef list pruning_parameters = [] cdef vector[PruningParams_c].iterator it = self._core.pruning_parameters.begin() while it != self._core.pruning_parameters.end(): pruning_parameters.append(PruningParams.from_cxx(deref(it))) inc(it) return tuple(pruning_parameters) cdef strategies_c_to_strategies(vector[Strategy_c]& strategies): """ Convert C++ strategy vector to Python strategy list """ cdef vector[Strategy_c].iterator it = strategies.begin() ret = [] while it != strategies.end(): ret.append(Strategy.from_cxx(deref(it))) inc(it) return tuple(ret) cdef _load_strategies_aux_json(strategies, filename): """ Load auxiliary data into strategies. We call ``load_strategies_json_c`` ignores all attributes not known at the C++ level. Thus, we read the file twice to load those auxiliary attributes. It's a bit hamfisted but it works and allows us to avoid reimplementing ``load_strategies_json_c`` here. 
:: >>> import tempfile >>> from fpylll.fplll.bkz_param import Strategy, load_strategies_json, dump_strategies_json >>> fh, fn = tempfile.mkstemp(suffix=".json") >>> s = [Strategy(0, [], [], foo=True)] >>> dump_strategies_json(fn, s) >>> s = load_strategies_json(fn) >>> s[0]["foo"] True """ with open(filename, "r") as fh: json_ = json.load(fh) for i in range(len(strategies)): if not isinstance(strategies[i], Strategy): raise TypeError("{i}-th input {strategy} is not a Strategy".format(i=i, strategy=strategies[i])) for k,v in json_[i].items(): if not hasattr(strategies[i], k): (strategies[i]).aux[k] = v return strategies def load_strategies_json(filename): """ Load strategies from ``filename``. :: >>> from fpylll import load_strategies_json, BKZ >>> strategies = load_strategies_json(BKZ.DEFAULT_STRATEGY) >>> strategies[80].preprocessing_block_sizes (56,) >>> strategies[80].pruning_parameters[0].expectation 0.14362574329237188 """ # It turns out that on some platforms (e.g pre-compiled Sagemath) the BKZ.DEFAULT_STRATEGY # path gets a bit broken. If this happens there's a C++ exception thrown from fplll, which we # catch. If it just so happens that we fail, we read the Sagemath environment variables and try # to navigate to the default file relative to that directory. 
if not path.exists(filename) and filename == "default.json": if 'SAGE_LOCAL' in environ: # NOTE: some people are quite angry about this: https://github.com/fplll/fpylll/issues/221 filename = path.join(environ['SAGE_LOCAL'], 'fplll', 'strategies', 'default.json') else: filename = path.join(default_strategy_path, filename) if not path.exists(filename): raise FileNotFoundError("File '%s' not found."%filename) if isinstance(filename, bytes): filename = filename.decode("UTF-8") if isinstance(filename, (str, unicode)): filename = filename.encode('UTF-8') cdef vector[Strategy_c] strategies sig_on() strategies = load_strategies_json_c(filename) sig_off() strategies_ = strategies_c_to_strategies(strategies) strategies_ = _load_strategies_aux_json(strategies_, filename) return strategies_ def dump_strategies_json(filename, strategies): with open(filename, "w") as fh: json.dump([strategy.dict() for strategy in strategies], fh, indent=4, sort_keys=True) cdef load_strategies_python(vector[Strategy_c]& out, inp): for strategy in inp: if isinstance(strategy, OrderedDict): strategy = Strategy(**strategy) if not isinstance(strategy, Strategy): raise TypeError("Type '%s' of '%s' not supported."%(type(strategy), strategy)) out.push_back((strategy)._core) cdef class BKZParam: """ Parameters for the BKZ algorithm. """ def __init__(self, int block_size, strategies=None, float delta=LLL_DEF_DELTA, int flags=BKZ_DEFAULT, int max_loops=0, int max_time=0, auto_abort=None, gh_factor=None, float min_success_probability=BKZ_DEF_MIN_SUCCESS_PROBABILITY, int rerandomization_density=BKZ_DEF_RERANDOMIZATION_DENSITY, dump_gso_filename=None, **kwds): """ Create BKZ parameters object. 
:param block_size: an integer from 1 to ``nrows`` :param strategies: a filename or a list of Strategies :param delta: LLL parameter `0.25 < δ < 1.0` :param flags: flags :param max_loops: maximum number of full loops :param max_time: stop after time seconds (up to loop completion) :param auto_abort: heuristic, stop when the average slope of `\log(||b_i^*||)` does not decrease fast enough. If a tuple is given it is parsed as ``(scale, max_iter)`` such that the algorithm will terminate if for ``max_iter`` loops the slope is not smaller than ``scale * old_slope`` where ``old_slope`` was the old minimum. If ``True`` is given, this is equivalent to providing ``(1.0,5)`` which is fpLLL's default. :param gh_factor: heuristic, if set then the enumeration bound will be set to ``gh_factor`` times the Gaussian Heuristic. If ``True`` then ``gh_factor`` is set to 1.1, which is fpLLL's default. :param min_success_probability: minimum success probability in an SVP reduction (when using pruning) :param rerandomization_density: density of rerandomization operation when using extreme pruning :param dump_gso_filename: if this is not ``None`` then the logs of the norms of the Gram-Schmidt vectors are written to this file after each BKZ loop. All other keyword arguments are stored as auxiliary parameters in the ``aux`` attribute. 
""" # if the user sets these, they want the appropriate flags to be set if max_loops > 0: flags |= BKZ_MAX_LOOPS if max_time > 0: flags |= BKZ_MAX_TIME if gh_factor is not None: flags |= BKZ_GH_BND if gh_factor in (True, False, None): gh_factor = BKZ_DEF_GH_FACTOR if block_size <= 0: raise ValueError("block size must be > 0") if max_loops < 0: raise ValueError("maximum number of loops must be >= 0") if max_time < 0: raise ValueError("maximum time must be >= 0") if gh_factor <= 0: raise ValueError("GH factor must be > 0") check_delta(delta) if strategies: if isinstance(strategies, bytes): strategies = strategies.decode("UTF-8") if isinstance(strategies, (str, unicode)): strategies = strategies.encode('UTF-8') sig_on() self.strategies_c = load_strategies_json_c(strategy_full_path(strategies)) self.strategies = strategies_c_to_strategies(self.strategies_c) sig_off() else: load_strategies_python(self.strategies_c, strategies) if all(isinstance(x, Strategy) for x in strategies): self.strategies = tuple(strategies) elif all(isinstance(x, OrderedDict) for x in strategies): self.strategies = tuple([Strategy(**strategy) for strategy in strategies]) else: raise TypeError("Entry type of strategies must be Strategy or OrderedDict") cdef BKZParam_c *o = new BKZParam_c(block_size, self.strategies_c, delta) if not strategies: self.strategies = strategies_c_to_strategies(o.strategies) o.flags = flags o.gh_factor = float(gh_factor) if auto_abort is True: o.flags |= BKZ_AUTO_ABORT if o.flags & BKZ_AUTO_ABORT: if auto_abort in (True, None): pass else: try: a_scale, a_max = auto_abort o.auto_abort_scale = a_scale o.auto_abort_max_no_dec = a_max except TypeError: del o raise ValueError("Parameter auto_abort (%s) not understood."%auto_abort) if o.flags & BKZ_MAX_LOOPS: o.max_loops = max_loops if o.flags & BKZ_MAX_TIME: o.max_time = max_time if dump_gso_filename is not None: o.flags |= BKZ_DUMP_GSO if o.flags & BKZ_DUMP_GSO and dump_gso_filename is not None: o.dump_gso_filename = 
dump_gso_filename o.min_success_probability = min_success_probability o.rerandomization_density = rerandomization_density self.o = o self.aux = {} for k,v in kwds.iteritems(): self.aux[k] = v def __dealloc__(self): del self.o def __repr__(self): return "" % ( self.o.block_size, self.o.flags, hex(id(self))) def __reduce__(self): return unpickle_BKZParam, tuple(self.dict().items()) def __str__(self): cdef BKZParam param = self r = str(param.dict(all=False)) return r @property def block_size(self): return self.o.block_size @property def delta(self): return self.o.delta @property def flags(self): return self.o.flags @property def max_loops(self): return self.o.max_loops @property def max_time(self): return self.o.max_time @property def auto_abort(self): a_scale = self.o.auto_abort_scale a_max = self.o.auto_abort_max_no_dec return (a_scale, a_max) @property def gh_factor(self): return self.o.gh_factor @property def dump_gso_filename(self): return self.o.dump_gso_filename @property def min_success_probability(self): return self.o.min_success_probability @property def rerandomization_density(self): return self.o.rerandomization_density def __getitem__(self, what): """ >>> from fpylll import BKZ >>> p = BKZ.Param(40, max_loops=4, foo=True) >>> p["foo"] True """ return self.aux[what] def __setitem__(self, what, value): """ >>> from fpylll import BKZ >>> p = BKZ.Param(40, max_loops=4, foo=True) >>> p["foo"] = False >>> p["foo"] False """ if not isinstance(what, str): raise TypeError("Only strings are supported as auxiliary keys but got %s"%type(what)) if hasattr(self, what): raise ValueError("Provided key '%s' would shadow class attribute"%what) self.aux[what] = value def dict(self, all=True): """ >>> from fpylll import BKZ >>> d = BKZ.Param(40, max_loops=4, flags=BKZ.MAX_LOOPS).dict(False) >>> d["block_size"] 40 >>> d["max_loops"] 4 >>> d.get("delta", False) False """ d = {} d["block_size"] = self.block_size if all or abs(self.delta - LLL_DEF_DELTA) > 0.001: d["delta"] = 
self.delta d["flags"] = self.flags if all or self.max_loops != 0: d["max_loops"] = self.max_loops if all or self.max_time != 0: d["max_time"] = self.max_time if self.o.flags & BKZ_AUTO_ABORT: d["auto_abort"] = self.auto_abort if self.o.flags & BKZ_GH_BND: d["gh_factor"] = self.gh_factor if self.o.flags & BKZ_DUMP_GSO: d["dump_gso_filename"] = self.dump_gso_filename if all or self.min_success_probability != BKZ_DEF_MIN_SUCCESS_PROBABILITY: d["min_success_probability"] = self.min_success_probability if all or self.rerandomization_density != BKZ_DEF_RERANDOMIZATION_DENSITY: d["rerandomization_density"] = self.rerandomization_density if all: d["strategies"] = [strategy.dict() for strategy in self.strategies[:self.block_size+1]] if all: for k,v in self.aux.iteritems(): d[k] = v return d def new(self, **kwds): d = self.dict() d.update(kwds) return BKZParam(**d) def unpickle_BKZParam(*args): """ Deserialize this set of BKZ parameters. >>> from fpylll import BKZ >>> import pickle >>> pickle.loads(pickle.dumps(BKZ.Param(10, flags=BKZ.VERBOSE))) # doctest: +ELLIPSIS """ kwds = dict(args) return BKZParam(**kwds) def unpickle_Strategy(*args): """ """ cls, args = args kwds = dict(args) return cls(**kwds) def BKZEasyParam(block_size, **kwds): """ Set sane defaults for most use-cases: 1. set default strategies 2. switch on auto abort Additional parameters can be passed. When flags are passed, they are XORed with the default flags. 
:param block_size: BKZ block size """ easy_kwds = {} easy_kwds["block_size"] = block_size easy_kwds["strategies"] = default_strategy easy_kwds["flags"] = BKZ_DEFAULT|BKZ_AUTO_ABORT for k,v in kwds.iteritems(): if k == "flags": easy_kwds[k] ^= v else: easy_kwds[k] = v return BKZParam(**easy_kwds) fpylll-0.6.1/src/fpylll/fplll/decl.pxd000066400000000000000000000446211455321202600176550ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Fpylll datatypes These are mainly for internal consumption """ include "fpylll/config.pxi" from fpylll.gmp.mpz cimport mpz_t from fpylll.mpfr.mpfr cimport mpfr_t IF HAVE_QD: from fpylll.qd.qd cimport dd_real, qd_real from .fplll cimport dpe_t from .fplll cimport Z_NR, FP_NR from .fplll cimport ZZ_mat, MatGSOInterface, LLLReduction, BKZAutoAbort, BKZReduction, Enumeration from .fplll cimport Evaluator, FastEvaluator, ErrorBoundedEvaluator, FastErrorBoundedEvaluator, Pruner from libcpp.vector cimport vector ctypedef double d_t IF HAVE_LONG_DOUBLE: ctypedef long double ld_t IF HAVE_QD: ctypedef dd_real dd_t ctypedef qd_real qd_t ctypedef enum fplll_mat_gso_alg_type_t: mat_gso_gso_t = 0 mat_gso_gram_t = 1 IF HAVE_QD: ctypedef enum fplll_mat_gso_data_type_t: mat_gso_mpz_d = 1 mat_gso_mpz_ld = 2 mat_gso_mpz_dpe = 4 mat_gso_mpz_dd = 8 mat_gso_mpz_qd = 16 mat_gso_mpz_mpfr = 32 mat_gso_long_d = 64 mat_gso_long_ld = 128 mat_gso_long_dpe = 256 mat_gso_long_dd = 512 mat_gso_long_qd = 1024 mat_gso_long_mpfr = 2048 ELSE: ctypedef enum fplll_mat_gso_data_type_t: mat_gso_mpz_d = 1 mat_gso_mpz_ld = 2 mat_gso_mpz_dpe = 4 mat_gso_mpz_mpfr = 32 mat_gso_long_d = 64 mat_gso_long_ld = 128 mat_gso_long_dpe = 256 mat_gso_long_mpfr = 2048 IF HAVE_QD: ctypedef enum fplll_nr_type_t: nr_d = 1 nr_ld = 2 nr_dpe = 4 nr_dd = 8 nr_qd = 16 nr_mpfr = 32 ELSE: ctypedef enum fplll_nr_type_t: nr_d = 1 nr_ld = 2 nr_dpe = 4 nr_mpfr = 32 ctypedef enum fplll_z_type_t: z_long = 1 z_mpz = 2 ctypedef union zz_mat_core_t: ZZ_mat[long] *long ZZ_mat[mpz_t] *mpz ctypedef struct 
z_nr_t: Z_NR[long] long Z_NR[mpz_t] mpz IF HAVE_LONG_DOUBLE: IF HAVE_QD: # we cannot use a union because of non-trivial constructors ctypedef struct fp_nr_t: FP_NR[d_t] d FP_NR[ld_t] ld FP_NR[dpe_t] dpe FP_NR[dd_t] dd FP_NR[qd_t] qd FP_NR[mpfr_t] mpfr ELSE: ctypedef struct fp_nr_t: FP_NR[d_t] d FP_NR[ld_t] ld FP_NR[dpe_t] dpe FP_NR[mpfr_t] mpfr IF HAVE_QD: ctypedef union mat_gso_core_t: MatGSOInterface[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d MatGSOInterface[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld MatGSOInterface[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe MatGSOInterface[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd MatGSOInterface[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd MatGSOInterface[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr MatGSOInterface[Z_NR[long], FP_NR[d_t]] *long_d MatGSOInterface[Z_NR[long], FP_NR[ld_t]] *long_ld MatGSOInterface[Z_NR[long], FP_NR[dpe_t]] *long_dpe MatGSOInterface[Z_NR[long], FP_NR[dd_t]] *long_dd MatGSOInterface[Z_NR[long], FP_NR[qd_t]] *long_qd MatGSOInterface[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union mat_gso_core_t: MatGSOInterface[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d MatGSOInterface[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld MatGSOInterface[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe MatGSOInterface[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr MatGSOInterface[Z_NR[long], FP_NR[d_t]] *long_d MatGSOInterface[Z_NR[long], FP_NR[ld_t]] *long_ld MatGSOInterface[Z_NR[long], FP_NR[dpe_t]] *long_dpe MatGSOInterface[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union lll_reduction_core_t: LLLReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d LLLReduction[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld LLLReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe LLLReduction[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd LLLReduction[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd LLLReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr LLLReduction[Z_NR[long], FP_NR[d_t]] *long_d LLLReduction[Z_NR[long], FP_NR[ld_t]] *long_ld LLLReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe LLLReduction[Z_NR[long], FP_NR[dd_t]] *long_dd LLLReduction[Z_NR[long], 
FP_NR[qd_t]] *long_qd LLLReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union lll_reduction_core_t: LLLReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d LLLReduction[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld LLLReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe LLLReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr LLLReduction[Z_NR[long], FP_NR[d_t]] *long_d LLLReduction[Z_NR[long], FP_NR[ld_t]] *long_ld LLLReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe LLLReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union bkz_auto_abort_core_t: BKZAutoAbort[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZAutoAbort[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld BKZAutoAbort[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZAutoAbort[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd BKZAutoAbort[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd BKZAutoAbort[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZAutoAbort[Z_NR[long], FP_NR[d_t]] *long_d BKZAutoAbort[Z_NR[long], FP_NR[ld_t]] *long_ld BKZAutoAbort[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZAutoAbort[Z_NR[long], FP_NR[dd_t]] *long_dd BKZAutoAbort[Z_NR[long], FP_NR[qd_t]] *long_qd BKZAutoAbort[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union bkz_auto_abort_core_t: BKZAutoAbort[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZAutoAbort[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld BKZAutoAbort[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZAutoAbort[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZAutoAbort[Z_NR[long], FP_NR[d_t]] *long_d BKZAutoAbort[Z_NR[long], FP_NR[ld_t]] *long_ld BKZAutoAbort[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZAutoAbort[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union bkz_reduction_core_t: BKZReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZReduction[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld BKZReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZReduction[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd BKZReduction[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd BKZReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZReduction[Z_NR[long], FP_NR[d_t]] *long_d BKZReduction[Z_NR[long], FP_NR[ld_t]] *long_ld 
BKZReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZReduction[Z_NR[long], FP_NR[dd_t]] *long_dd BKZReduction[Z_NR[long], FP_NR[qd_t]] *long_qd BKZReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union bkz_reduction_core_t: BKZReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZReduction[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld BKZReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZReduction[Z_NR[long], FP_NR[d_t]] *long_d BKZReduction[Z_NR[long], FP_NR[ld_t]] *long_ld BKZReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union fast_evaluator_core_t: FastEvaluator[FP_NR[d_t]] *d FastEvaluator[FP_NR[ld_t]] *ld FastEvaluator[FP_NR[dpe_t]] *dpe FastEvaluator[FP_NR[dd_t]] *dd FastEvaluator[FP_NR[qd_t]] *qd FastErrorBoundedEvaluator *mpfr ELSE: ctypedef union fast_evaluator_core_t: FastEvaluator[FP_NR[d_t]] *d FastEvaluator[FP_NR[ld_t]] *ld FastEvaluator[FP_NR[dpe_t]] *dpe FastErrorBoundedEvaluator *mpfr IF HAVE_QD: ctypedef union evaluator_core_t: Evaluator[FP_NR[d_t]] *d Evaluator[FP_NR[ld_t]] *ld Evaluator[FP_NR[dpe_t]] *dpe Evaluator[FP_NR[dd_t]] *dd Evaluator[FP_NR[qd_t]] *qd ErrorBoundedEvaluator *mpfr ELSE: ctypedef union evaluator_core_t: Evaluator[FP_NR[d_t]] *d Evaluator[FP_NR[ld_t]] *ld Evaluator[FP_NR[dpe_t]] *dpe ErrorBoundedEvaluator *mpfr IF HAVE_QD: ctypedef union enumeration_core_t: Enumeration[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d Enumeration[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld Enumeration[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe Enumeration[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd Enumeration[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd Enumeration[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr Enumeration[Z_NR[long], FP_NR[d_t]] *long_d Enumeration[Z_NR[long], FP_NR[ld_t]] *long_ld Enumeration[Z_NR[long], FP_NR[dpe_t]] *long_dpe Enumeration[Z_NR[long], FP_NR[dd_t]] *long_dd Enumeration[Z_NR[long], FP_NR[qd_t]] *long_qd Enumeration[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: 
ctypedef union enumeration_core_t: Enumeration[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d Enumeration[Z_NR[mpz_t], FP_NR[ld_t]] *mpz_ld Enumeration[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe Enumeration[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr Enumeration[Z_NR[long], FP_NR[d_t]] *long_d Enumeration[Z_NR[long], FP_NR[ld_t]] *long_ld Enumeration[Z_NR[long], FP_NR[dpe_t]] *long_dpe Enumeration[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union pruner_core_t: Pruner[FP_NR[d_t]] *d Pruner[FP_NR[ld_t]] *ld Pruner[FP_NR[dpe_t]] *dpe Pruner[FP_NR[dd_t]] *dd Pruner[FP_NR[qd_t]] *qd Pruner[FP_NR[mpfr_t]] *mpfr ELSE: ctypedef union pruner_core_t: Pruner[FP_NR[d_t]] *d Pruner[FP_NR[ld_t]] *ld Pruner[FP_NR[dpe_t]] *dpe Pruner[FP_NR[mpfr_t]] *mpfr IF HAVE_QD: # we cannot use a union because of non-trivial constructors ctypedef struct vector_fp_nr_t: vector[FP_NR[d_t]] d vector[FP_NR[ld_t]] ld vector[FP_NR[dpe_t]] dpe vector[FP_NR[dd_t]] dd vector[FP_NR[qd_t]] qd vector[FP_NR[mpfr_t]] mpfr ELSE: ctypedef struct vector_fp_nr_t: vector[FP_NR[d_t]] d vector[FP_NR[ld_t]] ld vector[FP_NR[dpe_t]] dpe vector[FP_NR[mpfr_t]] mpfr ELSE: IF HAVE_QD: # we cannot use a union because of non-trivial constructors ctypedef struct fp_nr_t: FP_NR[d_t] d FP_NR[dpe_t] dpe FP_NR[dd_t] dd FP_NR[qd_t] qd FP_NR[mpfr_t] mpfr ELSE: ctypedef struct fp_nr_t: FP_NR[d_t] d FP_NR[dpe_t] dpe FP_NR[mpfr_t] mpfr IF HAVE_QD: ctypedef union mat_gso_core_t: MatGSOInterface[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d MatGSOInterface[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe MatGSOInterface[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd MatGSOInterface[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd MatGSOInterface[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr MatGSOInterface[Z_NR[long], FP_NR[d_t]] *long_d MatGSOInterface[Z_NR[long], FP_NR[dpe_t]] *long_dpe MatGSOInterface[Z_NR[long], FP_NR[dd_t]] *long_dd MatGSOInterface[Z_NR[long], FP_NR[qd_t]] *long_qd MatGSOInterface[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union mat_gso_core_t: MatGSOInterface[Z_NR[mpz_t], 
FP_NR[d_t]] *mpz_d MatGSOInterface[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe MatGSOInterface[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr MatGSOInterface[Z_NR[long], FP_NR[d_t]] *long_d MatGSOInterface[Z_NR[long], FP_NR[dpe_t]] *long_dpe MatGSOInterface[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union lll_reduction_core_t: LLLReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d LLLReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe LLLReduction[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd LLLReduction[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd LLLReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr LLLReduction[Z_NR[long], FP_NR[d_t]] *long_d LLLReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe LLLReduction[Z_NR[long], FP_NR[dd_t]] *long_dd LLLReduction[Z_NR[long], FP_NR[qd_t]] *long_qd LLLReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union lll_reduction_core_t: LLLReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d LLLReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe LLLReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr LLLReduction[Z_NR[long], FP_NR[d_t]] *long_d LLLReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe LLLReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union bkz_auto_abort_core_t: BKZAutoAbort[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZAutoAbort[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZAutoAbort[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd BKZAutoAbort[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd BKZAutoAbort[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZAutoAbort[Z_NR[long], FP_NR[d_t]] *long_d BKZAutoAbort[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZAutoAbort[Z_NR[long], FP_NR[dd_t]] *long_dd BKZAutoAbort[Z_NR[long], FP_NR[qd_t]] *long_qd BKZAutoAbort[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union bkz_auto_abort_core_t: BKZAutoAbort[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZAutoAbort[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZAutoAbort[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZAutoAbort[Z_NR[long], FP_NR[d_t]] *long_d BKZAutoAbort[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZAutoAbort[Z_NR[long], FP_NR[mpfr_t]] 
*long_mpfr IF HAVE_QD: ctypedef union bkz_reduction_core_t: BKZReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZReduction[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd BKZReduction[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd BKZReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZReduction[Z_NR[long], FP_NR[d_t]] *long_d BKZReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZReduction[Z_NR[long], FP_NR[dd_t]] *long_dd BKZReduction[Z_NR[long], FP_NR[qd_t]] *long_qd BKZReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union bkz_reduction_core_t: BKZReduction[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d BKZReduction[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe BKZReduction[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr BKZReduction[Z_NR[long], FP_NR[d_t]] *long_d BKZReduction[Z_NR[long], FP_NR[dpe_t]] *long_dpe BKZReduction[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union fast_evaluator_core_t: FastEvaluator[FP_NR[d_t]] *d FastEvaluator[FP_NR[dpe_t]] *dpe FastEvaluator[FP_NR[dd_t]] *dd FastEvaluator[FP_NR[qd_t]] *qd FastErrorBoundedEvaluator *mpfr ELSE: ctypedef union fast_evaluator_core_t: FastEvaluator[FP_NR[d_t]] *d FastEvaluator[FP_NR[dpe_t]] *dpe FastErrorBoundedEvaluator *mpfr IF HAVE_QD: ctypedef union evaluator_core_t: Evaluator[FP_NR[d_t]] *d Evaluator[FP_NR[dpe_t]] *dpe Evaluator[FP_NR[dd_t]] *dd Evaluator[FP_NR[qd_t]] *qd ErrorBoundedEvaluator *mpfr ELSE: ctypedef union evaluator_core_t: Evaluator[FP_NR[d_t]] *d Evaluator[FP_NR[dpe_t]] *dpe ErrorBoundedEvaluator *mpfr IF HAVE_QD: ctypedef union enumeration_core_t: Enumeration[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d Enumeration[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe Enumeration[Z_NR[mpz_t], FP_NR[dd_t]] *mpz_dd Enumeration[Z_NR[mpz_t], FP_NR[qd_t]] *mpz_qd Enumeration[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr Enumeration[Z_NR[long], FP_NR[d_t]] *long_d Enumeration[Z_NR[long], FP_NR[dpe_t]] *long_dpe Enumeration[Z_NR[long], FP_NR[dd_t]] *long_dd Enumeration[Z_NR[long], FP_NR[qd_t]] *long_qd 
Enumeration[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr ELSE: ctypedef union enumeration_core_t: Enumeration[Z_NR[mpz_t], FP_NR[d_t]] *mpz_d Enumeration[Z_NR[mpz_t], FP_NR[dpe_t]] *mpz_dpe Enumeration[Z_NR[mpz_t], FP_NR[mpfr_t]] *mpz_mpfr Enumeration[Z_NR[long], FP_NR[d_t]] *long_d Enumeration[Z_NR[long], FP_NR[dpe_t]] *long_dpe Enumeration[Z_NR[long], FP_NR[mpfr_t]] *long_mpfr IF HAVE_QD: ctypedef union pruner_core_t: Pruner[FP_NR[d_t]] *d Pruner[FP_NR[dpe_t]] *dpe Pruner[FP_NR[dd_t]] *dd Pruner[FP_NR[qd_t]] *qd Pruner[FP_NR[mpfr_t]] *mpfr ELSE: ctypedef union pruner_core_t: Pruner[FP_NR[d_t]] *d Pruner[FP_NR[dpe_t]] *dpe Pruner[FP_NR[mpfr_t]] *mpfr IF HAVE_QD: # we cannot use a union because of non-trivial constructors ctypedef struct vector_fp_nr_t: vector[FP_NR[d_t]] d vector[FP_NR[dpe_t]] dpe vector[FP_NR[dd_t]] dd vector[FP_NR[qd_t]] qd vector[FP_NR[mpfr_t]] mpfr ELSE: ctypedef struct vector_fp_nr_t: vector[FP_NR[d_t]] d vector[FP_NR[dpe_t]] dpe vector[FP_NR[mpfr_t]] mpfr ctypedef struct vector_z_nr_t: vector[Z_NR[mpz_t]] mpz vector[Z_NR[long]] long fpylll-0.6.1/src/fpylll/fplll/enumeration.pxd000066400000000000000000000006101455321202600212620ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .decl cimport enumeration_core_t, evaluator_core_t, fplll_mat_gso_data_type_t from .fplll cimport PyCallbackEvaluatorWrapper as PyCallbackEvaluatorWrapper_c from .gso cimport MatGSO cdef class Enumeration: cdef readonly MatGSO M cdef enumeration_core_t _core cdef evaluator_core_t _eval_core cdef PyCallbackEvaluatorWrapper_c *_callback_wrapper fpylll-0.6.1/src/fpylll/fplll/enumeration.pyx000066400000000000000000000767641455321202600213360ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from cython.operator cimport dereference as deref, preincrement as inc from libcpp.vector cimport vector from libcpp.pair cimport pair from libcpp cimport bool from cysignals.signals cimport sig_on, sig_off from .fplll cimport EvaluatorStrategy as 
EvaluatorStrategy_c from .fplll cimport EVALSTRATEGY_BEST_N_SOLUTIONS from .fplll cimport EVALSTRATEGY_FIRST_N_SOLUTIONS from .fplll cimport EVALSTRATEGY_OPPORTUNISTIC_N_SOLUTIONS from .fplll cimport Enumeration as Enumeration_c from .fplll cimport FastEvaluator as FastEvaluator_c from .fplll cimport CallbackEvaluator as CallbackEvaluator_c from .fplll cimport Evaluator as Evaluator_c from .fplll cimport FastErrorBoundedEvaluator as FastErrorBoundedEvaluator_c from .fplll cimport ErrorBoundedEvaluator as ErrorBoundedEvaluator_c from .fplll cimport MatGSOInterface as MatGSOInterface_c from .fplll cimport Z_NR, FP_NR, mpz_t from .fplll cimport EVALMODE_SV from .fplll cimport dpe_t from fpylll.mpfr.mpfr cimport mpfr_t from .decl cimport mat_gso_mpz_d, mat_gso_mpz_ld, mat_gso_mpz_dpe, mat_gso_mpz_mpfr, fp_nr_t from .decl cimport mat_gso_long_d, mat_gso_long_ld, mat_gso_long_dpe, mat_gso_long_mpfr from .decl cimport mat_gso_gso_t, mat_gso_gram_t from .decl cimport d_t from .fplll cimport FT_DOUBLE, FT_LONG_DOUBLE, FT_DPE, FT_MPFR, FloatType from .fplll cimport multimap from .fplll cimport FPLLL_MAX_ENUM_DIM from libcpp cimport bool cdef public bool evaluator_callback_call_obj(obj, int n, double *new_sol_coord): cdef list new_sol_coord_ = [] for i in range(n): new_sol_coord_.append(new_sol_coord[i]) return obj(new_sol_coord_); IF HAVE_LONG_DOUBLE: from .decl cimport ld_t IF HAVE_QD: from .decl cimport mat_gso_mpz_dd, mat_gso_mpz_qd, mat_gso_long_dd, mat_gso_long_qd, dd_t, qd_t from .fplll cimport FT_DD, FT_QD class EnumerationError(Exception): pass class EvaluatorStrategy: """ Strategies to update the enumeration radius and deal with multiple solutions. Possible values are: - ``BEST_N_SOLUTIONS`` Starting with the nr_solutions-th solution, every time a new solution is found the enumeration bound is updated to the length of the longest solution. If more than nr_solutions were found, the longest is dropped. 
- ``OPPORTUNISTIC_N_SOLUTIONS`` Every time a solution is found, update the enumeration distance to the length of the solution. If more than nr_solutions were found, the longest is dropped. - ``FIRST_N_SOLUTIONS`` The enumeration bound is not updated. As soon as nr_solutions are found, enumeration stops. """ BEST_N_SOLUTIONS = EVALSTRATEGY_BEST_N_SOLUTIONS OPPORTUNISTIC_N_SOLUTIONS = EVALSTRATEGY_OPPORTUNISTIC_N_SOLUTIONS FIRST_N_SOLUTIONS = EVALSTRATEGY_FIRST_N_SOLUTIONS cdef class Enumeration: def __cinit__(self, MatGSO M, int nr_solutions=1, strategy=EvaluatorStrategy.BEST_N_SOLUTIONS, bool sub_solutions=False, callbackf=None): """Create new enumeration object :param MatGSO M: GSO matrix :param nr_solutions: Number of solutions to be returned by enumeration :param strategy: EvaluatorStrategy to use when finding new solutions :param sub_solutions: Compute sub-solutions :param callbackf: A predicate to accept or reject a candidate solution """ cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[d_t]] *m_mpz_d IF HAVE_LONG_DOUBLE: cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[ld_t]] *m_mpz_ld cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[dpe_t]] *m_mpz_dpe IF HAVE_QD: cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[dd_t]] *m_mpz_dd cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[qd_t]] *m_mpz_qd cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[mpfr_t]] *m_mpz_mpfr cdef MatGSOInterface_c[Z_NR[long], FP_NR[d_t]] *m_l_d IF HAVE_LONG_DOUBLE: cdef MatGSOInterface_c[Z_NR[long], FP_NR[ld_t]] *m_l_ld cdef MatGSOInterface_c[Z_NR[long], FP_NR[dpe_t]] *m_l_dpe IF HAVE_QD: cdef MatGSOInterface_c[Z_NR[long], FP_NR[dd_t]] *m_l_dd cdef MatGSOInterface_c[Z_NR[long], FP_NR[qd_t]] *m_l_qd cdef MatGSOInterface_c[Z_NR[long], FP_NR[mpfr_t]] *m_l_mpfr self.M = M if M._type == mat_gso_mpz_d: m_mpz_d = M._core.mpz_d if callbackf is None: self._eval_core.d = new FastEvaluator_c[FP_NR[double]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.d = 
new CallbackEvaluator_c[FP_NR[double]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.mpz_d = new Enumeration_c[Z_NR[mpz_t], FP_NR[double]](m_mpz_d[0], self._eval_core.d[0]) elif M._type == mat_gso_long_d: m_l_d = M._core.long_d if callbackf is None: self._eval_core.d = new FastEvaluator_c[FP_NR[d_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.d = new CallbackEvaluator_c[FP_NR[d_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.long_d = new Enumeration_c[Z_NR[long], FP_NR[double]](m_l_d[0], self._eval_core.d[0]) elif M._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: m_mpz_ld = M._core.mpz_ld if callbackf is None: self._eval_core.ld = new FastEvaluator_c[FP_NR[ld_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.ld = new CallbackEvaluator_c[FP_NR[ld_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.mpz_ld = new Enumeration_c[Z_NR[mpz_t], FP_NR[ld_t]](m_mpz_ld[0], self._eval_core.ld[0]) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) elif M._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: m_l_ld = M._core.long_ld if callbackf is None: self._eval_core.ld = new FastEvaluator_c[FP_NR[ld_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.ld = new CallbackEvaluator_c[FP_NR[ld_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.long_ld = new Enumeration_c[Z_NR[long], FP_NR[ld_t]](m_l_ld[0], self._eval_core.ld[0]) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) elif M._type == mat_gso_mpz_dpe: m_mpz_dpe = M._core.mpz_dpe if callbackf is None: self._eval_core.dpe = new FastEvaluator_c[FP_NR[dpe_t]](nr_solutions, strategy, sub_solutions) else: 
self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.dpe = new CallbackEvaluator_c[FP_NR[dpe_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.mpz_dpe = new Enumeration_c[Z_NR[mpz_t], FP_NR[dpe_t]](m_mpz_dpe[0], self._eval_core.dpe[0]) elif M._type == mat_gso_long_dpe: m_l_dpe = M._core.long_dpe if callbackf is None: self._eval_core.dpe = new FastEvaluator_c[FP_NR[dpe_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.dpe = new CallbackEvaluator_c[FP_NR[dpe_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.long_dpe = new Enumeration_c[Z_NR[long], FP_NR[dpe_t]](m_l_dpe[0], self._eval_core.dpe[0]) elif M._type == mat_gso_mpz_mpfr: if callbackf is not None: raise NotImplementedError("Callbacks are not implemented for MPFR.") m_mpz_mpfr = M._core.mpz_mpfr self._eval_core.mpfr = new FastErrorBoundedEvaluator_c(M.d, M._core.mpz_mpfr.get_mu_matrix(), M._core.mpz_mpfr.get_r_matrix(), EVALMODE_SV, nr_solutions, strategy, sub_solutions) self._core.mpz_mpfr = new Enumeration_c[Z_NR[mpz_t], FP_NR[mpfr_t]](m_mpz_mpfr[0], self._eval_core.mpfr[0]) elif M._type == mat_gso_long_mpfr: if callbackf is not None: raise NotImplementedError("Callbacks are not implemented for MPFR.") m_l_mpfr = M._core.long_mpfr self._eval_core.mpfr = new FastErrorBoundedEvaluator_c(M.d, M._core.long_mpfr.get_mu_matrix(), M._core.long_mpfr.get_r_matrix(), EVALMODE_SV, nr_solutions, strategy, sub_solutions) self._core.long_mpfr = new Enumeration_c[Z_NR[long], FP_NR[mpfr_t]](m_l_mpfr[0], self._eval_core.mpfr[0]) else: IF HAVE_QD: if M._type == mat_gso_mpz_dd: m_mpz_dd = M._core.mpz_dd if callbackf is None: self._eval_core.dd = new FastEvaluator_c[FP_NR[dd_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.dd = new 
CallbackEvaluator_c[FP_NR[dd_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.mpz_dd = new Enumeration_c[Z_NR[mpz_t], FP_NR[dd_t]](m_mpz_dd[0], self._eval_core.dd[0]) elif M._type == mat_gso_mpz_qd: m_mpz_qd = M._core.mpz_qd if callbackf is None: self._eval_core.qd = new FastEvaluator_c[FP_NR[qd_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.qd = new CallbackEvaluator_c[FP_NR[qd_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.mpz_qd = new Enumeration_c[Z_NR[mpz_t], FP_NR[qd_t]](m_mpz_qd[0], self._eval_core.qd[0]) elif M._type == mat_gso_long_dd: m_l_dd = M._core.long_dd if callbackf is None: self._eval_core.dd = new FastEvaluator_c[FP_NR[dd_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.dd = new CallbackEvaluator_c[FP_NR[dd_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.long_dd = new Enumeration_c[Z_NR[long], FP_NR[dd_t]](m_l_dd[0], self._eval_core.dd[0]) elif M._type == mat_gso_long_qd: m_l_qd = M._core.long_qd if callbackf is None: self._eval_core.qd = new FastEvaluator_c[FP_NR[qd_t]](nr_solutions, strategy, sub_solutions) else: self._callback_wrapper = new PyCallbackEvaluatorWrapper_c(callbackf) self._eval_core.qd = new CallbackEvaluator_c[FP_NR[qd_t]]( self._callback_wrapper[0], NULL, nr_solutions, strategy, sub_solutions) self._core.long_qd = new Enumeration_c[Z_NR[long], FP_NR[qd_t]](m_l_qd[0], self._eval_core.qd[0]) else: raise RuntimeError("MatGSO object '%s' has no core."%self) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) def __dealloc__(self): if self.M._type == mat_gso_mpz_d: del self._eval_core.d del self._core.mpz_d IF HAVE_LONG_DOUBLE: if self.M._type == mat_gso_mpz_ld: del self._eval_core.ld del self._core.mpz_ld if self.M._type == 
mat_gso_mpz_dpe: del self._eval_core.dpe del self._core.mpz_dpe IF HAVE_QD: if self.M._type == mat_gso_mpz_dd: del self._eval_core.dd del self._core.mpz_dd if self.M._type == mat_gso_mpz_qd: del self._eval_core.qd del self._core.mpz_qd if self.M._type == mat_gso_mpz_mpfr: del self._eval_core.mpfr del self._core.mpz_mpfr if self.M._type == mat_gso_long_d: del self._eval_core.d del self._core.long_d IF HAVE_LONG_DOUBLE: if self.M._type == mat_gso_long_ld: del self._eval_core.ld del self._core.long_ld if self.M._type == mat_gso_long_dpe: del self._eval_core.dpe del self._core.long_dpe IF HAVE_QD: if self.M._type == mat_gso_long_dd: del self._eval_core.dd del self._core.long_dd if self.M._type == mat_gso_long_qd: del self._eval_core.qd del self._core.long_qd if self.M._type == mat_gso_long_mpfr: del self._eval_core.mpfr del self._core.long_mpfr if self._callback_wrapper: del self._callback_wrapper def enumerate(self, int first, int last, max_dist, max_dist_expo, target=None, subtree=None, pruning=None, dual=False, subtree_reset=False): """Run enumeration on `M` :param int first: first row :param int last: last row (exclusive) :param max_dist: length bound :param max_dist_expo: exponent of length bound :param target: target coordinates for CVP/BDD or ``None`` for SVP :param subtree: :param pruning: pruning parameters :param dual: run enumeration in the primal or dual lattice. 
:param subtree_reset: :returns: list of pairs containing the solutions' coefficient vectors and their lengths """ cdef int block_size = last-first cdef fp_nr_t tmp cdef vector[FP_NR[d_t]] target_coord_d IF HAVE_LONG_DOUBLE: cdef vector[FP_NR[ld_t]] target_coord_ld cdef vector[FP_NR[dpe_t]] target_coord_dpe IF HAVE_QD: cdef vector[FP_NR[dd_t]] target_coord_dd cdef vector[FP_NR[qd_t]] target_coord_qd cdef vector[FP_NR[mpfr_t]] target_coord_mpfr cdef vector[double] sub_tree_ if subtree is not None: for it in target: sub_tree_.push_back(float(it)) cdef vector[double] pruning_ if not pruning: for i in range(block_size): pruning_.push_back(1) else: for i in range(block_size): pruning_.push_back(pruning[i]) cdef double max_dist__ = max_dist cdef FP_NR[d_t] max_dist_d = max_dist__ IF HAVE_LONG_DOUBLE: cdef FP_NR[ld_t] max_dist_ld = max_dist__ cdef FP_NR[dpe_t] max_dist_dpe = max_dist__ IF HAVE_QD: cdef FP_NR[dd_t] max_dist_dd = max_dist__ cdef FP_NR[qd_t] max_dist_qd = max_dist__ cdef FP_NR[mpfr_t] max_dist_mpfr = max_dist__ solutions = [] cdef multimap[FP_NR[double], vector[FP_NR[double]]].reverse_iterator solutions_d IF HAVE_LONG_DOUBLE: cdef multimap[FP_NR[longdouble], vector[FP_NR[longdouble]]].reverse_iterator solutions_ld cdef multimap[FP_NR[dpe_t], vector[FP_NR[dpe_t]]].reverse_iterator solutions_dpe IF HAVE_QD: cdef multimap[FP_NR[dd_t], vector[FP_NR[dd_t]]].reverse_iterator solutions_dd cdef multimap[FP_NR[qd_t], vector[FP_NR[qd_t]]].reverse_iterator solutions_qd cdef multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]].reverse_iterator solutions_mpfr if self.M._type == mat_gso_mpz_d or self.M._type == mat_gso_long_d: if target is not None: for it in target: tmp.d = float(it) target_coord_d.push_back(tmp.d) sig_on() if self.M._type == mat_gso_mpz_d: self._core.mpz_d.enumerate(first, last, max_dist_d, max_dist_expo, target_coord_d, sub_tree_, pruning_, dual) else: self._core.long_d.enumerate(first, last, max_dist_d, max_dist_expo, target_coord_d, sub_tree_, pruning_, 
dual) sig_off() if not self._eval_core.d.size(): raise EnumerationError("No solution found.") solutions_d = self._eval_core.d.begin() while solutions_d != self._eval_core.d.end(): cur_dist = deref(solutions_d).first.get_d() cur_sol = [] for j in range(deref(solutions_d).second.size()): cur_sol.append(deref(solutions_d).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_d) IF HAVE_LONG_DOUBLE: if self.M._type == mat_gso_mpz_ld or self.M._type == mat_gso_long_ld: if target is not None: for it in target: tmp.ld = float(it) target_coord_ld.push_back(tmp.ld) sig_on() if self.M._type == mat_gso_mpz_ld: self._core.mpz_ld.enumerate(first, last, max_dist_ld, max_dist_expo, target_coord_ld, sub_tree_, pruning_, dual) else: self._core.long_ld.enumerate(first, last, max_dist_ld, max_dist_expo, target_coord_ld, sub_tree_, pruning_, dual) sig_off() if not self._eval_core.ld.size(): raise EnumerationError("No solution found.") solutions_ld = self._eval_core.ld.begin() while solutions_ld != self._eval_core.ld.end(): cur_dist = deref(solutions_ld).first.get_d() cur_sol = [] for j in range(deref(solutions_ld).second.size()): cur_sol.append(deref(solutions_ld).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_ld) if self.M._type == mat_gso_mpz_dpe or self.M._type == mat_gso_long_dpe: if target is not None: for it in target: tmp.dpe = float(it) target_coord_dpe.push_back(tmp.dpe) sig_on() if self.M._type == mat_gso_mpz_dpe: self._core.mpz_dpe.enumerate(first, last, max_dist_dpe, max_dist_expo, target_coord_dpe, sub_tree_, pruning_, dual) else: self._core.long_dpe.enumerate(first, last, max_dist_dpe, max_dist_expo, target_coord_dpe, sub_tree_, pruning_, dual) sig_off() if not self._eval_core.dpe.size(): raise EnumerationError("No solution found.") solutions_dpe = self._eval_core.dpe.begin() while solutions_dpe != self._eval_core.dpe.end(): cur_dist = deref(solutions_dpe).first.get_d() cur_sol = [] for j in 
range(deref(solutions_dpe).second.size()): cur_sol.append(deref(solutions_dpe).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_dpe) IF HAVE_QD: if self.M._type == mat_gso_mpz_dd or self.M._type == mat_gso_long_dd: if target is not None: for it in target: tmp.dd = float(it) target_coord_dd.push_back(tmp.dd) sig_on() if self.M._type == mat_gso_mpz_dd: self._core.mpz_dd.enumerate(first, last, max_dist_dd, max_dist_expo, target_coord_dd, sub_tree_, pruning_, dual) else: self._core.long_dd.enumerate(first, last, max_dist_dd, max_dist_expo, target_coord_dd, sub_tree_, pruning_, dual) sig_off() if not self._eval_core.dd.size(): raise EnumerationError("No solution found.") solutions_dd = self._eval_core.dd.begin() while solutions_dd != self._eval_core.dd.end(): cur_dist = deref(solutions_dd).first.get_d() cur_sol = [] for j in range(deref(solutions_dd).second.size()): cur_sol.append(deref(solutions_dd).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_dd) if self.M._type == mat_gso_mpz_qd or self.M._type == mat_gso_long_qd: if target is not None: for it in target: tmp.qd = float(it) target_coord_qd.push_back(tmp.qd) sig_on() if self.M._type == mat_gso_mpz_qd: self._core.mpz_qd.enumerate(first, last, max_dist_qd, max_dist_expo, target_coord_qd, sub_tree_, pruning_, dual) else: self._core.long_qd.enumerate(first, last, max_dist_qd, max_dist_expo, target_coord_qd, sub_tree_, pruning_, dual) sig_off() if not self._eval_core.qd.size(): raise EnumerationError("No solution found.") solutions_qd = self._eval_core.qd.begin() while solutions_qd != self._eval_core.qd.end(): cur_dist = deref(solutions_qd).first.get_d() cur_sol = [] for j in range(deref(solutions_qd).second.size()): cur_sol.append(deref(solutions_qd).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_qd) if self.M._type == mat_gso_mpz_mpfr or self.M._type == mat_gso_long_mpfr: if target is not None: for it in target: tmp.mpfr = 
float(it) target_coord_mpfr.push_back(tmp.mpfr) sig_on() if self.M._type == mat_gso_mpz_mpfr: self._core.mpz_mpfr.enumerate(first, last, max_dist_mpfr, max_dist_expo, target_coord_mpfr, sub_tree_, pruning_, dual) else: self._core.long_mpfr.enumerate(first, last, max_dist_mpfr, max_dist_expo, target_coord_mpfr, sub_tree_, pruning_, dual) sig_off() if not self._eval_core.mpfr.size(): raise EnumerationError("No solution found.") solutions_mpfr = self._eval_core.mpfr.begin() while solutions_mpfr != self._eval_core.mpfr.end(): cur_dist = deref(solutions_mpfr).first.get_d() cur_sol = [] for j in range(deref(solutions_mpfr).second.size()): cur_sol.append(deref(solutions_mpfr).second[j].get_d()) solutions.append((cur_dist, tuple(cur_sol))) inc(solutions_mpfr) if solutions == []: raise NotImplementedError("GSO datatype not implemented.") return solutions @property def sub_solutions(self): """ Return sub-solutions computed in last enumeration call. >>> from fpylll import * >>> FPLLL.set_random_seed(1337) >>> _ = FPLLL.set_threads(1) >>> A = IntegerMatrix.random(80, "qary", bits=30, k=40) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pruning = Pruning.run(M.get_r(0, 0), 2**40, M.r()[:30], 0.8) >>> enum = Enumeration(M, strategy=EvaluatorStrategy.BEST_N_SOLUTIONS, sub_solutions=True) >>> _ = enum.enumerate(0, 30, 0.999*M.get_r(0, 0), 0, pruning=pruning.coefficients) >>> [int(round(a)) for a,b in enum.sub_solutions[:5]] [5569754193, 5556022462, 5083806188, 5022873440, 4260865083] """ cdef list sub_solutions = [] cdef vector[pair[FP_NR[d_t], vector[FP_NR[d_t]]]].iterator _sub_solutions_d if self.M._type == mat_gso_mpz_d or self.M._type == mat_gso_long_d: _sub_solutions_d = self._eval_core.d.sub_solutions.begin() while _sub_solutions_d != self._eval_core.d.sub_solutions.end(): cur_dist = deref(_sub_solutions_d).first.get_d() if cur_dist == 0.0: cur_dist = None cur_sol = [] for j in range(deref(_sub_solutions_d).second.size()): 
cur_sol.append(deref(_sub_solutions_d).second[j].get_d()) sub_solutions.append(tuple([cur_dist, tuple(cur_sol)])) inc(_sub_solutions_d) else: raise NotImplementedError return tuple(sub_solutions) def get_nodes(self, level=None): """ Return number of visited nodes in last enumeration call. :param level: return for ``level`` except when ``None`` in which case the sum is returned. """ cdef int _level = -1 if level is not None: if level < -1 or level >= FPLLL_MAX_ENUM_DIM: raise ValueError("Level {level} out of bounds.".format(level=level)) _level = level if self.M._type == mat_gso_mpz_d: return self._core.mpz_d.get_nodes(_level) IF HAVE_LONG_DOUBLE: if self.M._type == mat_gso_mpz_ld: return self._core.mpz_ld.get_nodes(_level) if self.M._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.get_nodes(_level) IF HAVE_QD: if self.M._type == mat_gso_mpz_dd: return self._core.mpz_dd.get_nodes(_level) if self.M._type == mat_gso_mpz_qd: return self._core.mpz_qd.get_nodes(_level) if self.M._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.get_nodes(_level) if self.M._type == mat_gso_long_d: return self._core.long_d.get_nodes(_level) IF HAVE_LONG_DOUBLE: if self.M._type == mat_gso_long_ld: return self._core.long_ld.get_nodes(_level) if self.M._type == mat_gso_long_dpe: return self._core.long_dpe.get_nodes(_level) IF HAVE_QD: if self.M._type == mat_gso_long_dd: return self._core.long_dd.get_nodes(_level) if self.M._type == mat_gso_long_qd: return self._core.long_qd.get_nodes(_level) if self.M._type == mat_gso_long_mpfr: return self._core.long_mpfr.get_nodes(_level) fpylll-0.6.1/src/fpylll/fplll/enumeration_callback_helper.h000066400000000000000000000030471455321202600241000ustar00rootroot00000000000000/** Based on https://stackoverflow.com/questions/39044063/pass-a-closure-from-cython-to-c */ #include #include // TODO annoyingly Cython 3.0 insists on this gone, Cython < 3.0 insists on this being present // extern "C" { bool evaluator_callback_call_obj(PyObject *obj, int n, 
double *new_sol_coord); // } class PyCallbackEvaluatorWrapper { public: // constructors and destructors mostly do reference counting PyCallbackEvaluatorWrapper(PyObject *o) : held(o) { Py_XINCREF(o); } PyCallbackEvaluatorWrapper(const PyCallbackEvaluatorWrapper &rhs) : PyCallbackEvaluatorWrapper(rhs.held) { } PyCallbackEvaluatorWrapper(PyCallbackEvaluatorWrapper &&rhs) : held(rhs.held) { rhs.held = 0; } // need no-arg constructor to stack allocate in Cython PyCallbackEvaluatorWrapper() : PyCallbackEvaluatorWrapper(nullptr) { } ~PyCallbackEvaluatorWrapper() { Py_XDECREF(held); } PyCallbackEvaluatorWrapper &operator=(const PyCallbackEvaluatorWrapper &rhs) { PyCallbackEvaluatorWrapper tmp = rhs; return (*this = std::move(tmp)); } PyCallbackEvaluatorWrapper &operator=(PyCallbackEvaluatorWrapper &&rhs) { held = rhs.held; rhs.held = 0; return *this; } bool operator()(size_t n, fplll::enumf *new_sol_coord, void *ctx) { if (held) // nullptr check { // note, no way of checking for errors until you return to Python return evaluator_callback_call_obj(held, n, new_sol_coord); } return false; } private: PyObject *held; }; fpylll-0.6.1/src/fpylll/fplll/fplll.pxd000066400000000000000000001016471455321202600200610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # General Includes from fpylll.gmp.mpz cimport mpz_t from fpylll.mpfr.mpfr cimport mpfr_t from fpylll.gmp.random cimport gmp_randstate_t from libcpp.vector cimport vector from libcpp.string cimport string from libcpp.pair cimport pair from libcpp cimport bool from libcpp.functional cimport function cdef extern from "" namespace "std": cdef cppclass multimap[T, U]: cppclass iterator: pair[T,U]& operator*() iterator operator++() iterator operator--() bint operator==(iterator) bint operator!=(iterator) iterator operator=() cppclass reverse_iterator: pair[T,U]& operator*() iterator operator++() iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) iterator operator=() map() U& 
operator[](T&) U& at(T&) iterator begin() reverse_iterator rbegin() iterator end() reverse_iterator rend() size_t count(T&) bint empty() void erase(iterator) void erase(iterator, iterator) size_t erase(T&) iterator find(T&) pair[iterator, bint] insert(pair[T,U]) size_t size() cdef extern from "fplll/fplll_config.h": """ #ifdef FPLLL_WITH_RECURSIVE_ENUM #define FPLLL_HAVE_RECURSIVE_ENUM 1 #else #define FPLLL_HAVE_RECURSIVE_ENUM 0 #endif """ int FPLLL_MAJOR_VERSION int FPLLL_MINOR_VERSION int FPLLL_MICRO_VERSION int FPLLL_MAX_ENUM_DIM bool FPLLL_HAVE_RECURSIVE_ENUM int FPLLL_MAX_PARALLEL_ENUM_DIM # # Numbers cdef extern from "fplll/nr/nr.h" namespace "fplll": ctypedef double enumf cdef cppclass Z_NR[T]: T& get_data() nogil void set "operator=" (T d) nogil double get_d() nogil long exponent() nogil void set_str(const char* s) nogil int cmp(const Z_NR[T]& m) nogil int sgn() nogil void operator=(const Z_NR[T]& z) nogil void operator=(const mpz_t& z) nogil void operator=(long i) nogil int operator<(const Z_NR[T]& a) nogil int operator<(long a) nogil int operator>(const Z_NR[T]& a) nogil int operator>(long a) nogil int operator<=(const Z_NR[T]& a) nogil int operator<=(long a) nogil int operator>=(const Z_NR[T]& a) nogil int operator>=(long a) nogil int operator==(const Z_NR[T]& a) nogil int operator==(long a) nogil int operator!=(const Z_NR[T]& a) nogil int operator!=(long a) nogil void add(const Z_NR[T]& a, const Z_NR[T]& b) nogil void add_ui(const Z_NR[T]& a, unsigned int b) nogil void sub(const Z_NR[T]& a, const Z_NR[T]& b) nogil void sub_ui(const Z_NR[T]& a, unsigned int b) nogil void neg(const Z_NR[T]& a) nogil void mul(const Z_NR[T]& a, const Z_NR[T]& b) nogil void mul_si(const Z_NR[T]& a, long b) nogil void mul_ui(const Z_NR[T]& a, unsigned long b) nogil void mul_2si(const Z_NR[T]& a, long b) nogil void div_2si(const Z_NR[T]& a, long b) nogil void addmul(const Z_NR[T]& a, const Z_NR[T]& b) nogil void addmul_ui(const Z_NR[T]& a, unsigned long b) nogil void 
addmul_si(const Z_NR[T]& a, long b) nogil void submul(const Z_NR[T]& a, const Z_NR[T]& b) nogil void submul_ui(const Z_NR[T]& a, unsigned long b) nogil void abs(const Z_NR[T]& a) nogil void swap(Z_NR[T]& a) nogil void randb(int bits) nogil void randb_si(int bits) nogil void randm(const Z_NR[T]& max) nogil void randm_si(const Z_NR[T]& max) nogil cdef cppclass FP_NR[T]: T& get_data() nogil double get_d() nogil inline void operator=(const FP_NR[T]& a) nogil inline void operator=(double a) nogil inline void operator=(const char *s) nogil @staticmethod unsigned int get_prec() nogil @staticmethod unsigned int set_prec(unsigned int) nogil cdef extern from "fplll/nr/nr.h": cdef struct dpe_struct: pass ctypedef dpe_struct *dpe_t # Random Numbers cdef extern from "fplll/nr/nr.h" namespace "fplll": cdef cppclass RandGen: @staticmethod void init() @staticmethod void init_with_seed(unsigned long seed) @staticmethod void init_with_time() @staticmethod void init_with_time2() @staticmethod int get_initialized() @staticmethod gmp_randstate_t& get_gmp_state() # Definitions & Enums cdef extern from "fplll/defs.h" namespace "fplll": cdef enum RedStatus: RED_SUCCESS RED_GSO_FAILURE RED_BABAI_FAILURE RED_LLL_FAILURE RED_ENUM_FAILURE RED_BKZ_FAILURE RED_BKZ_TIME_LIMIT RED_BKZ_LOOPS_LIMIT RED_STATUS_MAX cdef enum LLLFlags: LLL_VERBOSE LLL_EARLY_RED LLL_SIEGEL LLL_DEFAULT cdef enum BKZFlags: BKZ_DEFAULT BKZ_VERBOSE BKZ_NO_LLL BKZ_MAX_LOOPS BKZ_MAX_TIME BKZ_BOUNDED_LLL BKZ_AUTO_ABORT BKZ_DUMP_GSO BKZ_GH_BND BKZ_SD_VARIANT BKZ_SLD_RED cdef enum LLLMethod: LM_WRAPPER LM_PROVED LM_HEURISTIC LM_FAST cdef enum SVPMethod: SVPM_FAST SVPM_PROVED cdef enum SVPFlags: SVP_DEFAULT SVP_VERBOSE SVP_OVERRIDE_BND cdef enum CVPMethod: CVPM_FAST CVPM_PROVED cdef enum CVPFlags: CVP_DEFAULT CVP_VERBOSE cdef enum IntType: ZT_MPZ ZT_LONG ZT_DOUBLE cdef enum FloatType: FT_DEFAULT FT_DOUBLE FT_LONG_DOUBLE FT_DD FT_QD FT_DPE FT_MPFR cdef enum EvaluatorMode: EVALMODE_SV EVALMODE_CV EVALMODE_COUNT EVALMODE_PRINT cdef 
double LLL_DEF_DELTA cdef double LLL_DEF_ETA const double BKZ_DEF_AUTO_ABORT_SCALE const int BKZ_DEF_AUTO_ABORT_MAX_NO_DEC const double BKZ_DEF_GH_FACTOR const double BKZ_DEF_MIN_SUCCESS_PROBABILITY const int BKZ_DEF_RERANDOMIZATION_DENSITY # Vectors (only used in some places) cdef extern from "fplll/nr/numvect.h" namespace "fplll": cdef cppclass NumVect[T]: cppclass iterator: iterator operator++() iterator operator--() bint operator==(iterator) bint operator!=(iterator) iterator operator=() NumVect() NumVect(const NumVect[T]& v) NumVect(int size) NumVect(int size, T &t) void operator=(NumVect &v) void swap(NumVect &v) const iterator begin() iterator end() int size() bool empty() void resize(int size) void resize(int size, const T &t) void gen_zero(int size) void push_back(const T &t) void pop_back() T &front() T &back() void extend(int maxSize) void clear() T &operator[](int i) void add(const NumVect[T] &v, int n) void add(const NumVect[T] &v) void sub(const NumVect[T] &v, int n) void sub(const NumVect[T] &v) void mul(const NumVect[T] &v, int n, T c) void mul(const NumVect[T] &v, T c) void addmul(const NumVect[T] &v, T x, int n) void addmul(const NumVect[T] &v, T x) void addmul_2exp(const NumVect[T] &v, const T &x, long expo, T &tmp) void addmul_2exp(const NumVect[T] &v, const T &x, long expo, int n, T &tmp) void addmul_si(const NumVect[T] &v, long x) void addmul_si(const NumVect[T] &v, long x, int n) void addmul_si_2exp(const NumVect[T] &v, long x, long expo, T &tmp) void addmul_si_2exp(const NumVect[T] &v, long x, long expo, int n, T &tmp) # (v[first],...,v[last]) becomes (v[first+1],...,v[last],v[first]) */ void rotate_left(int first, int last) # (v[first],...,v[last]) becomes (v[last],v[first],...,v[last-1]) */ void rotate_right(int first, int last) # Returns expo >= 0 such that all elements are < 2^expo. 
long get_max_exponent() void fill(long value) bool is_zero(int fromCol = 0) const int size_nz() const # Matrices over the Integers cdef extern from "fplll/nr/matrix.h" namespace "fplll": cdef cppclass MatrixRow[T]: T& operator[](int i) nogil int size() nogil int is_zero() nogil int is_zero(int frm) nogil int size_nz() nogil void fill(long value) nogil void add(const MatrixRow[T] v) nogil void add(const MatrixRow[T] v, int n) nogil void sub(const MatrixRow[T] v) nogil void sub(const MatrixRow[T] v, int n) nogil void addmul_2exp(const MatrixRow[T]& v, const T& x, long expo, T& tmp) nogil void addmul_2exp(const MatrixRow[T]& v, const T& x, long expo, int n, T& tmp) nogil void addmul_si(const MatrixRow[T]& v, long x) nogil void addmul_si(const MatrixRow[T]& v, long x, int n) nogil void addmul_si_2exp(const MatrixRow[T]& v, long x, long expo, T& tmp) nogil void addmul_si_2exp(const MatrixRow[T]& v, long x, long expo, int n, T& tmp) nogil void dot_product(T &result, const MatrixRow[T] &v0) nogil void dot_product(T &result, const MatrixRow[T] &v0, int n) nogil void dot_product[T](T& result, const MatrixRow[T]& v1, const MatrixRow[T]& v2, int n) nogil void dot_product[T](T& result, const MatrixRow[T]& v1, const MatrixRow[T]& v2) nogil cdef cppclass Matrix[T]: Matrix() Matrix(int r, int c) int get_rows() int get_cols() T& operator()(int i, int j) MatrixRow[T] operator[](int i) void clear() int empty() void resize(int rows, int cols) nogil void set_rows(int rows) nogil void set_cols(int cols) nogil void swap(Matrix[T]& m) nogil void swap_rows(int r1, int r2) nogil void rotate_left(int first, int last) nogil void rotate_right(int first, int last) nogil void rotate(int first, int middle, int last) nogil void rotate_gram_left(int first, int last, int nValidRows) nogil void rotate_gram_right(int first, int last, int nValidRows) nogil void transpose() nogil long get_max_exp() nogil cdef cppclass ZZ_mat[T]: ZZ_mat() ZZ_mat(int r, int c) int get_rows() nogil int get_cols() nogil 
void set_rows(int rows) nogil void set_cols(int cols) nogil Z_NR[T]& operator()(int i, int j) nogil MatrixRow[Z_NR[T]] operator[](int i) nogil void gen_identity(int nrows) nogil void gen_intrel(int bits) nogil void gen_simdioph(int bits, int bits2) nogil void gen_uniform(int bits) nogil void gen_ntrulike(int bits) nogil void gen_ntrulike_withq(int q) nogil void gen_ntrulike2(int bits) nogil void gen_ntrulike2_withq(int q) nogil void gen_qary_withq(int k, int q) nogil void gen_qary_prime(int k, int bits) nogil void gen_trg(double alpha) nogil # Gram Schmidt Orthogonalization cdef extern from "fplll/gso.h" namespace "fplll": cdef enum MatGSOInterfaceFlags: GSO_DEFAULT GSO_INT_GRAM GSO_ROW_EXPO GSO_OP_FORCE_LONG cdef cppclass MatGSO[ZT, FT]: MatGSO(Matrix[ZT] B, Matrix[ZT] U, Matrix[ZT] UinvT, int flags) int d Matrix[ZT]& b vector[long] row_expo void row_op_begin(int first, int last) void row_op_end(int first, int last) FT& get_gram(FT& f, int i, int j) const Matrix[FT]& get_mu_matrix() nogil const FT& get_mu_exp(int i, int j, long& expo) nogil const FT& get_mu_exp(int i, int j) nogil FT& get_mu(FT& f, int i, int j) nogil const Matrix[FT]& get_r_matrix() nogil const FT& get_r_exp(int i, int j, long& expo) nogil const FT& get_r_exp(int i, int j) nogil FT& get_r(FT& f, int i, int j) nogil long get_max_mu_exp(int i, int nColumns) nogil int update_gso_row(int i, int lastJ) nogil int update_gso_row(int i) nogil int update_gso() nogil void discover_all_rows() nogil void set_r(int i, int j, FT& f) nogil void move_row(int oldR, int newR) nogil void row_swap(int row1, int row2) nogil void row_addmul(int i, int j, const FT& x) nogil void row_addmul_we(int i, int j, const FT& x, long expoAdd) nogil void lock_cols() nogil void unlock_cols() nogil void create_row() nogil void create_rows(int nNewRows) nogil void remove_last_row() nogil void remove_last_rows(int nRemovedRows) nogil void apply_transform(const Matrix[FT]& transform, int srcBase, int targetBase) nogil void 
apply_transform(const Matrix[FT]& transform, int srcBase) nogil void dump_mu_d(double* mu, int offset, int block_size) nogil void dump_mu_d(vector[double] mu, int offset, int block_size) nogil void dump_r_d(double* r, int offset, int block_size) nogil void dump_r_d(vector[double] r, int offset, int block_size) nogil double get_current_slope(int start_row, int stop_row) nogil FT get_root_det(int start_row, int stop_row) nogil FT get_log_det(int start_row, int stop_row) nogil FT get_slide_potential(int start_row, int stop_row, int block_size) nogil void to_canonical(vector[FT] &w, const vector[FT] &v, long start) nogil void from_canonical(vector[FT] &v, const vector[FT] &w, long start, long dimension) nogil int babai(vector[ZT] w, vector[FT] v, int start, int dimension, bool gsa) nogil const int enable_int_gram const int enable_row_expo const int enable_transform const int enable_inverse_transform const int row_op_force_long cdef extern from "fplll/gso_gram.h" namespace "fplll": cdef cppclass MatGSOGram[ZT, FT]: MatGSOGram(Matrix[ZT] B, Matrix[ZT] U, Matrix[ZT] UinvT, int flags) long get_max_exp_of_b() nogil bool b_row_is_zero(int i) nogil int get_cols_of_b() nogil int get_rows_of_b() nogil void negate_row_of_b(int i) nogil void set_g(Matrix[ZT] arg_g) void create_rows(int n_new_rows) nogil void remove_last_rows(int n_removed_rows) nogil void move_row(int old_r, int new_r) nogil void row_addmul_we(int i, int j, const FT &x, long expo_add) nogil void row_add(int i, int j) nogil void row_sub(int i, int j) nogil FT &get_gram(FT &f, int i, int j) nogil cdef extern from "fplll/gso_interface.h" namespace "fplll": cdef cppclass MatGSOInterface[ZT, FT]: MatGSOInterface(Matrix[ZT] B, Matrix[ZT] U, Matrix[ZT] UinvT, int flags) int d long get_max_exp_of_b() nogil bool b_row_is_zero(int i) nogil int get_cols_of_b() nogil int get_rows_of_b() nogil void negate_row_of_b(int i) nogil vector[long] row_expo inline void row_op_begin(int first, int last) nogil void row_op_end(int first, 
int last) nogil FT &get_gram(FT &f, int i, int j) nogil ZT &get_int_gram(ZT &f, int i, int j) nogil const Matrix[FT] &get_mu_matrix() nogil const Matrix[FT] &get_r_matrix() nogil const Matrix[ZT] &get_g_matrix() nogil inline const FT &get_mu_exp(int i, int j, long &expo) nogil inline const FT &get_mu_exp(int i, int j) nogil inline FT &get_mu(FT &f, int i, int j) nogil ZT get_max_gram() nogil FT get_max_bstar() nogil inline const FT &get_r_exp(int i, int j, long &expo) nogil inline const FT &get_r_exp(int i, int j) nogil inline FT &get_r(FT &f, int i, int j) nogil long get_max_mu_exp(int i, int n_columns) nogil bool update_gso_row(int i, int last_j) nogil inline bool update_gso_row(int i) nogil inline bool update_gso() nogil inline void discover_all_rows() nogil void set_r(int i, int j, FT &f) nogil void move_row(int old_r, int new_r) nogil void row_swap(int row1, int row2) nogil inline void row_addmul(int i, int j, const FT &x) nogil void row_addmul_we(int i, int j, const FT &x, long expo_add) nogil void row_add(int i, int j) nogil void row_sub(int i, int j) nogil void lock_cols() nogil void unlock_cols() nogil inline void create_row() nogil void create_rows(int n_new_rows) nogil inline void remove_last_row() nogil void remove_last_rows(int n_removed_rows) nogil void apply_transform(const Matrix[FT] &transform, int src_base, int target_base) nogil void apply_transform(const Matrix[FT] &transform, int src_base) nogil void dump_mu_d(double* mu, int offset, int block_size) nogil void dump_mu_d(vector[double] mu, int offset, int block_size) nogil void dump_r_d(double* r, int offset, int block_size) nogil void dump_r_d(vector[double] r, int offset, int block_size) nogil double get_current_slope(int start_row, int stop_row) nogil FT get_root_det(int start_row, int end_row) nogil FT get_log_det(int start_row, int end_row) nogil FT get_slide_potential(int start_row, int end_row, int block_size) nogil int babai(vector[ZT] w, vector[FT] v, int start, int dimension) nogil 
const bool enable_int_gram const bool enable_row_expo const bool enable_transform const bool enable_inverse_transform const bool row_op_force_long # LLL cdef extern from "fplll/lll.h" namespace "fplll": cdef cppclass LLLReduction[ZT,FT]: LLLReduction(MatGSOInterface[ZT, FT]& m, double delta, double eta, int flags) int lll() nogil int lll(int kappa_min) nogil int lll(int kappa_min, int kappa_start) nogil int lll(int kappa_min, int kappa_start, int kappa_end) nogil int lll(int kappa_min, int kappa_start, int kappa_end, int size_reduction_start) nogil int size_reduction() nogil int size_reduction(int kappa_min) nogil int size_reduction(int kappa_min, int kappa_end) nogil int size_reduction(int kappa_min, int kappa_end, int size_reduction_start) nogil int status int final_kappa int last_early_red int zeros int n_swaps int is_lll_reduced[ZT, FT](MatGSOInterface[ZT, FT]& m, double delta, double eta) nogil # LLL Wrapper cdef extern from "fplll/wrapper.h" namespace "fplll": cdef cppclass Wrapper: Wrapper(ZZ_mat[mpz_t]& b, ZZ_mat[mpz_t]& u, ZZ_mat[mpz_t]& uInv, double delta, double eta, int flags) int lll() nogil int status # Evaluator cdef extern from "enumeration_callback_helper.h": cdef cppclass PyCallbackEvaluatorWrapper: PyCallbackEvaluatorWrapper() PyCallbackEvaluatorWrapper(object) cdef extern from "fplll/enum/evaluator.h" namespace "fplll": cdef enum EvaluatorStrategy: EVALSTRATEGY_BEST_N_SOLUTIONS EVALSTRATEGY_OPPORTUNISTIC_N_SOLUTIONS EVALSTRATEGY_FIRST_N_SOLUTIONS cdef cppclass Evaluator[FT]: Evaluator() void eval_sol(const vector[FT]& newSolCoord, const enumf& newPartialDist, enumf& maxDist, long normExp) int max_sols EvaluatorStrategy strategy multimap[FT, vector[FT]] solutions size_t sol_count vector[pair[FT, vector[FT]]] sub_solutions multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator begin() multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator end() int size() bool empty() cdef cppclass FastEvaluator[FT]: FastEvaluator() FastEvaluator(size_t 
nr_solutions, EvaluatorStrategy strategy, bool find_subsolutions) void eval_sol(const vector[FT]& newSolCoord, const enumf& newPartialDist, enumf& maxDist, long normExp) int max_sols EvaluatorStrategy strategy multimap[FT, vector[FT]] solutions size_t sol_count vector[pair[FT, vector[FT]]] sub_solutions multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator begin() multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator end() int size() bool empty() cdef cppclass CallbackEvaluator[FT]: CallbackEvaluator() CallbackEvaluator(PyCallbackEvaluatorWrapper, void *ctx, size_t nr_solutions, EvaluatorStrategy strategy, bool find_subsolutions) void eval_sol(const vector[FT]& newSolCoord, const enumf& newPartialDist, enumf& maxDist, long normExp) int max_sols EvaluatorStrategy strategy multimap[FT, vector[FT]] solutions size_t sol_count vector[pair[FT, vector[FT]]] sub_solutions multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator begin() multimap[FP_NR[FT], vector[FP_NR[FT]]].reverse_iterator end() int size() bool empty() cdef cppclass FastErrorBoundedEvaluator: FastErrorBoundedEvaluator() FastErrorBoundedEvaluator(int d, Matrix[FP_NR[mpfr_t]] mu, Matrix[FP_NR[mpfr_t]] r, EvaluatorMode eval_mode, size_t nr_solutions, EvaluatorStrategy strategy, bool find_subsolutions) void eval_sol(const vector[FP_NR[mpfr_t]]& newSolCoord, const enumf& newPartialDist, enumf& maxDist, long normExp) int size() int max_sols EvaluatorStrategy strategy multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]] solutions multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]].reverse_iterator begin() multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]].reverse_iterator end() cdef cppclass ErrorBoundedEvaluator: ErrorBoundedEvaluator() ErrorBoundedEvaluator(int d, Matrix[FP_NR[mpfr_t]] mu, Matrix[FP_NR[mpfr_t]] r, EvaluatorMode eval_mode, size_t nr_solutions, EvaluatorStrategy strategy, bool find_subsolutions) void eval_sol(const vector[FP_NR[mpfr_t]]& newSolCoord, const enumf& newPartialDist, enumf& maxDist, long 
normExp) int size() int max_sols EvaluatorStrategy strategy multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]] solutions multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]].reverse_iterator begin() multimap[FP_NR[mpfr_t], vector[FP_NR[mpfr_t]]].reverse_iterator end() # Enumeration cdef extern from "fplll/enum/enumerate.h" namespace "fplll": cdef cppclass Enumeration[ZT, FT]: Enumeration(MatGSOInterface[ZT, FT]& gso, Evaluator[FT]& evaluator) Enumeration(MatGSOInterface[ZT, FT]& gso, FastEvaluator[FT]& evaluator) Enumeration(MatGSOInterface[ZT, FP_NR[mpfr_t]]& gso, ErrorBoundedEvaluator& evaluator) Enumeration(MatGSOInterface[ZT, FP_NR[mpfr_t]]& gso, FastErrorBoundedEvaluator& evaluator) void enumerate(int first, int last, FT& fMaxDist, long maxDistExpo, const vector[FT]& targetCoord, const vector[double]& subTree, const vector[double]& pruning) void enumerate(int first, int last, FT& fMaxDist, long maxDistExpo, const vector[FT]& targetCoord, const vector[double]& subTree, const vector[double]& pruning, int dual) void enumerate(int first, int last, FT& fMaxDist, long maxDistExpo, const vector[FT]& targetCoord, const vector[double]& subTree, const vector[double]& pruning, int dual, int subtree_reset) unsigned long get_nodes(int level) cdef extern from "fplll/enum/enumerate_ext.h" namespace "fplll": ctypedef void extenum_cb_set_config (double *mu, size_t mudim, bool mutranspose, double *rdiag, double *pruning) ctypedef double extenum_cb_process_sol(double dist, double *sol); ctypedef void extenum_cb_process_subsol(double dist, double *subsol, int offset); ctypedef unsigned long extenum_fc_enumerate(int dim, enumf maxdist, function[extenum_cb_set_config] cbfunc, function[extenum_cb_process_sol] cbsol, function[extenum_cb_process_subsol] cbsubsol, bool dual, bool findsubsols) void set_external_enumerator(function[extenum_fc_enumerate] extenum) function[extenum_fc_enumerate] get_external_enumerator() # SVP cdef extern from "fplll/svpcvp.h" namespace "fplll": int 
shortest_vector(ZZ_mat[mpz_t]& b, vector[Z_NR[mpz_t]] &sol_coord, SVPMethod method, int flags) nogil int shortest_vector_pruning(ZZ_mat[mpz_t]& b, vector[Z_NR[mpz_t]]& sol_coord, const vector[double]& pruning, int flags) nogil int shortest_vector_pruning(ZZ_mat[mpz_t]& b, vector[Z_NR[mpz_t]]& sol_coord, vector[vector[Z_NR[mpz_t]]]& auxsol_coord, vector[double]& auxsol_dist, const int max_aux_sols, const vector[double]& pruning, int flags) nogil int closest_vector(ZZ_mat[mpz_t] b, vector[Z_NR[mpz_t]] &intTarget, vector[Z_NR[mpz_t]]& sol_coord, CVPMethod method, int flags) nogil # BKZ cdef extern from "fplll/bkz_param.h" namespace "fplll": cdef cppclass PruningParams: double gh_factor vector[double] coefficients double expectation PrunerMetric metric vector[double] detailed_cost PruningParams() @staticmethod PruningParams LinearPruningParams(int block_size, int level) cdef cppclass Strategy: size_t block_size vector[PruningParams] pruning_parameters vector[size_t] preprocessing_block_sizes @staticmethod Strategy EmptyStrategy() PruningParams get_pruning(double radius, double gh) cdef cppclass BKZParam: BKZParam() nogil BKZParam(int block_size) nogil BKZParam(int block_size, vector[Strategy] strategies, double delta) nogil BKZParam(int block_size, vector[Strategy] strategies, double delta, int flags, int max_loops, int max_time, double auto_abort_scale, int auto_abort_max_no_dec) nogil BKZParam(int block_size, vector[Strategy] strategies, double delta, int flags, int max_loops, int max_time, double auto_abort_scale, int auto_abort_max_no_dec, double gh_factor) nogil int block_size double delta int flags int max_loops double max_time double auto_abort_scale int auto_abort_max_no_dec vector[Strategy] strategies double gh_factor double min_success_probability int rerandomization_density string dump_gso_filename vector[Strategy] load_strategies_json(const string &filename) except + nogil const string default_strategy_path() nogil const string default_strategy() nogil 
const string strategy_full_path(const string &strategy_path) nogil cdef extern from "fplll/bkz.h" namespace "fplll": cdef cppclass BKZReduction[ZT, FT]: BKZReduction(MatGSOInterface[ZT, FT] &m, LLLReduction[ZT, FT] &lll_obj, const BKZParam ¶m) nogil int svp_preprocessing(int kappa, int block_size, const BKZParam ¶m) nogil int svp_postprocessing(int kappa, int block_size, const vector[FT] &solution) nogil int svp_reduction(int kappa, int block_size, const BKZParam ¶m, int dual) except + nogil int tour(const int loop, int &kappa_max, const BKZParam ¶m, int min_row, int max_row) except + nogil int sd_tour(const int loop, const BKZParam ¶m, int min_row, int max_row) except + nogil int slide_tour(const int loop, const BKZParam ¶m, int min_row, int max_row) except + nogil int hkz(int &kappaMax, const BKZParam ¶m, int min_row, int max_row) except + nogil int bkz() void rerandomize_block(int min_row, int max_row, int density) except + nogil void dump_gso(const string filename, const string prefix, int append) except + nogil int status long nodes cdef cppclass BKZAutoAbort[ZT, FT]: BKZAutoAbort(MatGSOInterface[ZT, FT]& m, int num_rows) nogil BKZAutoAbort(MatGSOInterface[ZT, FT]& m, int num_rows, int start_row) nogil int test_abort() nogil int test_abort(double scale) nogil int test_abort(double scale, int max_no_dec) nogil void adjust_radius_to_gh_bound[FT](FT& max_dist, long max_dist_expo, int block_size, FT& root_det_mpfr, double gh_factor) nogil FT get_root_det[FT](MatGSOInterface[Z_NR[mpz_t], FT]& m, int start, int end) FT get_log_det[FT](MatGSOInterface[Z_NR[mpz_t], FT]& m, int start, int end) FT get_sld_potential[FT](MatGSOInterface[Z_NR[mpz_t], FT]& m, int start, int end, int block_size) double get_current_slope[FT](MatGSOInterface[Z_NR[mpz_t], FT]& m, int startRow, int stopRow) nogil # Utility cdef extern from "fplll/util.h" namespace "fplll": void vector_matrix_product(vector[Z_NR[mpz_t]] &result, vector[Z_NR[mpz_t]] &x, const ZZ_mat[mpz_t] &m) nogil # Pruner cdef 
extern from "fplll/pruner/pruner.h" namespace "fplll": cdef enum PrunerFlags: PRUNER_CVP PRUNER_START_FROM_INPUT PRUNER_GRADIENT PRUNER_NELDER_MEAD PRUNER_VERBOSE PRUNER_SINGLE PRUNER_HALF cdef enum PrunerMetric: PRUNER_METRIC_PROBABILITY_OF_SHORTEST PRUNER_METRIC_EXPECTED_SOLUTIONS cdef cppclass Pruner[FT]: Pruner(const int n) Pruner(const FT enumeration_radius, const FT preproc_cost, const vector[double] &gso_r) Pruner(const FT enumeration_radius, const FT preproc_cost, const vector[double] &gso_r, const FT target, const PrunerMetric metric, int flags) Pruner(const FT enumeration_radius, const FT preproc_cost, const vector[vector[double]] &gso_r) Pruner(const FT enumeration_radius, const FT preproc_cost, const vector[vector[double]] &gso_r, const FT target, const PrunerMetric metric, int flags) void optimize_coefficients(vector[double] &pr) void optimize_coefficients_cost_vary_prob(vector[double] &pr) void optimize_coefficients_cost_fixed_prob(vector[double] &pr) void optimize_coefficients_evec(vector[double] &pr) void optimize_coefficients_full(vector[double] &pr) double single_enum_cost(const vector[double] &pr, vector[double] *detailed_cost) double single_enum_cost(const vector[double] &pr) double repeated_enum_cost(const vector[double] &pr) double measure_metric(const vector[double] &pr) FT gaussian_heuristic() void prune[FT](PruningParams &pruning, const double enumeration_radius, const double preproc_cost, const vector[double] &gso_r) void prune[FT](PruningParams &pruning, const double enumeration_radius, const double preproc_cost, const vector[double] &gso_r, const double target, const PrunerMetric metric, const int flags) void prune[FT](PruningParams &pruning, const double enumeration_radius, const double preproc_cost, const vector[vector[double]] &gso_r) void prune[FT](PruningParams &pruning, const double enumeration_radius, const double preproc_cost, const vector[vector[double]] &gso_r, const double target, const PrunerMetric metric, const int flags) FT 
svp_probability[FT](const PruningParams &pruning) FT svp_probability[FT](const vector[double] &pr) # Threads cdef extern from "fplll/threadpool.h" namespace "fplll": int get_threads() int set_threads(int th) # Highlevel Functions cdef extern from "fplll/fplll.h" namespace "fplll": int lll_reduction(ZZ_mat[mpz_t] b, double delta, double eta, LLLMethod method, FloatType float_type, int precision, int flags) nogil int lll_reduction(ZZ_mat[mpz_t] b, ZZ_mat[mpz_t] u, double delta, double eta, LLLMethod method, FloatType float_type, int precision, int flags) nogil int bkz_reduction(ZZ_mat[mpz_t] *b, ZZ_mat[mpz_t] *u, BKZParam ¶m, FloatType float_type, int precision) nogil int bkz_reduction(ZZ_mat[mpz_t] *b, int block_size, int flags, FloatType float_type, int precision) nogil int hkz_reduction(ZZ_mat[mpz_t] b) nogil const char* get_red_status_str(int status) nogil fpylll-0.6.1/src/fpylll/fplll/gso.pxd000066400000000000000000000006541455321202600175340ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .integer_matrix cimport IntegerMatrix from .decl cimport mat_gso_core_t, fplll_mat_gso_data_type_t, fplll_mat_gso_alg_type_t cdef class MatGSO: cdef fplll_mat_gso_data_type_t _type cdef fplll_mat_gso_alg_type_t _alg cdef mat_gso_core_t _core cdef readonly IntegerMatrix B cdef readonly IntegerMatrix _G cdef readonly IntegerMatrix U cdef readonly IntegerMatrix UinvT fpylll-0.6.1/src/fpylll/fplll/gso.pyx000066400000000000000000003274431455321202600175710ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Elementary basis operations, Gram matrix and Gram-Schmidt orthogonalization. .. moduleauthor:: Martin R. Albrecht A ``MatGSO`` object stores the following information: - The integral basis `B`, - the Gram-Schmidt coefficients `μ_{i,j} = `⟨b_i, b^*_j⟩ / ||b^*_j||^2` for `i>j`, and - the coefficients `r_{i,j} = ⟨b_i, b^*_j⟩` for `i≥j` It holds that: `B = R × Q = (μ × D) × (D^{-1} × B^*)` where `Q` is orthonormal and `R` is lower triangular. 
""" include "fpylll/config.pxi" from cysignals.signals cimport sig_on, sig_off from .decl cimport mat_gso_mpz_d, mat_gso_mpz_ld, mat_gso_mpz_dpe, mat_gso_mpz_mpfr from .decl cimport vector_fp_nr_t, vector_z_nr_t, z_nr_t, fp_nr_t, zz_mat_core_t from .decl cimport mat_gso_long_d, mat_gso_long_ld, mat_gso_long_dpe, mat_gso_long_mpfr from .decl cimport d_t from .decl cimport mat_gso_gso_t, mat_gso_gram_t from .fplll cimport FT_DOUBLE, FT_LONG_DOUBLE, FT_DPE, FT_MPFR, FloatType from .fplll cimport ZT_LONG, ZT_MPZ, IntType from .fplll cimport GSO_DEFAULT from .fplll cimport GSO_INT_GRAM from .fplll cimport GSO_OP_FORCE_LONG from .fplll cimport GSO_ROW_EXPO from .fplll cimport MatGSO as MatGSO_c, MatGSOGram as MatGSOGram_c, MatGSOInterface as MatGSOInterface_c from .fplll cimport Z_NR, FP_NR, Matrix from .fplll cimport dpe_t from .fplll cimport get_current_slope from fpylll.gmp.mpz cimport mpz_t from fpylll.mpfr.mpfr cimport mpfr_t from fpylll.util cimport preprocess_indices, check_float_type from fpylll.io cimport vector_fp_nr_barf, vector_fp_nr_slurp, vector_z_nr_slurp from fpylll.io cimport mpz_get_python from .integer_matrix cimport IntegerMatrix IF HAVE_LONG_DOUBLE: from .decl cimport ld_t IF HAVE_QD: from .decl cimport mat_gso_mpz_dd, mat_gso_mpz_qd, mat_gso_long_dd, mat_gso_long_qd, dd_t, qd_t from .fplll cimport FT_DD, FT_QD class MatGSORowOpContext(object): """ A context in which performing row operations is safe. When the context is left, the appropriate updates are performed by calling ``row_op_end()``. """ def __init__(self, M, i, j): """Construct new context for ``M[i:j]``. :param M: MatGSO object :param i: start row :param j: stop row """ self.i = i self.j = j self.M = M def __enter__(self): """ Enter context for working on rows. """ self.M.row_op_begin(self.i, self.j) return self def __exit__(self, exception_type, exception_value, exception_traceback): """ Exit context for working on rows. 
:param exception_type: :param exception_value: :param exception_traceback: """ self.M.row_op_end(self.i, self.j) return False cdef class MatGSO: """ MatGSO provides an interface for performing elementary operations on a basis and computing its Gram matrix and its Gram-Schmidt orthogonalization. The Gram-Schmidt coefficients are computed on demand. The object keeps track of which coefficients are valid after each row operation. """ def __init__(self, IntegerMatrix B, U=None, UinvT=None, int flags=GSO_DEFAULT, float_type="double", gram=False, update=False): """ :param IntegerMatrix B: The matrix on which row operations are performed. It must not be empty. :param IntegerMatrix U: If ``U`` is not empty, operations on ``B`` are also done on ``u`` (in this case both must have the same number of rows). If ``u`` is initially the identity matrix, multiplying transform by the initial basis gives the current basis. :param IntegerMatrix UinvT: Inverse transform (should be empty, which disables the computation, or initialized with identity matrix). It works only if ``U`` is not empty. :param int flags: Flags - ``GSO.INT_GRAM`` - If true, coefficients of the Gram matrix are computed with exact integer arithmetic. Otherwise, they are computed in floating-point. Note that when exact arithmetic is used, all coefficients of the first ``n_known_rows`` are continuously updated, whereas in floating-point, they are computed only on-demand. This option cannot be enabled when ``GSO.ROW_EXPO`` is set. - ``GSO.ROW_EXPO`` - If true, each row of ``B`` is normalized by a power of 2 before doing conversion to floating-point, which hopefully avoids some overflows. This option cannot be enabled if ``GSO.INT_GRAM`` is set and works only with ``float_type="double"`` and ``float_type="long double"``. It is useless and **must not** be used for ``float_type="dpe"``, ``float_type="dd"``, ``float_type="qd"`` or ``float_type=mpfr_t``. - ``GSO.OP_FORCE_LONG`` - Affects the behaviour of ``row_addmul``. 
See its documentation. :param float_type: A floating point type, i.e. an element of ``fpylll.config.float_types``. If ``float_type="mpfr"`` set precision with ``set_precision()`` before constructing this object and do not change the precision during the lifetime of this object. :param gram: The input ``B`` is a Gram matrix of the lattice, rather than a basis. :param update: Call ``update_gso()``. Note that matching integer types for ``B``, ``U`` and ``UinvT`` are enforced:: >>> from fpylll import IntegerMatrix, LLL, GSO >>> B = IntegerMatrix.random(5, 'uniform', bits = 8, int_type = "long") >>> M = GSO.Mat(B, U = IntegerMatrix.identity(B.nrows)) Traceback (most recent call last): ... TypeError: U.int_type != B.int_type >>> from fpylll import IntegerMatrix, LLL, GSO >>> B = IntegerMatrix.random(5, 'uniform', bits=8, int_type="long") >>> M = GSO.Mat(B, U = IntegerMatrix.identity(B.nrows, int_type="long")) """ if U is None: self.U = IntegerMatrix(0, 0) elif isinstance(U, IntegerMatrix): if U.nrows != B.nrows: raise ValueError("U.nrows != B.nrows") if U.int_type != B.int_type: raise TypeError("U.int_type != B.int_type") self.U = U else: raise TypeError("type of U (%s) not supported"%type(U)) if UinvT is None: self.UinvT = IntegerMatrix(0, 0) elif isinstance(UinvT, IntegerMatrix): if U is None: raise ValueError("Uinvt != None but U == None.") if UinvT.nrows != B.nrows: raise ValueError("UinvT.nrows != B.nrows") self.UinvT = UinvT if UinvT.int_type != B.int_type: raise TypeError("UinvT.int_type != B.int_type") else: raise TypeError("type of UinvT (%s) not supported"%type(UinvT)) cdef Matrix[Z_NR[mpz_t]] *b_m = B._core.mpz cdef Matrix[Z_NR[mpz_t]] *u_m = self.U._core.mpz cdef Matrix[Z_NR[mpz_t]] *u_inv_t_m = self.UinvT._core.mpz cdef Matrix[Z_NR[long]] *b_l = B._core.long cdef Matrix[Z_NR[long]] *u_l = self.U._core.long cdef Matrix[Z_NR[long]] *u_inv_t_l = self.UinvT._core.long cdef FloatType float_type_ = check_float_type(float_type) if gram: # Do some sanity checking 
that we're not getting a random non-Gram matrix # # This isn't sufficient but the full check would be too expensive. for i in range(B.nrows): if B._get(i, i) < 0: raise ValueError("Diagonal of input matrix has negative entries.") if not gram: self._alg = mat_gso_gso_t if B._type == ZT_MPZ: if float_type_ == FT_DOUBLE: self._type = mat_gso_mpz_d self._core.mpz_d = new MatGSO_c[Z_NR[mpz_t],FP_NR[d_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: self._type = mat_gso_mpz_ld self._core.mpz_ld = new MatGSO_c[Z_NR[mpz_t],FP_NR[ld_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) ELSE: raise ValueError("Float type '%s' not understood." % float_type) elif float_type_ == FT_DPE: self._type = mat_gso_mpz_dpe self._core.mpz_dpe = new MatGSO_c[Z_NR[mpz_t],FP_NR[dpe_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_MPFR: self._type = mat_gso_mpz_mpfr self._core.mpz_mpfr = new MatGSO_c[Z_NR[mpz_t],FP_NR[mpfr_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) else: IF HAVE_QD: if float_type_ == FT_DD: self._type = mat_gso_mpz_dd self._core.mpz_dd = new MatGSO_c[Z_NR[mpz_t],FP_NR[dd_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_QD: self._type = mat_gso_mpz_qd self._core.mpz_qd = new MatGSO_c[Z_NR[mpz_t],FP_NR[qd_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) elif B._type == ZT_LONG: if float_type_ == FT_DOUBLE: self._type = mat_gso_long_d self._core.long_d = new MatGSO_c[Z_NR[long],FP_NR[d_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: self._type = mat_gso_long_ld self._core.long_ld = new MatGSO_c[Z_NR[long],FP_NR[ld_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) ELSE: raise ValueError("Float type '%s' not understood." 
% float_type) elif float_type_ == FT_DPE: self._type = mat_gso_long_dpe self._core.long_dpe = new MatGSO_c[Z_NR[long],FP_NR[dpe_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_MPFR: self._type = mat_gso_long_mpfr self._core.long_mpfr = new MatGSO_c[Z_NR[long],FP_NR[mpfr_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) else: IF HAVE_QD: if float_type_ == FT_DD: self._type = mat_gso_long_dd self._core.long_dd = new MatGSO_c[Z_NR[long],FP_NR[dd_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_QD: self._type = mat_gso_long_qd self._core.long_qd = new MatGSO_c[Z_NR[long],FP_NR[qd_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) self.B = B else: flags |= GSO_INT_GRAM self._alg = mat_gso_gram_t if B._type == ZT_MPZ: if float_type_ == FT_DOUBLE: self._type = mat_gso_mpz_d self._core.mpz_d = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[d_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: self._type = mat_gso_mpz_ld self._core.mpz_ld = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[ld_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) ELSE: raise ValueError("Float type '%s' not understood." 
% float_type) elif float_type_ == FT_DPE: self._type = mat_gso_mpz_dpe self._core.mpz_dpe = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[dpe_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_MPFR: self._type = mat_gso_mpz_mpfr self._core.mpz_mpfr = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[mpfr_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) else: IF HAVE_QD: if float_type_ == FT_DD: self._type = mat_gso_mpz_dd self._core.mpz_dd = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[dd_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) elif float_type_ == FT_QD: self._type = mat_gso_mpz_qd self._core.mpz_qd = new MatGSOGram_c[Z_NR[mpz_t],FP_NR[qd_t]](b_m[0], u_m[0], u_inv_t_m[0], flags) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) elif B._type == ZT_LONG: if float_type_ == FT_DOUBLE: self._type = mat_gso_long_d self._core.long_d = new MatGSOGram_c[Z_NR[long],FP_NR[d_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: self._type = mat_gso_long_ld self._core.long_ld = new MatGSOGram_c[Z_NR[long],FP_NR[ld_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) ELSE: raise ValueError("Float type '%s' not understood." 
% float_type) elif float_type_ == FT_DPE: self._type = mat_gso_long_dpe self._core.long_dpe = new MatGSOGram_c[Z_NR[long],FP_NR[dpe_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_MPFR: self._type = mat_gso_long_mpfr self._core.long_mpfr = new MatGSOGram_c[Z_NR[long],FP_NR[mpfr_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) else: IF HAVE_QD: if float_type_ == FT_DD: self._type = mat_gso_long_dd self._core.long_dd = new MatGSOGram_c[Z_NR[long],FP_NR[dd_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) elif float_type_ == FT_QD: self._type = mat_gso_long_qd self._core.long_qd = new MatGSOGram_c[Z_NR[long],FP_NR[qd_t]](b_l[0], u_l[0], u_inv_t_l[0], flags) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) self._G = B if update: self.update_gso() def __dealloc__(self): # We are making sure the correct destructor is called, even when it's not virtual, by explicit casting cdef MatGSO_c[Z_NR[long], FP_NR[double]]* gso_long_d IF HAVE_LONG_DOUBLE: cdef MatGSO_c[Z_NR[long], FP_NR[ld_t]]* gso_long_ld cdef MatGSO_c[Z_NR[long], FP_NR[dpe_t]]* gso_long_dpe IF HAVE_QD: cdef MatGSO_c[Z_NR[long], FP_NR[dd_t]]* gso_long_dd cdef MatGSO_c[Z_NR[long], FP_NR[qd_t]]* gso_long_qd cdef MatGSO_c[Z_NR[long], FP_NR[mpfr_t]]* gso_long_mpfr cdef MatGSO_c[Z_NR[mpz_t], FP_NR[double]]* gso_mpz_d IF HAVE_LONG_DOUBLE: cdef MatGSO_c[Z_NR[mpz_t], FP_NR[ld_t]]* gso_mpz_ld cdef MatGSO_c[Z_NR[mpz_t], FP_NR[dpe_t]]* gso_mpz_dpe IF HAVE_QD: cdef MatGSO_c[Z_NR[mpz_t], FP_NR[dd_t]]* gso_mpz_dd cdef MatGSO_c[Z_NR[mpz_t], FP_NR[qd_t]]* gso_mpz_qd cdef MatGSO_c[Z_NR[mpz_t], FP_NR[mpfr_t]]* gso_mpz_mpfr cdef MatGSOGram_c[Z_NR[long], FP_NR[double]]* gram_long_d IF HAVE_LONG_DOUBLE: cdef MatGSOGram_c[Z_NR[long], FP_NR[ld_t]]* gram_long_ld cdef MatGSOGram_c[Z_NR[long], FP_NR[dpe_t]]* gram_long_dpe IF HAVE_QD: cdef MatGSOGram_c[Z_NR[long], FP_NR[dd_t]]* gram_long_dd cdef MatGSOGram_c[Z_NR[long], FP_NR[qd_t]]* gram_long_qd cdef 
MatGSOGram_c[Z_NR[long], FP_NR[mpfr_t]]* gram_long_mpfr cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[double]]* gram_mpz_d IF HAVE_LONG_DOUBLE: cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[ld_t]]* gram_mpz_ld cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[dpe_t]]* gram_mpz_dpe IF HAVE_QD: cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[dd_t]]* gram_mpz_dd cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[qd_t]]* gram_mpz_qd cdef MatGSOGram_c[Z_NR[mpz_t], FP_NR[mpfr_t]]* gram_mpz_mpfr if self._alg == mat_gso_gso_t: if self._type == mat_gso_long_d: gso_long_d = self._core.long_d del gso_long_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: gso_long_ld = self._core.long_ld del gso_long_ld if self._type == mat_gso_long_dpe: gso_long_dpe = self._core.long_dpe del gso_long_dpe IF HAVE_QD: if self._type == mat_gso_long_dd: gso_long_dd = self._core.long_dd del gso_long_dd if self._type == mat_gso_long_qd: gso_long_qd = self._core.long_qd del gso_long_qd if self._type == mat_gso_long_mpfr: gso_long_mpfr = self._core.long_mpfr del gso_long_mpfr if self._type == mat_gso_mpz_d: gso_mpz_d = self._core.mpz_d del gso_mpz_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: gso_mpz_ld = self._core.mpz_ld del gso_mpz_ld if self._type == mat_gso_mpz_dpe: gso_mpz_dpe = self._core.mpz_dpe del gso_mpz_dpe IF HAVE_QD: if self._type == mat_gso_mpz_dd: gso_mpz_dd = self._core.mpz_dd del gso_mpz_dd if self._type == mat_gso_mpz_qd: gso_mpz_qd = self._core.mpz_qd del gso_mpz_qd if self._type == mat_gso_mpz_mpfr: gso_mpz_mpfr = self._core.mpz_mpfr del gso_mpz_mpfr elif self._alg == mat_gso_gram_t: if self._type == mat_gso_long_d: gram_long_d = self._core.long_d del gram_long_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: gram_long_ld = self._core.long_ld del gram_long_ld if self._type == mat_gso_long_dpe: gram_long_dpe = self._core.long_dpe del gram_long_dpe IF HAVE_QD: if self._type == mat_gso_long_dd: gram_long_dd = self._core.long_dd del gram_long_dd if self._type == mat_gso_long_qd: gram_long_qd = self._core.long_qd 
del gram_long_qd if self._type == mat_gso_long_mpfr: gram_long_mpfr = self._core.long_mpfr del gram_long_mpfr if self._type == mat_gso_mpz_d: gram_mpz_d = self._core.mpz_d del gram_mpz_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: gram_mpz_ld = self._core.mpz_ld del gram_mpz_ld if self._type == mat_gso_mpz_dpe: gram_mpz_dpe = self._core.mpz_dpe del gram_mpz_dpe IF HAVE_QD: if self._type == mat_gso_mpz_dd: gram_mpz_dd = self._core.mpz_dd del gram_mpz_dd if self._type == mat_gso_mpz_qd: gram_mpz_qd = self._core.mpz_qd del gram_mpz_qd if self._type == mat_gso_mpz_mpfr: gram_mpz_mpfr = self._core.mpz_mpfr del gram_mpz_mpfr def __reduce__(self): """ Make sure attempts at pickling raise an error until proper pickling is implemented. """ raise NotImplementedError @property def G(self): """ Return the Gram matrix. - If this GSO object operates on a Gram matrix, return that. - If this GSO object operates on a basis with ``GSO.INT_GRAM`` set, construct the Gram matrix and return it - Otherwise, a ``NotImplementedError`` is raised >>> from fpylll import IntegerMatrix, GSO, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(10, "qary", k=5, bits=10) >>> M = GSO.Mat(A, flags=GSO.INT_GRAM); _ = M.update_gso() >>> G = M.G >>> print(G) [ 2176 0 0 0 0 0 0 0 0 0 ] [ 1818 4659 0 0 0 0 0 0 0 0 ] [ 2695 5709 7416 0 0 0 0 0 0 0 ] [ 2889 5221 7077 7399 0 0 0 0 0 0 ] [ 2746 3508 4717 4772 4618 0 0 0 0 0 ] [ 2332 1590 2279 2332 2597 2809 0 0 0 0 ] [ 265 1749 2491 2438 0 0 2809 0 0 0 ] [ 159 265 212 1219 318 0 0 2809 0 0 ] [ 742 636 1537 2067 1802 0 0 0 2809 0 ] [ 159 2650 2650 1908 1696 0 0 0 0 2809 ] >>> A[0].norm()**2 2176.0 >>> M = GSO.Mat(G, gram=True); _ = M.update_gso() >>> G == M.G True >>> M = GSO.Mat(A) >>> M.G Traceback (most recent call last): ... 
NotImplementedError: Computing the Gram Matrix currently requires GSO.INT_GRAM """ cdef long i, j if self._alg == mat_gso_gram_t: return self._G elif self.int_gram_enabled: if self.int_type == "mpz": G = IntegerMatrix(self.d, self.d, int_type="mpz") if self._type == mat_gso_mpz_d: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_d.get_g_matrix()[i][j].get_data()) elif self._type == mat_gso_mpz_dpe: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_dpe.get_g_matrix()[i][j].get_data()) elif self._type == mat_gso_mpz_mpfr: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_mpfr.get_g_matrix()[i][j].get_data()) else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_dd.get_g_matrix()[i][j].get_data()) elif self._type == mat_gso_mpz_qd: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_qd.get_g_matrix()[i][j].get_data()) else: raise RuntimeError("MatGSO object '%s' has no core."%self) ELIF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: for i in range(self.d): for j in range(self.d): G[i, j] = mpz_get_python(self._core.mpz_ld.get_g_matrix()[i][j].get_data()) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) return G elif self.int_type == "long": G = IntegerMatrix(self.d, self.d, int_type="long") if self._type == mat_gso_long_d: for i in range(self.d): for j in range(self.d): G[i, j] = self._core.long_d.get_g_matrix()[i][j].get_data() elif self._type == mat_gso_long_dpe: for i in range(self.d): for j in range(self.d): G[i, j] = self._core.long_dpe.get_g_matrix()[i][j].get_data() elif self._type == mat_gso_long_mpfr: for i in range(self.d): for j in range(self.d): G[i, j] = self._core.long_mpfr.get_g_matrix()[i][j].get_data() else: IF HAVE_QD: if self._type == mat_gso_long_dd: for i in range(self.d): for j in range(self.d): 
                                    G[i, j] = self._core.long_dd.get_g_matrix()[i][j].get_data()
                            elif self._type == mat_gso_long_qd:
                                for i in range(self.d):
                                    for j in range(self.d):
                                        G[i, j] = self._core.long_qd.get_g_matrix()[i][j].get_data()
                            else:
                                raise RuntimeError("MatGSO object '%s' has no core."%self)
                        ELIF HAVE_LONG_DOUBLE:
                            # Without QD support, "long double" is the only remaining float type here.
                            if self._type == mat_gso_long_ld:
                                for i in range(self.d):
                                    for j in range(self.d):
                                        G[i, j] = self._core.long_ld.get_g_matrix()[i][j].get_data()
                        ELSE:
                            raise RuntimeError("MatGSO object '%s' has no core."%self)
                return G
        else:
            raise NotImplementedError("Computing the Gram Matrix currently requires GSO.INT_GRAM")

    @property
    def float_type(self):
        """
        Name of the floating point type used internally by this GSO object.

        >>> from fpylll import IntegerMatrix, GSO, FPLLL
        >>> A = IntegerMatrix(10, 10)
        >>> M = GSO.Mat(A)
        >>> M.float_type
        'double'
        >>> FPLLL.set_precision(100)
        53
        >>> M = GSO.Mat(A, float_type='mpfr')
        >>> M.float_type
        'mpfr'

        """
        # Dispatch on the active (integer type, float type) pair of the C core.
        # ``IF HAVE_LONG_DOUBLE``/``IF HAVE_QD`` are Cython *compile-time*
        # conditionals: those branches only exist in builds with the
        # corresponding support enabled.
        if self._type == mat_gso_mpz_d or self._type == mat_gso_long_d:
            return "double"
        IF HAVE_LONG_DOUBLE:
            if self._type == mat_gso_mpz_ld or self._type == mat_gso_long_ld:
                return "long double"
        if self._type == mat_gso_mpz_dpe or self._type == mat_gso_long_dpe:
            return "dpe"
        IF HAVE_QD:
            if self._type == mat_gso_mpz_dd or self._type == mat_gso_long_dd:
                return "dd"
            if self._type == mat_gso_mpz_qd or self._type == mat_gso_long_qd:
                return "qd"
        if self._type == mat_gso_mpz_mpfr or self._type == mat_gso_long_mpfr:
            return "mpfr"
        raise RuntimeError("MatGSO object '%s' has no core."%self)

    @property
    def int_type(self):
        """
        Name of the integer type used internally, either ``"mpz"`` or ``"long"``.
        """
        if self._type in (mat_gso_mpz_d, mat_gso_mpz_dpe, mat_gso_mpz_mpfr):
            return "mpz"
        elif self._type in (mat_gso_long_d, mat_gso_long_dpe, mat_gso_long_mpfr):
            return "long"
        IF HAVE_LONG_DOUBLE:
            if self._type == mat_gso_mpz_ld:
                return "mpz"
            elif self._type == mat_gso_long_ld:
                return "long"
        IF HAVE_QD:
            if self._type in (mat_gso_mpz_dd, mat_gso_mpz_qd):
                return "mpz"
            elif self._type in (mat_gso_long_dd, mat_gso_long_qd):
                return "long"
        raise RuntimeError("MatGSO object '%s' has no core."%self)

    @property
    def d(self):
        """
Number of rows of ``B`` (dimension of the lattice). >>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.d 11 """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.d if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.d IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.d if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.d if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.d if self._type == mat_gso_long_d: return self._core.long_d.d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.d if self._type == mat_gso_long_dpe: return self._core.long_dpe.d IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.d if self._type == mat_gso_long_qd: return self._core.long_qd.d if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.d raise RuntimeError("MatGSO object '%s' has no core."%self) @property def int_gram_enabled(self): """ Exact computation of dot products. 
>>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.int_gram_enabled False >>> M = GSO.Mat(A, flags=GSO.INT_GRAM) >>> M.int_gram_enabled True """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.enable_int_gram) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.enable_int_gram) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.enable_int_gram) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.enable_int_gram) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.enable_int_gram) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.enable_int_gram) if self._type == mat_gso_long_d: return bool(self._core.long_d.enable_int_gram) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.enable_int_gram) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.enable_int_gram) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.enable_int_gram) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.enable_int_gram) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.enable_int_gram) raise RuntimeError("MatGSO object '%s' has no core."%self) @property def row_expo_enabled(self): """ Normalization of each row of b by a power of 2. 
>>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.row_expo_enabled False >>> M = GSO.Mat(A, flags=GSO.ROW_EXPO) >>> M.row_expo_enabled True """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.enable_row_expo) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.enable_row_expo) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.enable_row_expo) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.enable_row_expo) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.enable_row_expo) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.enable_row_expo) if self._type == mat_gso_long_d: return bool(self._core.long_d.enable_row_expo) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.enable_row_expo) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.enable_row_expo) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.enable_row_expo) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.enable_row_expo) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.enable_row_expo) raise RuntimeError("MatGSO object '%s' has no core."%self) @property def transform_enabled(self): """ Computation of the transform matrix. 
>>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.transform_enabled False >>> U = IntegerMatrix.identity(11) >>> M = GSO.Mat(A, U=U) >>> M.transform_enabled True """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.enable_transform) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.enable_transform) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.enable_transform) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.enable_transform) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.enable_transform) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.enable_transform) if self._type == mat_gso_long_d: return bool(self._core.long_d.enable_transform) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.enable_transform) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.enable_transform) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.enable_transform) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.enable_transform) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.enable_transform) raise RuntimeError("MatGSO object '%s' has no core."%self) @property def inverse_transform_enabled(self): """ Computation of the inverse transform matrix (transposed). 
>>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.inverse_transform_enabled False >>> U = IntegerMatrix.identity(11) >>> UinvT = IntegerMatrix.identity(11) >>> M = GSO.Mat(A, U=U, UinvT=UinvT) >>> M.inverse_transform_enabled True """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.enable_inverse_transform) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.enable_inverse_transform) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.enable_inverse_transform) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.enable_inverse_transform) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.enable_inverse_transform) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.enable_inverse_transform) if self._type == mat_gso_long_d: return bool(self._core.long_d.enable_inverse_transform) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.enable_inverse_transform) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.enable_inverse_transform) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.enable_inverse_transform) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.enable_inverse_transform) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.enable_inverse_transform) raise RuntimeError("MatGSO object '%s' has no core."%self) @property def row_op_force_long(self): """ Changes the behaviour of ``row_addmul``, see its documentation. 
>>> from fpylll import IntegerMatrix, GSO, FPLLL >>> A = IntegerMatrix(11, 11) >>> M = GSO.Mat(A) >>> M.row_op_force_long False >>> M = GSO.Mat(A, flags=GSO.OP_FORCE_LONG) >>> M.row_op_force_long True """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.row_op_force_long) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.row_op_force_long) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.row_op_force_long) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.row_op_force_long) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.row_op_force_long) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.row_op_force_long) if self._type == mat_gso_long_d: return bool(self._core.long_d.row_op_force_long) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.row_op_force_long) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.row_op_force_long) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.row_op_force_long) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.row_op_force_long) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.row_op_force_long) raise RuntimeError("MatGSO object '%s' has no core."%self) def row_op_begin(self, int first, int last): """ Must be called before a sequence of ``row_addmul``. :param int first: start index for ``row_addmul`` operations. :param int last: final index (exclusive). .. note:: It is preferable to use ``MatGSORowOpContext`` via ``row_ops``. 
""" if self._type == mat_gso_mpz_d: return self._core.mpz_d.row_op_begin(first, last) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.row_op_begin(first, last) if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.row_op_begin(first, last) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.row_op_begin(first, last) if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.row_op_begin(first, last) if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.row_op_begin(first, last) if self._type == mat_gso_long_d: return self._core.long_d.row_op_begin(first, last) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.row_op_begin(first, last) if self._type == mat_gso_long_dpe: return self._core.long_dpe.row_op_begin(first, last) IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.row_op_begin(first, last) if self._type == mat_gso_long_qd: return self._core.long_qd.row_op_begin(first, last) if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.row_op_begin(first, last) raise RuntimeError("MatGSO object '%s' has no core."%self) def row_op_end(self, int first, int last): """ Must be called after a sequence of ``row_addmul``. This invalidates the `i`-th line of the GSO. :param int first: start index to invalidate. :param int last: final index to invalidate (exclusive). .. note:: It is preferable to use ``MatGSORowOpContext`` via ``row_ops``. 
""" if self._type == mat_gso_mpz_d: return self._core.mpz_d.row_op_end(first, last) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.row_op_end(first, last) if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.row_op_end(first, last) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.row_op_end(first, last) if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.row_op_end(first, last) if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.row_op_end(first, last) if self._type == mat_gso_long_d: return self._core.long_d.row_op_end(first, last) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.row_op_end(first, last) if self._type == mat_gso_long_dpe: return self._core.long_dpe.row_op_end(first, last) IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.row_op_end(first, last) if self._type == mat_gso_long_qd: return self._core.long_qd.row_op_end(first, last) if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.row_op_end(first, last) raise RuntimeError("MatGSO object '%s' has no core."%self) def row_ops(self, int first, int last): """Return context in which ``row_addmul`` operations are safe. :param int first: start index. :param int last: final index (exclusive). """ return MatGSORowOpContext(self, first, last) def get_gram(self, int i, int j): """ Return Gram matrix coefficients (0 ≤ i ≤ ``n_known_rows`` and 0 ≤ j ≤ i). If ``enable_row_expo`` is false, returns the dot product `⟨b_i, b_j⟩`. If ``enable_row_expo`` is true, returns `⟨b_i, b_j⟩/ 2^{(r_i + r_j)}`, where `r_i` and `r_j` are the row exponents of rows `i` and `j` respectively. 
:param int i: 0 ≤ i < d :param int j: 0 ≤ j ≤ i """ preprocess_indices(i, j, self.d, self.d) cdef fp_nr_t t # TODO: don't just return doubles if self._type == mat_gso_mpz_d: return self._core.mpz_d.get_gram(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.get_gram(t.ld, i, j).get_d() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.get_gram(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.get_gram(t.dd, i, j).get_d() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.get_gram(t.qd, i, j).get_d() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.get_gram(t.mpfr, i, j).get_d() if self._type == mat_gso_long_d: return self._core.long_d.get_gram(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.get_gram(t.ld, i, j).get_d() if self._type == mat_gso_long_dpe: return self._core.long_dpe.get_gram(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.get_gram(t.dd, i, j).get_d() if self._type == mat_gso_long_qd: return self._core.long_qd.get_gram(t.qd, i, j).get_d() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.get_gram(t.mpfr, i, j).get_d() raise RuntimeError("MatGSO object '%s' has no core."%self) def get_int_gram(self, int i, int j): """ Return *integer* Gram matrix coefficients (0 ≤ i ≤ ``n_known_rows`` and 0 ≤ j ≤ i). If ``enable_row_expo`` is false, returns the dot product `⟨b_i, b_j⟩`. 
:param int i: 0 ≤ i < d :param int j: 0 ≤ j ≤ i """ preprocess_indices(i, j, self.d, self.d) cdef z_nr_t t if self._type == mat_gso_mpz_d: self._core.mpz_d.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: self._core.mpz_ld.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) if self._type == mat_gso_mpz_dpe: self._core.mpz_dpe.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) IF HAVE_QD: if self._type == mat_gso_mpz_dd: self._core.mpz_dd.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) if self._type == mat_gso_mpz_qd: self._core.mpz_qd.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) if self._type == mat_gso_mpz_mpfr: self._core.mpz_mpfr.get_int_gram(t.mpz, i, j) return mpz_get_python(t.mpz.get_data()) if self._type == mat_gso_long_d: return self._core.long_d.get_int_gram(t.long, i, j).get_data() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.get_int_gram(t.long, i, j).get_data() if self._type == mat_gso_long_dpe: return self._core.long_dpe.get_int_gram(t.long, i, j).get_data() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.get_int_gram(t.long, i, j).get_data() if self._type == mat_gso_long_qd: return self._core.long_qd.get_int_gram(t.long, i, j).get_data() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.get_int_gram(t.long, i, j).get_data() raise RuntimeError("MatGSO object '%s' has no core."%self) def get_r(self, int i, int j): """ Return `⟨b_i, b*_j⟩`. 
:param i: :param j: >>> from fpylll import * >>> FPLLL.set_random_seed(0) >>> A = IntegerMatrix.random(5, "uniform", bits=5) >>> M = GSO.Mat(A) >>> M.update_gso() True >>> M.get_r(1, 0) 1396.0 """ preprocess_indices(i, j, self.d, self.d) cdef fp_nr_t t # TODO: don't just return doubles if self._type == mat_gso_mpz_d: return self._core.mpz_d.get_r(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.get_r(t.ld, i, j).get_d() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.get_r(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.get_r(t.dd, i, j).get_d() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.get_r(t.qd, i, j).get_d() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.get_r(t.mpfr, i, j).get_d() if self._type == mat_gso_long_d: return self._core.long_d.get_r(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.get_r(t.ld, i, j).get_d() if self._type == mat_gso_long_dpe: return self._core.long_dpe.get_r(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.get_r(t.dd, i, j).get_d() if self._type == mat_gso_long_qd: return self._core.long_qd.get_r(t.qd, i, j).get_d() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.get_r(t.mpfr, i, j).get_d() raise RuntimeError("MatGSO object '%s' has no core."%self) def get_r_exp(self, int i, int j): """ Return `f = r_{i, j}` and exponent `x` such that `⟨b_i, b^*_j⟩ = f ⋅ 2^x`. If ``enable_row_expo`` is false, `x` is always 0. If ``enable_row_expo`` is true, `x = r_i + r_j`, where `r_i` and `r_j` are the row exponents of rows `i` and `j` respectively. .. note:: It is assumed that `r(i, j)` is valid. 
:param i: :param j: """ preprocess_indices(i, j, self.d, self.d) cdef double r = 0.0 cdef long expo = 0 # TODO: don't just return doubles if self._type == mat_gso_mpz_d: r = self._core.mpz_d.get_r_exp(i, j, expo).get_data() return r, expo IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: r = self._core.mpz_ld.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_dpe: r = self._core.mpz_dpe.get_r_exp(i, j, expo).get_d() return r, expo IF HAVE_QD: if self._type == mat_gso_mpz_dd: r = self._core.mpz_dd.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_qd: r = self._core.mpz_qd.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_mpfr: r = self._core.mpz_mpfr.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_d: r = self._core.long_d.get_r_exp(i, j, expo).get_data() return r, expo IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: r = self._core.long_ld.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_dpe: r = self._core.long_dpe.get_r_exp(i, j, expo).get_d() return r, expo IF HAVE_QD: if self._type == mat_gso_long_dd: r = self._core.long_dd.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_qd: r = self._core.long_qd.get_r_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_mpfr: r = self._core.long_mpfr.get_r_exp(i, j, expo).get_d() return r, expo raise RuntimeError("MatGSO object '%s' has no core."%self) def get_mu(self, int i, int j): """ Return ` / ||b^*_j||^2`. 
:param i: :param j: """ preprocess_indices(i, j, self.d, self.d) cdef fp_nr_t t # TODO: don't just return doubles if self._type == mat_gso_mpz_d: return self._core.mpz_d.get_mu(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.get_mu(t.ld, i, j).get_d() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.get_mu(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.get_mu(t.dd, i, j).get_d() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.get_mu(t.qd, i, j).get_d() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.get_mu(t.mpfr, i, j).get_d() if self._type == mat_gso_long_d: return self._core.long_d.get_mu(t.d, i, j).get_d() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.get_mu(t.ld, i, j).get_d() if self._type == mat_gso_long_dpe: return self._core.long_dpe.get_mu(t.dpe, i, j).get_d() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.get_mu(t.dd, i, j).get_d() if self._type == mat_gso_long_qd: return self._core.long_qd.get_mu(t.qd, i, j).get_d() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.get_mu(t.mpfr, i, j).get_d() raise RuntimeError("MatGSO object '%s' has no core."%self) def get_mu_exp(self, int i, int j): """ Return `f = μ_{i, j}` and exponent `x` such that `f ⋅ 2^x = ⟨b_i, b^*_j⟩ / ‖b^*_j‖^2`. If ``enable_row_expo`` is false, `x` is always zero. If ``enable_row_expo`` is true, `x = r_i - r_j`, where `r_i` and `r_j` are the row exponents of rows `i` and `j` respectively. .. note:: It is assumed that `μ_{i, j}` is valid. 
:param i: :param j: """ preprocess_indices(i, j, self.d, self.d) cdef double r = 0.0 cdef long expo = 0 # TODO: don't just return doubles if self._type == mat_gso_mpz_d: r = self._core.mpz_d.get_mu_exp(i, j, expo).get_data() return r, expo IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: r = self._core.mpz_ld.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_dpe: r = self._core.mpz_dpe.get_mu_exp(i, j, expo).get_d() return r, expo IF HAVE_QD: if self._type == mat_gso_mpz_dd: r = self._core.mpz_dd.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_qd: r = self._core.mpz_qd.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_mpz_mpfr: r = self._core.mpz_mpfr.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_d: r = self._core.long_d.get_mu_exp(i, j, expo).get_data() return r, expo IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: r = self._core.long_ld.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_dpe: r = self._core.long_dpe.get_mu_exp(i, j, expo).get_d() return r, expo IF HAVE_QD: if self._type == mat_gso_long_dd: r = self._core.long_dd.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_qd: r = self._core.long_qd.get_mu_exp(i, j, expo).get_d() return r, expo if self._type == mat_gso_long_mpfr: r = self._core.long_mpfr.get_mu_exp(i, j, expo).get_d() return r, expo raise RuntimeError("MatGSO object '%s' has no core."%self) def update_gso(self): """ Updates all GSO coefficients (`μ` and `r`). 
        """
        cdef int r
        # Dispatch on the active (integer type, float type) core.  The
        # underlying C++ call can be long-running, so the GIL is released
        # around it; ``r`` is a plain C int so the assignment is GIL-free.
        if self._type == mat_gso_mpz_d:
            with nogil:
                r = self._core.mpz_d.update_gso()
            return bool(r)
        IF HAVE_LONG_DOUBLE:
            if self._type == mat_gso_mpz_ld:
                with nogil:
                    r = self._core.mpz_ld.update_gso()
                return bool(r)
        if self._type == mat_gso_mpz_dpe:
            with nogil:
                r = self._core.mpz_dpe.update_gso()
            return bool(r)
        IF HAVE_QD:
            if self._type == mat_gso_mpz_dd:
                with nogil:
                    r = self._core.mpz_dd.update_gso()
                return bool(r)
            if self._type == mat_gso_mpz_qd:
                with nogil:
                    r = self._core.mpz_qd.update_gso()
                return bool(r)
        if self._type == mat_gso_mpz_mpfr:
            with nogil:
                r = self._core.mpz_mpfr.update_gso()
            return bool(r)
        if self._type == mat_gso_long_d:
            with nogil:
                r = self._core.long_d.update_gso()
            return bool(r)
        IF HAVE_LONG_DOUBLE:
            if self._type == mat_gso_long_ld:
                with nogil:
                    r = self._core.long_ld.update_gso()
                return bool(r)
        if self._type == mat_gso_long_dpe:
            with nogil:
                r = self._core.long_dpe.update_gso()
            return bool(r)
        IF HAVE_QD:
            if self._type == mat_gso_long_dd:
                with nogil:
                    r = self._core.long_dd.update_gso()
                return bool(r)
            if self._type == mat_gso_long_qd:
                with nogil:
                    r = self._core.long_qd.update_gso()
                return bool(r)
        if self._type == mat_gso_long_mpfr:
            with nogil:
                r = self._core.long_mpfr.update_gso()
            return bool(r)
        raise RuntimeError("MatGSO object '%s' has no core."%self)

    def update_gso_row(self, int i, int last_j):
        """
        Updates `r_{i, j}` and `μ_{i, j}` if needed for all `j` in `[0, last_j]`.

        All coefficients of `r` and `μ` above the `i`-th row in columns `[0, min(last_j, i - 1)]`
        must be valid.
:param int i: :param int last_j: """ if self._type == mat_gso_mpz_d: return bool(self._core.mpz_d.update_gso_row(i, last_j)) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return bool(self._core.mpz_ld.update_gso_row(i, last_j)) if self._type == mat_gso_mpz_dpe: return bool(self._core.mpz_dpe.update_gso_row(i, last_j)) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return bool(self._core.mpz_dd.update_gso_row(i, last_j)) if self._type == mat_gso_mpz_qd: return bool(self._core.mpz_qd.update_gso_row(i, last_j)) if self._type == mat_gso_mpz_mpfr: return bool(self._core.mpz_mpfr.update_gso_row(i, last_j)) if self._type == mat_gso_long_d: return bool(self._core.long_d.update_gso_row(i, last_j)) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return bool(self._core.long_ld.update_gso_row(i, last_j)) if self._type == mat_gso_long_dpe: return bool(self._core.long_dpe.update_gso_row(i, last_j)) IF HAVE_QD: if self._type == mat_gso_long_dd: return bool(self._core.long_dd.update_gso_row(i, last_j)) if self._type == mat_gso_long_qd: return bool(self._core.long_qd.update_gso_row(i, last_j)) if self._type == mat_gso_long_mpfr: return bool(self._core.long_mpfr.update_gso_row(i, last_j)) raise RuntimeError("MatGSO object '%s' has no core."%self) def discover_all_rows(self): """ Allows ``row_addmul`` for all rows even if the GSO has never been computed. 
""" if self._type == mat_gso_mpz_d: return self._core.mpz_d.discover_all_rows() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.discover_all_rows() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.discover_all_rows() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.discover_all_rows() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.discover_all_rows() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.discover_all_rows() if self._type == mat_gso_long_d: return self._core.long_d.discover_all_rows() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.discover_all_rows() if self._type == mat_gso_long_dpe: return self._core.long_dpe.discover_all_rows() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.discover_all_rows() if self._type == mat_gso_long_qd: return self._core.long_qd.discover_all_rows() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.discover_all_rows() raise RuntimeError("MatGSO object '%s' has no core."%self) def move_row(self, int old_r, int new_r): """ Row ``old_r`` becomes row ``new_r`` and intermediate rows are shifted. If ``new_r < old_r``, then ``old_r`` must be ``< n_known_rows``. 
:param int old_r: row index :param int new_r: row index """ preprocess_indices(old_r, new_r, self.d, self.d) if self._type == mat_gso_mpz_d: return self._core.mpz_d.move_row(old_r, new_r) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.move_row(old_r, new_r) if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.move_row(old_r, new_r) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.move_row(old_r, new_r) if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.move_row(old_r, new_r) if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.move_row(old_r, new_r) if self._type == mat_gso_long_d: return self._core.long_d.move_row(old_r, new_r) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.move_row(old_r, new_r) if self._type == mat_gso_long_dpe: return self._core.long_dpe.move_row(old_r, new_r) IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.move_row(old_r, new_r) if self._type == mat_gso_long_qd: return self._core.long_qd.move_row(old_r, new_r) if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.move_row(old_r, new_r) raise RuntimeError("MatGSO object '%s' has no core."%self) def swap_rows(self, int i, int j): """ Swap rows ``i`` and ``j``. 
:param int i: row index :param int j: row index """ preprocess_indices(i, j, self.d, self.d) if self._type == mat_gso_mpz_d: return self._core.mpz_d.row_swap(i, j) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.row_swap(i, j) if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.row_swap(i, j) IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.row_swap(i, j) if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.row_swap(i, j) if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.row_swap(i, j) if self._type == mat_gso_long_d: return self._core.long_d.row_swap(i, j) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.row_swap(i, j) if self._type == mat_gso_long_dpe: return self._core.long_dpe.row_swap(i, j) IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.row_swap(i, j) if self._type == mat_gso_long_qd: return self._core.long_qd.row_swap(i, j) if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.row_swap(i, j) raise RuntimeError("MatGSO object '%s' has no core."%self) def negate_row(self, int i): """Set `b_i` to `-b_i`. :param int i: index of the row to negate Example:: >>> from fpylll import * >>> FPLLL.set_random_seed(42) >>> A = IntegerMatrix(6, 6) >>> A.randomize("ntrulike", bits=6, q=31) >>> print(A) [ 1 0 0 12 25 25 ] [ 0 1 0 25 12 25 ] [ 0 0 1 25 25 12 ] [ 0 0 0 31 0 0 ] [ 0 0 0 0 31 0 ] [ 0 0 0 0 0 31 ] >>> M = GSO.Mat(A) >>> M.update_gso() True >>> with M.row_ops(2,2): ... M.negate_row(2) ... >>> print(A) [ 1 0 0 12 25 25 ] [ 0 1 0 25 12 25 ] [ 0 0 -1 -25 -25 -12 ] [ 0 0 0 31 0 0 ] [ 0 0 0 0 31 0 ] [ 0 0 0 0 0 31 ] """ self.row_addmul(i, i, -2.0) def row_addmul(self, int i, int j, x): """Set `b_i = b_i + x ⋅ b_j`. After one or several calls to ``row_addmul``, ``row_op_end`` must be called. 
If ``row_op_force_long=true``, ``x`` is always converted to (``2^expo * long``) instead of (``2^expo * ZT``), which is faster if ``ZT=mpz_t`` but might lead to a loss of precision in LLL, more Babai iterations are needed. :param int i: target row :param int j: source row :param x: multiplier """ preprocess_indices(i, j, self.d, self.d) cdef fp_nr_t x_ if self._type == mat_gso_mpz_d: x_.d = float(x) return self._core.mpz_d.row_addmul(i, j, x_.d) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: x_.ld = float(x) return self._core.mpz_ld.row_addmul(i, j, x_.ld) if self._type == mat_gso_mpz_dpe: x_.dpe = float(x) return self._core.mpz_dpe.row_addmul(i, j, x_.dpe) IF HAVE_QD: if self._type == mat_gso_mpz_dd: x_.dd = float(x) return self._core.mpz_dd.row_addmul(i, j, x_.dd) if self._type == mat_gso_mpz_qd: x_.qd = float(x) return self._core.mpz_qd.row_addmul(i, j, x_.qd) if self._type == mat_gso_mpz_mpfr: x_.mpfr = float(x) return self._core.mpz_mpfr.row_addmul(i, j, x_.mpfr) if self._type == mat_gso_long_d: x_.d = float(x) return self._core.long_d.row_addmul(i, j, x_.d) IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: x_.ld = float(x) return self._core.long_ld.row_addmul(i, j, x_.ld) if self._type == mat_gso_long_dpe: x_.dpe = float(x) return self._core.long_dpe.row_addmul(i, j, x_.dpe) IF HAVE_QD: if self._type == mat_gso_long_dd: x_.dd = float(x) return self._core.long_dd.row_addmul(i, j, x_.dd) if self._type == mat_gso_long_qd: x_.qd = float(x) return self._core.long_qd.row_addmul(i, j, x_.qd) if self._type == mat_gso_long_mpfr: x_.mpfr = float(x) return self._core.long_mpfr.row_addmul(i, j, x_.mpfr) raise RuntimeError("MatGSO object '%s' has no core."%self) def create_row(self): """Adds a zero row to ``B`` (and to ``U`` if ``enable_tranform=true``). One or several operations can be performed on this row with ``row_addmul``, then ``row_op_end`` must be called. Do not use if ``inverse_transform_enabled=true``. 
""" if self.inverse_transform_enabled: raise ValueError("create_row is incompatible with ``inverse_transform_enabled``") if self._type == mat_gso_mpz_d: return self._core.mpz_d.create_row() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.create_row() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.create_row() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.create_row() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.create_row() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.create_row() if self._type == mat_gso_long_d: return self._core.long_d.create_row() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.create_row() if self._type == mat_gso_long_dpe: return self._core.long_dpe.create_row() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.create_row() if self._type == mat_gso_long_qd: return self._core.long_qd.create_row() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.create_row() raise RuntimeError("MatGSO object '%s' has no core."%self) def remove_last_row(self): """Remove. the last row of ``B`` (and of ``U`` if ``enable_transform=true``). Do not use if ``inverse_transform_enabled=true``. 
""" if self.inverse_transform_enabled: raise ValueError("remove_last_row is incompatible with ``inverse_transform_enabled``") if self._type == mat_gso_mpz_d: return self._core.mpz_d.remove_last_row() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.remove_last_row() if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.remove_last_row() IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.remove_last_row() if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.remove_last_row() if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.remove_last_row() if self._type == mat_gso_long_d: return self._core.long_d.remove_last_row() IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.remove_last_row() if self._type == mat_gso_long_dpe: return self._core.long_dpe.remove_last_row() IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.remove_last_row() if self._type == mat_gso_long_qd: return self._core.long_qd.remove_last_row() if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.remove_last_row() raise RuntimeError("MatGSO object '%s' has no core."%self) def get_current_slope(self, int start_row, int stop_row): """ Finds the slope of the curve fitted to the lengths of the vectors from ``start_row`` to ``stop_row``. The slope gives an indication of the quality of the LLL-reduced basis. :param int start_row: start row index :param int stop_row: stop row index (exclusive) .. 
note:: we call ``get_current_slope`` which is declared in bkz.h """ preprocess_indices(start_row, stop_row, self.d, self.d+1) if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.get_current_slope(start_row, stop_row) sig_off() return r IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: sig_on() r = self._core.mpz_ld.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.get_current_slope(start_row, stop_row) sig_off() return r IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_mpz_mpfr: sig_on() r = self._core.mpz_mpfr.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_long_d: sig_on() r = self._core.long_d.get_current_slope(start_row, stop_row) sig_off() return r IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: sig_on() r = self._core.long_ld.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.get_current_slope(start_row, stop_row) sig_off() return r IF HAVE_QD: if self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.get_current_slope(start_row, stop_row) sig_off() return r if self._type == mat_gso_long_mpfr: sig_on() r = self._core.long_mpfr.get_current_slope(start_row, stop_row) sig_off() return r raise RuntimeError("MatGSO object '%s' has no core."%self) def get_root_det(self, int start_row, int stop_row): """ Return (squared) root determinant of the basis. 
:param int start_row: start row (inclusive) :param int stop_row: stop row (exclusive) """ preprocess_indices(start_row, stop_row, self.d, self.d+1) if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_mpfr: sig_on() r = self._core.mpz_mpfr.get_root_det(start_row, stop_row).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: sig_on() r = self._core.mpz_ld.get_root_det(start_row, stop_row).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.get_root_det(start_row, stop_row).get_d() sig_off() return r if self._type == mat_gso_long_d: sig_on() r = self._core.long_d.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_mpfr: sig_on() r = self._core.long_mpfr.get_root_det(start_row, stop_row).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: sig_on() r = self._core.long_ld.get_root_det(start_row, stop_row).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.get_root_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.get_root_det(start_row, stop_row).get_d() sig_off() return r raise RuntimeError("MatGSO object '%s' has no core."%self) def get_log_det(self, int start_row, int stop_row): """ Return log of the (squared) determinant of the basis. 
:param int start_row: start row (inclusive) :param int stop_row: stop row (exclusive) """ preprocess_indices(start_row, stop_row, self.d, self.d+1) if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_mpfr: sig_on() r = self._core.mpz_mpfr.get_log_det(start_row, stop_row).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: sig_on() r = self._core.mpz_ld.get_log_det(start_row, stop_row).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.get_log_det(start_row, stop_row).get_d() sig_off() return r if self._type == mat_gso_long_d: sig_on() r = self._core.long_d.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_mpfr: sig_on() r = self._core.long_mpfr.get_log_det(start_row, stop_row).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: sig_on() r = self._core.long_ld.get_log_det(start_row, stop_row).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.get_log_det(start_row, stop_row).get_d() sig_off() return r elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.get_log_det(start_row, stop_row).get_d() sig_off() return r raise RuntimeError("MatGSO object '%s' has no core."%self) def get_slide_potential(self, int start_row, int stop_row, int block_size): """ Return slide potential of the basis :param int start_row: start row (inclusive) :param int stop_row: stop row 
(exclusive) :param int block_size: block size """ preprocess_indices(start_row, stop_row, self.d, self.d+1) if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_mpz_mpfr: sig_on() r = self._core.mpz_mpfr.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: sig_on() r = self._core.mpz_ld.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r if self._type == mat_gso_long_d: sig_on() r = self._core.long_d.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_long_mpfr: sig_on() r = self._core.long_mpfr.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r else: IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: sig_on() r = self._core.long_ld.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r IF HAVE_QD: if self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r elif self._type == mat_gso_long_qd: sig_on() r = self._core.long_qd.get_slide_potential(start_row, stop_row, block_size).get_d() sig_off() return r raise RuntimeError("MatGSO object '%s' has no 
core."%self) def from_canonical(self, w, int start=0, int dimension=-1): """Given a vector `w` wrt the canonical basis `\ZZ^n` return a vector `v` wrt the Gram-Schmidt basis `B^*` :param v: a tuple-like object of dimension ``M.B.ncols`` :param start: only consider subbasis starting at ``start`` :param dimension: only consider ``dimension`` vectors or all if ``-1`` :returns: a tuple of dimension ``dimension``` or ``M.d``` when ``dimension`` is ``None`` This operation is the inverse of ``to_canonical``:: >>> import random >>> A = IntegerMatrix.random(5, "uniform", bits=6) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> v = tuple(IntegerMatrix.random(5, "uniform", bits=6)[0]); v (35, 24, 55, 40, 23) >>> w = M.from_canonical(v); w # doctest: +ELLIPSIS (0.98294..., 0.5636..., -3.4594479..., 0.9768..., 0.261316...) >>> v_ = tuple([int(round(wi)) for wi in M.to_canonical(w)]); v_ (35, 24, 55, 40, 23) >>> v == v_ True """ if self._alg != mat_gso_gso_t: raise TypeError("This function is only defined for GSO objects over a basis") cdef vector_fp_nr_t cv, cw cdef fp_nr_t tmp if self._type == mat_gso_mpz_d: vector_fp_nr_barf(cw, w, FT_DOUBLE) sig_on() (self._core.mpz_d).from_canonical(cv.d, cw.d, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_DOUBLE) elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cw, w, FT_LONG_DOUBLE) sig_on() (self._core.mpz_ld).from_canonical(cv.ld, cw.ld, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_LONG_DOUBLE) ELSE: raise RuntimeError("Float type long double not supported.") # elif self._type == mat_gso_mpz_dpe: # vector_fp_nr_barf(cw, w, FT_DPE) # sig_on() # (self._core.mpz_dpe).from_canonical(cv.dpe, cw.dpe, start, dimension) # sig_off() # return vector_fp_nr_slurp(cv, FT_DPE) elif self._type == mat_gso_mpz_mpfr: vector_fp_nr_barf(cw, w, FT_MPFR) sig_on() (self._core.mpz_mpfr).from_canonical(cv.mpfr, cw.mpfr, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_MPFR) elif self._type == 
mat_gso_long_d: vector_fp_nr_barf(cw, w, FT_DOUBLE) sig_on() (self._core.long_d).from_canonical(cv.d, cw.d, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_DOUBLE) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cw, w, FT_LONG_DOUBLE) sig_on() (self._core.long_ld).from_canonical(cv.ld, cw.ld, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_LONG_DOUBLE) ELSE: raise RuntimeError("Float type long double not supported.") # elif self._type == mat_gso_long_dpe: # vector_fp_nr_barf(cw, w, FT_DPE) # sig_on() # (self._core.long_dpe).from_canonical(cv.dpe, cw.dpe, start, dimension) # sig_off() # return vector_fp_nr_slurp(cv, FT_DPE) # elif self._type == mat_gso_long_mpfr: # vector_fp_nr_barf(cw, w, FT_MPFR) # sig_on() # (self._core.long_mpfr).from_canonical(cv.mpfr, cw.mpfr, start, dimension) # sig_off() # return vector_fp_nr_slurp(cv, FT_MPFR) IF HAVE_QD: if self._type == mat_gso_mpz_dd: vector_fp_nr_barf(cw, w, FT_DD) sig_on() (self._core.mpz_dd).from_canonical(cv.dd, cw.dd, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_DD) elif self._type == mat_gso_mpz_qd: vector_fp_nr_barf(cw, w, FT_QD) sig_on() (self._core.mpz_qd).from_canonical(cv.qd, cw.qd, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_QD) elif self._type == mat_gso_long_dd: vector_fp_nr_barf(cw, w, FT_DD) sig_on() (self._core.long_dd).from_canonical(cv.dd, cw.dd, start, dimension) sig_off() return vector_fp_nr_slurp(cv, FT_DD) # elif self._type == mat_gso_long_qd: # vector_fp_nr_barf(cw, w, FT_QD) # sig_on() # (self._core.long_ld).from_canonical(cv.qd, cw.qd, start, dimension) # sig_off() # return vector_fp_nr_slurp(cw, FT_QD) raise NotImplementedError # def _from_canonical_old(self, w, int start=0, int dimension=-1): # cdef Py_ssize_t i, j, d # if self._alg != mat_gso_gso_t: # raise TypeError("This function is only defined for GSO objects over a basis") # if dimension == -1: # d = self.d - start # else: # d = dimension # cdef list 
ret = [0]*(start+d) # for i in range(start+d): # for j in range(self.B.ncols): # ret[i] += self.B[i, j] * w[j] # for j in range(i): # ret[i] -= self.get_mu(i, j) * ret[j] # # we drop the first ``start`` entries anyway, so no need to update # for i in range(d): # ret[start+i] /= self.get_r(start+i, start+i) # return tuple(ret)[start:] def to_canonical(self, v, int start=0): """ Given a vector `v` wrt the Gram-Schmidt basis `B^*` return a vector `w` wrt the canonical basis `ZZ^n`, i.e. solve `w = v⋅B^*`. :param v: a tuple-like object of dimension ``M.d`` :param start: only consider subbasis starting at ``start`` :returns: a tuple of dimension ``M.B.ncols`` """ if self._alg != mat_gso_gso_t: raise TypeError("This function is only defined for GSO objects over a basis") cdef vector_fp_nr_t cv, cw cdef fp_nr_t tmp if self._type == mat_gso_mpz_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() (self._core.mpz_d).to_canonical(cw.d, cv.d, start) sig_off() return vector_fp_nr_slurp(cw, FT_DOUBLE) elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) sig_on() (self._core.mpz_ld).to_canonical(cw.ld, cv.ld, start) sig_off() return vector_fp_nr_slurp(cw, FT_LONG_DOUBLE) ELSE: raise RuntimeError("Float type long double not supported.") # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_mpz_dpe: # vector_fp_nr_barf(cv, v, FT_DPE) # sig_on() # (self._core.mpz_dpe).to_canonical(cw.dpe, cv.dpe, start) # sig_off() # return vector_fp_nr_slurp(cw, FT_DPE) elif self._type == mat_gso_mpz_mpfr: vector_fp_nr_barf(cv, v, FT_MPFR) sig_on() (self._core.mpz_mpfr).to_canonical(cw.mpfr, cv.mpfr, start) sig_off() return vector_fp_nr_slurp(cw, FT_MPFR) elif self._type == mat_gso_long_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() (self._core.long_d).to_canonical(cw.d, cv.d, start) sig_off() return vector_fp_nr_slurp(cw, FT_DOUBLE) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) 
sig_on() (self._core.long_ld).to_canonical(cw.ld, cv.ld, start) sig_off() return vector_fp_nr_slurp(cw, FT_LONG_DOUBLE) ELSE: raise RuntimeError("Float type long double not supported.") # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_long_dpe: # vector_fp_nr_barf(cv, v, FT_DPE) # sig_on() # (self._core.long_dpe).to_canonical(cw.dpe, cv.dpe, start) # sig_off() # return vector_fp_nr_slurp(cw, FT_DPE) # elif self._type == mat_gso_long_mpfr: # vector_fp_nr_barf(cv, v, FT_MPFR) # sig_on() # (self._core.long_mpfr).to_canonical(cw.mpfr, cv.mpfr, start) # sig_off() # return vector_fp_nr_slurp(cw, FT_MPFR) IF HAVE_QD: if self._type == mat_gso_mpz_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() (self._core.mpz_dd).to_canonical(cw.dd, cv.dd, start) sig_off() return vector_fp_nr_slurp(cw, FT_DD) # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_mpz_qd: # vector_fp_nr_barf(cv, v, FT_QD) # sig_on() # (self._core.mpz_ld).to_canonical(cw.qd, cv.qd, start) # sig_off() # return vector_fp_nr_slurp(cw, FT_QD) elif self._type == mat_gso_long_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() (self._core.long_dd).to_canonical(cw.dd, cv.dd, start) sig_off() return vector_fp_nr_slurp(cw, FT_DD) # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_long_qd: # vector_fp_nr_barf(cv, v, FT_QD) # sig_on() # (self._core.long_ld).to_canonical(cw.qd, cv.qd, start) # sig_off() # return vector_fp_nr_slurp(cw, FT_QD) raise NotImplementedError # def _to_canonical_old(self, v, int start=0): # # We have some `v` s.t. `w = v ⋅ B^*` and we want to recover `w`. We do not store `B^*`, but # # we store `(B, μ)` s.t. `μ ⋅ B^* = B`: # # - `w = v ⋅ B^*` # # - `w = v ⋅ μ^{-1} ⋅ B` # # - let `x` s.t. `x⋅μ = v` # # - `w = x ⋅ B` # # 1. 
triangular system solving # cdef list x = list(v) # cdef Py_ssize_t i, j # cdef Py_ssize_t d = min(len(x), self.d-start) # for i in range(d)[::-1]: # for j in range(i+1, d): # x[i] -= self.get_mu(start+j, start+i) * x[j] # # 2. multiply by `B` # w = [0]*self.B.ncols # for i in range(d): # for j in range(self.B.ncols): # w[j] += x[i] * self.B[start+i,j] # return tuple(w) def babai(self, v, int start=0, int dimension=-1, gso=False): """ Return vector `w` s.t. `‖w⋅B - v‖` is small using Babai's nearest plane algorithm. :param v: a tuple-like object :param start: only consider subbasis starting at ``start`` :param dimension: only consider ``dimension`` vectors or all if ``-1`` :param gso: if ``True`` vector is represented wrt to the Gram-Schmidt basis, otherwise canonical basis is assumed. :returns: a tuple of dimension ``M.B.nrows`` The output vector is a coefficient vector wrt to `B`:: >>> from fpylll.util import vector_norm >>> A = LLL.reduction(IntegerMatrix.random(50, "qary", k=25, q=7681)) >>> M = GSO.Mat(A, update=True) >>> v = (100,)*50 >>> w = M.babai(v) >>> vector_norm(A.multiply_left(w), v) # ‖w⋅A - v‖ 58851 >>> vector_norm(A.multiply_left(w)) # ‖w⋅A‖ 530251 We may consider the input vector as a coefficient vector wrt to `B^*`:: >>> v = M.from_canonical((100,)*50) >>> w = M.babai(v, gso=True) >>> vector_norm(A.multiply_left(w), (100,)*50) 58851 We compute a more interesting example and solve a simple Knapsack:: >>> from fpylll import * >>> _ = FPLLL.set_precision(500) >>> n = 10 >>> B = IntegerMatrix(n, n + 1) >>> B.randomize("intrel", bits=500) >>> v_opt = B.multiply_left([1,0,1,0,1,1,0,0,1,1]) >>> s = v_opt[0] # s = , where a is vector of knapsack values. >>> t = [s] + (n * [0]) >>> _ = LLL.reduction(B) >>> M = GSO.Mat(B, update=True, float_type="mpfr") >>> y = M.babai(t) >>> v = B.multiply_left(y) >>> t[0] == v[0] True >>> v_ = CVP.babai(B, t) >>> v_ == v True .. note :: A separate implementation is available at `CVP.babai()`. 
That implementation is more numerically stable than this one (by repeatedly calling the Nearest Plane algorithm at a given precision to improve the solution). On the other hand, this implementation supports floating point target vectors and `CVP.babai()` does not. """ if dimension == -1: dimension = self.d - start cdef vector_fp_nr_t cv cdef vector_z_nr_t cw if gso is False and self._alg != mat_gso_gso_t: raise TypeError("Can only convert to GSO representation with a basis.") if not gso: if self._type == mat_gso_mpz_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() (self._core.mpz_d).babai(cw.mpz, cv.d, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) sig_on() (self._core.mpz_ld).babai(cw.mpz, cv.ld, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) ELSE: raise RuntimeError("Float type long double not supported.") # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_mpz_dpe: # vector_fp_nr_barf(cv, v, FT_DPE) # sig_on() # (self._core.mpz_dpe).babai(cw.mpz, cv.dpe, start, dimension, gso) # sig_off() # return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_mpz_mpfr: vector_fp_nr_barf(cv, v, FT_MPFR) sig_on() (self._core.mpz_mpfr).babai(cw.mpz, cv.mpfr, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_long_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() (self._core.long_d).babai(cw.long, cv.d, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) sig_on() (self._core.long_ld).babai(cw.long, cv.ld, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) ELSE: raise RuntimeError("Float type long double not supported.") # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_long_dpe: # vector_fp_nr_barf(cv, v, 
FT_DPE) # sig_on() # (self._core.long_dpe).babai(cw.long, cv.dpe, start, dimension, gso) # sig_off() # return vector_z_nr_slurp(cw, ZT_LONG) # elif self._type == mat_gso_long_mpfr: # vector_fp_nr_barf(cv, v, FT_MPFR) # sig_on() # (self._core.long_mpfr).babai(cw.long, cv.mpfr, start, dimension, gso) # sig_off() # return vector_z_nr_slurp(cw, ZT_LONG) IF HAVE_QD: if self._type == mat_gso_mpz_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() (self._core.mpz_dd).babai(cw.mpz, cv.dd, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_mpz_qd: # vector_fp_nr_barf(cv, v, FT_QD) # sig_on() # (self._core.mpz_qd).babai(cw.mpz, cv.qd, start, dimension, gso) # sig_off() # return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_long_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() (self._core.long_dd).babai(cw.long, cv.dd, start, dimension, gso) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) # # https://github.com/fplll/fplll/issues/493 # elif self._type == mat_gso_long_qd: # vector_fp_nr_barf(cv, v, FT_QD) # sig_on() # (self._core.long_qd).babai(cw.long, cv.qd, start, dimension, gso) # sig_off() # return vector_z_nr_slurp(cw, ZT_LONG) raise NotImplementedError else: if self._type == mat_gso_mpz_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() self._core.mpz_d.babai(cw.mpz, cv.d, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) sig_on() self._core.mpz_ld.babai(cw.mpz, cv.ld, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) ELSE: raise RuntimeError("Float type long double not supported.") elif self._type == mat_gso_mpz_dpe: vector_fp_nr_barf(cv, v, FT_DPE) sig_on() self._core.mpz_dpe.babai(cw.mpz, cv.dpe, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_mpz_mpfr: vector_fp_nr_barf(cv, v, FT_MPFR) sig_on() 
self._core.mpz_mpfr.babai(cw.mpz, cv.mpfr, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_long_d: vector_fp_nr_barf(cv, v, FT_DOUBLE) sig_on() self._core.long_d.babai(cw.long, cv.d, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: vector_fp_nr_barf(cv, v, FT_LONG_DOUBLE) sig_on() self._core.long_ld.babai(cw.long, cv.ld, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) ELSE: raise RuntimeError("Float type long double not supported.") elif self._type == mat_gso_long_dpe: vector_fp_nr_barf(cv, v, FT_DPE) sig_on() self._core.long_dpe.babai(cw.long, cv.dpe, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) elif self._type == mat_gso_long_mpfr: vector_fp_nr_barf(cv, v, FT_MPFR) sig_on() self._core.long_mpfr.babai(cw.long, cv.mpfr, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) IF HAVE_QD: if self._type == mat_gso_mpz_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() self._core.mpz_dd.babai(cw.mpz, cv.dd, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_mpz_qd: vector_fp_nr_barf(cv, v, FT_QD) sig_on() self._core.mpz_qd.babai(cw.mpz, cv.qd, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_MPZ) elif self._type == mat_gso_long_dd: vector_fp_nr_barf(cv, v, FT_DD) sig_on() self._core.long_dd.babai(cw.long, cv.dd, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) elif self._type == mat_gso_long_qd: vector_fp_nr_barf(cv, v, FT_QD) sig_on() self._core.long_qd.babai(cw.long, cv.qd, start, dimension) sig_off() return vector_z_nr_slurp(cw, ZT_LONG) raise NotImplementedError # def _babai_old(self, v, int start=0, int dimension=-1, gso=False): # if dimension == -1: # dimension = self.d - start # if not gso: # v = self.from_canonical(v, start, dimension) # cdef Py_ssize_t i, j # cdef list vv = list(v) # for i in range(dimension)[::-1]: # vv[i] = 
int(round(vv[i])) # for j in range(i): # vv[j] -= self.get_mu(start+i, start+j) * vv[i] # return tuple(vv) def r(self, start=0, end=-1): """ Return ``r`` vector from ``start`` to ``end`` """ if end == -1: end = self.d return tuple([self.get_r(i, i) for i in range(start, end)]) class GSO: DEFAULT=GSO_DEFAULT INT_GRAM=GSO_INT_GRAM ROW_EXPO=GSO_ROW_EXPO OP_FORCE_LONG=GSO_OP_FORCE_LONG Mat = MatGSO fpylll-0.6.1/src/fpylll/fplll/integer_matrix.pxd000066400000000000000000000006471455321202600217670ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll.gmp.types cimport mpz_t from .fplll cimport IntType from .decl cimport zz_mat_core_t cdef class IntegerMatrix: cdef IntType _type cdef zz_mat_core_t _core cdef long _nrows(self) cdef long _ncols(self) cdef object _get(self, int i, int j) cdef int _set(self, int i, int j, value) except -1 cdef class IntegerMatrixRow: cdef int row cdef IntegerMatrix m fpylll-0.6.1/src/fpylll/fplll/integer_matrix.pyx000066400000000000000000001563741455321202600220250ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Dense matrices over the Integers. .. moduleauthor:: Martin R. Albrecht """ include "fpylll/config.pxi" from cpython cimport PyIndex_Check from cysignals.signals cimport sig_on, sig_off from .fplll cimport Matrix, MatrixRow, Z_NR from fpylll.util cimport preprocess_indices, check_int_type from fpylll.io cimport assign_Z_NR_mpz, assign_mpz, mpz_get_python from .fplll cimport IntType, ZT_MPZ, ZT_LONG, ZZ_mat import re from math import log10, ceil, sqrt, floor from .decl cimport z_long, z_mpz from fpylll.gmp.pylong cimport mpz_get_pyintlong from fpylll.gmp.mpz cimport mpz_init, mpz_mod, mpz_fdiv_q_ui, mpz_clear, mpz_cmp, mpz_sub, mpz_set, mpz_set_si, mpz_get_si cdef class IntegerMatrixRow: """ A reference to a row in an integer matrix. """ def __init__(self, IntegerMatrix M, int row): """Create a row reference. 
:param IntegerMatrix M: Integer matrix :param int row: row index Row references are immutable:: >>> from fpylll import IntegerMatrix >>> A = IntegerMatrix(2, 3) >>> A[0,0] = 1; A[0,1] = 2; A[0,2] = 3 >>> r = A[0] >>> r[0] 1 >>> r[0] = 1 Traceback (most recent call last): ... TypeError: 'fpylll.fplll.integer_matrix.IntegerMatrixRow' object does not support item assignment """ preprocess_indices(row, row, M.nrows, M.nrows) self.row = row self.m = M def __getitem__(self, int column): """Return entry at ``column`` :param int column: integer offset """ preprocess_indices(column, column, self.m._ncols(), self.m._ncols()) if self.m._type == ZT_MPZ: return mpz_get_python(self.m._core.mpz[0][self.row][column].get_data()) elif self.m._type == ZT_LONG: return self.m._core.long[0][self.row][column].get_data() else: raise RuntimeError("Integer type '%s' not understood."%self._m._type) def __str__(self): """ String representation of this row. Example:: >>> from fpylll import IntegerMatrix >>> A = IntegerMatrix(2, 3) >>> A[0,0] = 1; A[0,1] = 2; A[0,2] = 3 >>> print(str(A[0])) (1, 2, 3) """ cdef int i r = [] for i in range(self.m._ncols()): t = self.m._get(self.row, i) r.append(str(t)) return "(" + ", ".join(r) + ")" def __repr__(self): """ Example:: >>> from fpylll import IntegerMatrix >>> A = IntegerMatrix(2, 3) >>> A[0,0] = 1; A[0,1] = 2; A[0,2] = 3 >>> print(repr(A[0])) # doctest: +ELLIPSIS row 0 of """ return "row %d of %r"%(self.row, self.m) def __reduce__(self): """ Make sure attempts at pickling raise an error until proper pickling is implemented. """ raise NotImplementedError def __abs__(self): """Return ℓ_2 norm of this vector. Example:: >>> A = IntegerMatrix.from_iterable(1, 3, [1,2,3]) >>> A[0].norm() # doctest: +ELLIPSIS 3.74165... >>> 1*1 + 2*2 + 3*3 14 >>> from math import sqrt >>> sqrt(14) # doctest: +ELLIPSIS 3.74165... 
""" cdef Z_NR[mpz_t] t_mpz cdef Z_NR[long] t_l # TODO: don't just use doubles if self.m._type == ZT_MPZ: self.m._core.mpz[0][self.row].dot_product(t_mpz, self.m._core.mpz[0][self.row]) return sqrt(t_mpz.get_d()) elif self.m._type == ZT_LONG: self.m._core.long[0][self.row].dot_product(t_l, self.m._core.long[0][self.row]) return sqrt(t_l.get_d()) else: raise RuntimeError("Integer type '%s' not understood."%self._m._type) norm = __abs__ def __len__(self): """ Return vector length. Example:: >>> A = IntegerMatrix.from_matrix([[1,2],[3,4]], 2, 2) >>> len(A[0]) 2 """ if self.m._type == ZT_MPZ: return self.m._core.mpz[0][self.row].size() elif self.m._type == ZT_LONG: return self.m._core.long[0][self.row].size() else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) def is_zero(self, int frm=0): """ Return ``True`` if this vector consists of only zeros starting at index ``frm`` Example:: >>> A = IntegerMatrix.from_matrix([[1,0,0]]) >>> A[0].is_zero() False >>> A[0].is_zero(1) True """ if self.m._type == ZT_MPZ: return bool(self.m._core.mpz[0][self.row].is_zero(frm)) elif self.m._type == ZT_LONG: return bool(self.m._core.long[0][self.row].is_zero(frm)) else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) def size_nz(self): """ Index at which an all zero vector starts. 
Example:: >>> A = IntegerMatrix.from_matrix([[0,2,3],[0,2,0],[0,0,0]]) >>> A[0].size_nz() 3 >>> A[1].size_nz() 2 >>> A[2].size_nz() 0 """ if self.m._type == ZT_MPZ: return self.m._core.mpz[0][self.row].size_nz() elif self.m._type == ZT_LONG: return self.m._core.long[0][self.row].size_nz() else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) def __iadd__(self, IntegerMatrixRow v): """ In-place add row vector ``v`` :param IntegerMatrixRow v: a row vector Example:: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A[0] += A[1] >>> print(A[0]) (3, 6) >>> v = A[0] >>> v += A[1] >>> print(A[0]) (6, 10) """ if self.m._type == ZT_MPZ: self.m._core.mpz[0][self.row].add(v.m._core.mpz[0][v.row]) elif self.m._type == ZT_LONG: self.m._core.long[0][self.row].add(v.m._core.long[0][v.row]) else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) return self def __isub__(self, IntegerMatrixRow v): """ In-place subtract row vector ``v`` :param IntegerMatrixRow v: a row vector Example:: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A[0] -= A[1] >>> print(A[0]) (-3, -2) >>> v = A[0] >>> v -= A[1] >>> print(A[0]) (-6, -6) """ if self.m._type == ZT_MPZ: self.m._core.mpz[0][self.row].sub(v.m._core.mpz[0][v.row]) elif self.m._type == ZT_LONG: self.m._core.long[0][self.row].sub(v.m._core.long[0][v.row]) else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) return self def addmul(self, IntegerMatrixRow v, x=1, int expo=0): """In-place add row vector ``2^expo ⋅ x ⋅ v`` :param IntegerMatrixRow v: a row vector :param x: multiplier :param int expo: scaling exponent. 
Example:: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A[0].addmul(A[1]) >>> print(A[0]) (3, 6) >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A[0].addmul(A[1],x=0) >>> print(A[0]) (0, 2) >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A[0].addmul(A[1],x=1,expo=2) >>> print(A[0]) (12, 18) """ cdef Z_NR[mpz_t] x_mpz_ cdef Z_NR[mpz_t] tmp_mpz cdef Z_NR[long] x_l_ cdef Z_NR[long] tmp_l if self.m._type == ZT_MPZ: assign_Z_NR_mpz(x_mpz_, x) self.m._core.mpz[0][self.row].addmul_2exp(v.m._core.mpz[0][v.row], x_mpz_, expo, tmp_mpz) elif self.m._type == ZT_LONG: x_l_ = x self.m._core.long[0][self.row].addmul_2exp(v.m._core.long[0][v.row], x_l_, expo, tmp_l) else: raise RuntimeError("Integer type '%s' not understood."%self.m._type) return cdef class IntegerMatrix: """ Dense matrices over the Integers. """ def __init__(self, arg0, arg1=None, int_type="mpz"): """Construct a new integer matrix :param arg0: number of rows ≥ 0 or matrix :param arg1: number of columns ≥ 0 or ``None`` The default constructor takes the number of rows and columns:: >>> from fpylll import IntegerMatrix >>> IntegerMatrix(10, 10) # doctest: +ELLIPSIS >>> IntegerMatrix(10, 0) # doctest: +ELLIPSIS >>> IntegerMatrix(-1, 0) Traceback (most recent call last): ... ValueError: Number of rows must be >0 The default constructor is also a copy constructor:: >>> A = IntegerMatrix(2, 2) >>> A[0,0] = 1 >>> B = IntegerMatrix(A) >>> B[0,0] 1 >>> A[0,0] = 2 >>> B[0,0] 1 IntegerMatrix also supports numpy's integer types, if numpy is supported. See tests/test_numpy.py for example usage. 
""" self._type = check_int_type(int_type) if PyIndex_Check(arg0) and PyIndex_Check(arg1): if arg0 < 0: raise ValueError("Number of rows must be >0") if arg1 < 0: raise ValueError("Number of columns must be >0") if self._type == ZT_MPZ: self._core.mpz = new ZZ_mat[mpz_t](arg0, arg1) elif self._type == ZT_LONG: self._core.long = new ZZ_mat[long](arg0, arg1) else: raise ValueError("Integer type '%s' not understood."%int_type) return elif isinstance(arg0, IntegerMatrix) and arg1 is None: if self._type == ZT_MPZ: self._core.mpz = new ZZ_mat[mpz_t](arg0.nrows, arg0.ncols) elif self._type == ZT_LONG: self._core.long = new ZZ_mat[long](arg0.nrows, arg0.ncols) else: raise ValueError("Integer type '%s' not understood."%int_type) self.set_matrix(arg0) return else: raise TypeError("Parameters arg0 and arg1 not understood") @classmethod def from_matrix(cls, A, nrows=None, ncols=None, **kwds): """Construct a new integer matrix from matrix-like object A :param A: a matrix like object, with element access A[i,j] or A[i][j] :param nrows: number of rows (optional) :param ncols: number of columns (optional) >>> A = IntegerMatrix.from_matrix([[1,2,3],[4,5,6]]) >>> print(A) [ 1 2 3 ] [ 4 5 6 ] """ cdef int m, n if nrows is None: if hasattr(A, "nrows"): nrows = A.nrows elif hasattr(A, "__len__"): nrows = len(A) else: raise ValueError("Cannot determine number of rows.") if not PyIndex_Check(nrows): if callable(nrows): nrows = nrows() else: raise ValueError("Cannot determine number of rows.") if ncols is None: if hasattr(A, "ncols"): ncols = A.ncols elif hasattr(A[0], "__len__"): ncols = len(A[0]) else: raise ValueError("Cannot determine number of rows.") if not PyIndex_Check(ncols): if callable(ncols): ncols = ncols() else: raise ValueError("Cannot determine number of rows.") m = nrows n = ncols B = cls(m, n, **kwds) B.set_matrix(A) return B @classmethod def from_iterable(cls, nrows, ncols, it, **kwds): """Construct a new integer matrix from matrix-like object A :param nrows: number of 
rows :param ncols: number of columns :param it: an iterable of length at least ``nrows * ncols`` >>> A = IntegerMatrix.from_iterable(2,3, [1,2,3,4,5,6]) >>> print(A) [ 1 2 3 ] [ 4 5 6 ] """ A = cls(nrows, ncols, **kwds) A.set_iterable(it) return A @classmethod def identity(cls, nrows, int_type="mpz"): """Construct a new identity matrix of dimension ``nrows × nrows`` :param nrows: number of rows. >>> A = IntegerMatrix.identity(4) >>> print(A) [ 1 0 0 0 ] [ 0 1 0 0 ] [ 0 0 1 0 ] [ 0 0 0 1 ] """ A = IntegerMatrix(nrows, nrows, int_type=int_type) A.gen_identity(nrows) return A @classmethod def random(cls, d, algorithm, int_type="mpz", **kwds): """ Construct new random matrix. :param d: dominant size parameter, see below for details :param algorithm: type of matrix create, see below for details :param int_type: underlying integer type :returns: a random lattice basis Examples:: >>> from fpylll import FPLLL >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(10, "intrel", bits=30)) [ 285965362 1 0 0 0 0 0 0 0 0 0 ] [ 714553900 0 1 0 0 0 0 0 0 0 0 ] [ 1017994245 0 0 1 0 0 0 0 0 0 0 ] [ 256743299 0 0 0 1 0 0 0 0 0 0 ] [ 602398079 0 0 0 0 1 0 0 0 0 0 ] [ 159503182 0 0 0 0 0 1 0 0 0 0 ] [ 450941699 0 0 0 0 0 0 1 0 0 0 ] [ 125249023 0 0 0 0 0 0 0 1 0 0 ] [ 158876382 0 0 0 0 0 0 0 0 1 0 ] [ 514616289 0 0 0 0 0 0 0 0 0 1 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(10, "simdioph", bits=10, bits2=30)) [ 1073741824 50 556 5 899 383 846 771 511 734 ] [ 0 1024 0 0 0 0 0 0 0 0 ] [ 0 0 1024 0 0 0 0 0 0 0 ] [ 0 0 0 1024 0 0 0 0 0 0 ] [ 0 0 0 0 1024 0 0 0 0 0 ] [ 0 0 0 0 0 1024 0 0 0 0 ] [ 0 0 0 0 0 0 1024 0 0 0 ] [ 0 0 0 0 0 0 0 1024 0 0 ] [ 0 0 0 0 0 0 0 0 1024 0 ] [ 0 0 0 0 0 0 0 0 0 1024 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(10, "uniform", bits=10)) [ 50 556 5 899 383 846 771 511 734 993 ] [ 325 12 242 43 374 815 437 260 541 50 ] [ 492 174 215 999 186 189 292 497 832 966 ] [ 508 290 160 247 859 817 669 821 258 930 ] [ 
510 933 588 895 18 546 393 868 858 790 ] [ 620 72 832 133 263 121 724 35 454 385 ] [ 431 347 749 311 911 937 50 160 322 180 ] [ 517 941 184 922 217 563 1008 960 37 85 ] [ 5 855 643 824 43 525 37 988 886 118 ] [ 27 944 560 993 662 589 20 694 696 205 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(5, "ntrulike", q=127)) [ 1 0 0 0 0 25 50 44 5 3 ] [ 0 1 0 0 0 3 25 50 44 5 ] [ 0 0 1 0 0 5 3 25 50 44 ] [ 0 0 0 1 0 44 5 3 25 50 ] [ 0 0 0 0 1 50 44 5 3 25 ] [ 0 0 0 0 0 127 0 0 0 0 ] [ 0 0 0 0 0 0 127 0 0 0 ] [ 0 0 0 0 0 0 0 127 0 0 ] [ 0 0 0 0 0 0 0 0 127 0 ] [ 0 0 0 0 0 0 0 0 0 127 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(5, "ntrulike2", q=127)) [ 127 0 0 0 0 0 0 0 0 0 ] [ 0 127 0 0 0 0 0 0 0 0 ] [ 0 0 127 0 0 0 0 0 0 0 ] [ 0 0 0 127 0 0 0 0 0 0 ] [ 0 0 0 0 127 0 0 0 0 0 ] [ 25 3 5 44 50 1 0 0 0 0 ] [ 50 25 3 5 44 0 1 0 0 0 ] [ 44 50 25 3 5 0 0 1 0 0 ] [ 5 44 50 25 3 0 0 0 1 0 ] [ 3 5 44 50 25 0 0 0 0 1 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(10, "qary", k=8, q=127)) [ 1 0 50 44 5 3 78 3 94 97 ] [ 0 1 69 12 114 43 118 47 53 4 ] [ 0 0 127 0 0 0 0 0 0 0 ] [ 0 0 0 127 0 0 0 0 0 0 ] [ 0 0 0 0 127 0 0 0 0 0 ] [ 0 0 0 0 0 127 0 0 0 0 ] [ 0 0 0 0 0 0 127 0 0 0 ] [ 0 0 0 0 0 0 0 127 0 0 ] [ 0 0 0 0 0 0 0 0 127 0 ] [ 0 0 0 0 0 0 0 0 0 127 ] :: >>> FPLLL.set_random_seed(1337) >>> print(IntegerMatrix.random(10, "trg", alpha=0.99)) [ 228404 0 0 0 0 0 0 0 0 0 ] [ -80428 34992 0 0 0 0 0 0 0 0 ] [ -104323 -3287 24449 0 0 0 0 0 0 0 ] [ -54019 -5306 9234 42371 0 0 0 0 0 0 ] [ -17118 -13604 6537 -10587 4082 0 0 0 0 0 ] [ 108869 8134 4954 -17719 -1984 15326 0 0 0 0 ] [ -111858 -7328 5192 8105 -1109 1910 5818 0 0 0 ] [ -97654 -16219 -2181 14658 -1879 7195 -100 2347 0 0 ] [ -46340 13109 6265 12205 -1848 6113 1049 -170 1810 0 ] [ 10290 16293 4131 -4313 -525 2068 -262 248 715 592 ] Available Algorithms: - ``"intrel"`` - (``bits`` = `b`) generate a knapsack like matrix of dimension `d × (d+1)` and `b` bits: the i-th 
vector starts with a random integer of bit-length `≤ b` and the rest is the i-th canonical unit vector. - ``"simdioph"`` - (``bits`` = `b_1`, ``bits2`` = `b_2`) generate a `d × d` matrix of a form similar to that is involved when trying to find rational approximations to reals with the same small denominator. The first vector starts with a random integer of bit-length `≤ b_2` and continues with `d-1` independent integers of bit-lengths `≤ b_1`; the i-th vector for `i>1` is the i-th canonical unit vector scaled by a factor `2^{b_1}`. - ``"uniform"`` - (``bits`` = `b`) - generate a `d × d` matrix whose entries are independent integers of bit-lengths `≤ b`. - ``"ntrulike"`` - (``bits`` = `b` or ``q``) generate a `2d × 2d` NTRU-like matrix. If ``bits`` is given, then it first samples an integer `q` of bit-length `≤ b`, whereas if ``q``, then it sets `q` to the provided value. Then it samples a uniform `h` in the ring `Z_q[x]/(x^n-1)`. It finally returns the 2 x 2 block matrix `[[I, rot(h)], [0, qI]]`, where each block is `d x d`, the first row of `rot(h)` is the coefficient vector of `h`, and the i-th row of `rot(h)` is the shift of the (i-1)-th (with last entry put back in first position), for all i>1. - ``ntrulike2"`` - (``bits`` = `b` or ``q``) as the previous option, except that the constructed matrix is `[[qI, 0], [rot(h), I]]`. - ``"qary"`` - (``bits`` = `b` or ``q``, ``k``) generate a `d × d` q-ary matrix with determinant `q^k`. If ``bits`` is given, then it first samples an integer `q` of bit-length `≤ b`; if ``q`` is provided, then set `q` to the provided value. It returns a `2 x 2` block matrix `[[qI, 0], [H, I]]`, where `H` is `k x (d-k)` and uniformly random modulo q. These bases correspond to the SIS/LWE q-ary lattices. Goldstein-Mayer lattices correspond to `k=1` and `q` prime. 
- ``"trg"`` - (``alpha``) generate a `d × d` lower-triangular matrix `B` with `B_{ii} = 2^{(d-i+1)^\\alpha}` for all `i`, and `B_{ij}` is uniform between `-B_{jj}/2` and `B_{jj}/2` for all `j>> z = [[1,2,3,4], [5,6,7,8], [9,10,11,12], [13,14,15,16]] >>> A = IntegerMatrix(4, 4) >>> A.set_matrix(z) >>> print(A) [ 1 2 3 4 ] [ 5 6 7 8 ] [ 9 10 11 12 ] [ 13 14 15 16 ] >>> A = IntegerMatrix(3, 3) >>> A.set_matrix(z) >>> print(A) [ 1 2 3 ] [ 5 6 7 ] [ 9 10 11 ] .. warning:: entries starting from ``A[nrows, ncols]`` are ignored. """ cdef int i, j cdef int m = self._nrows() cdef int n = self._ncols() try: for i in range(m): for j in range(n): self._set(i, j, A[i, j]) except TypeError: for i in range(m): for j in range(n): self._set(i, j, A[i][j]) def set_iterable(self, A): """Set this matrix from iterable A :param A: an iterable object such as a list or tuple EXAMPLE:: >>> z = range(16) >>> A = IntegerMatrix(4, 4) >>> A.set_iterable(z) >>> print(A) [ 0 1 2 3 ] [ 4 5 6 7 ] [ 8 9 10 11 ] [ 12 13 14 15 ] >>> A = IntegerMatrix(3, 3) >>> A.set_iterable(z) >>> print(A) [ 0 1 2 ] [ 3 4 5 ] [ 6 7 8 ] .. warning:: entries starting at ``A[nrows * ncols]`` are ignored. 
""" cdef int i, j cdef int m = self._nrows() cdef int n = self._ncols() it = iter(A) for i in range(m): for j in range(n): self._set(i, j, next(it)) def to_matrix(self, A): """Write this matrix to matrix-like object A :param A: a matrix like object, with element access A[i,j] or A[i][j] :returns: A Example:: >>> from fpylll import FPLLL >>> z = [[0 for _ in range(10)] for _ in range(10)] >>> A = IntegerMatrix.random(10, "qary", q=127, k=5) >>> _ = A.to_matrix(z) >>> z[0] == list(A[0]) True """ cdef int i, j cdef int m = self._nrows() cdef int n = self._ncols() try: for i in range(m): for j in range(n): A[i, j] = self._get(i, j) except TypeError: for i in range(m): for j in range(n): A[i][j] = self._get(i, j) return A def __dealloc__(self): """ Delete integer matrix """ if self._type == ZT_MPZ: del self._core.mpz elif self._type == ZT_LONG: del self._core.long def __repr__(self): """Short representation. """ return "" % ( self._nrows(), self._ncols(), hex(id(self))) def __str__(self): """Full string representation of this matrix. """ cdef int i, j max_length = [] for j in range(self._ncols()): max_length.append(1) for i in range(self._nrows()): value = self._get(i, j) if not value: continue length = ceil(log10(abs(value))) length += int(ceil(log10(abs(value))) == floor(log10(abs(value)))) # sign length += int(value < 0) if length > max_length[j]: max_length[j] = int(length) r = [] for i in range(self._nrows()): r.append(["["]) for j in range(self._ncols()): r[-1].append(("%%%dd"%max_length[j])%self._get(i,j)) r[-1].append("]") r[-1] = " ".join(r[-1]) r = "\n".join(r) return r @property def int_type(self): """ """ if self._type == ZT_LONG: return "long" if self._type == ZT_MPZ: return "mpz" raise RuntimeError("Integer type '%s' not understood."%self._type) def __copy__(self): """Copy this matrix. 
""" cdef IntegerMatrix A = IntegerMatrix(self._nrows(), self._ncols(), int_type=self.int_type) cdef int i, j for i in range(self._nrows()): for j in range(self._ncols()): A._set(i, j, self._get(i,j)) return A def __reduce__(self): """Serialize this matrix >>> import pickle >>> A = IntegerMatrix.random(10, "uniform", bits=20) >>> pickle.loads(pickle.dumps(A)) == A True """ cdef int i, j l = [] if self._type == ZT_MPZ: for i in range(self._nrows()): for j in range(self._ncols()): # mpz_get_pyintlong ensure pickles work between Sage & not-Sage l.append(int(mpz_get_pyintlong(self._core.mpz[0][i][j].get_data()))) elif self._type == ZT_LONG: for i in range(self._nrows()): for j in range(self._ncols()): # mpz_get_pyintlong ensure pickles work between Sage & not-Sage l.append(int(self._core.long[0][i][j].get_data())) else: raise RuntimeError("Integer type '%s' not understood."%self._type) return unpickle_IntegerMatrix, (self._nrows(), self._ncols(), l, self.int_type) cdef long _nrows(self): if self._type == ZT_MPZ: return self._core.mpz[0].get_rows() elif self._type == ZT_LONG: return self._core.long[0].get_rows() else: raise RuntimeError("Integer type '%s' not understood."%self._type) cdef long _ncols(self): if self._type == ZT_MPZ: return self._core.mpz[0].get_cols() elif self._type == ZT_LONG: return self._core.long[0].get_cols() else: raise RuntimeError("Integer type '%s' not understood."%self._type) @property def nrows(self): """Number of Rows :returns: number of rows >>> from fpylll import IntegerMatrix >>> IntegerMatrix(10, 10).nrows 10 """ return self._nrows() @property def ncols(self): """Number of Columns :returns: number of columns >>> from fpylll import IntegerMatrix >>> IntegerMatrix(10, 10).ncols 10 """ return self._ncols() cdef object _get(self, int i, int j): if self._type == ZT_MPZ: return mpz_get_python(self._core.mpz[0][i][j].get_data()) elif self._type == ZT_LONG: return self._core.long[0][i][j].get_data() else: raise RuntimeError("Integer type '%s' not 
understood."%self._type) def __getitem__(self, key): """Select a row or entry. :param key: an integer for the row, a tuple for row and column or a slice. :returns: a reference to a row or an integer depending on format of ``key`` >>> from fpylll import IntegerMatrix >>> A = IntegerMatrix(10, 10) >>> A.gen_identity(10) >>> A[1,0] 0 >>> print(A[1]) (0, 1, 0, 0, 0, 0, 0, 0, 0, 0) >>> print(A[0:2]) [ 1 0 0 0 0 0 0 0 0 0 ] [ 0 1 0 0 0 0 0 0 0 0 ] """ cdef int i = 0 cdef int j = 0 if isinstance(key, tuple): i, j = key preprocess_indices(i, j, self._nrows(), self._ncols()) return self._get(i, j) elif isinstance(key, slice): key = range(*key.indices(self._nrows())) return self.submatrix(key, range(self._ncols())) elif PyIndex_Check(key): i = key preprocess_indices(i, i, self._nrows(), self._nrows()) return IntegerMatrixRow(self, i) else: raise ValueError("Parameter '%s' not understood."%key) cdef int _set(self, int i, int j, value) except -1: cdef long tmp if self._type == ZT_MPZ: assign_Z_NR_mpz(self._core.mpz[0][i][j], value) elif self._type == ZT_LONG: tmp = value self._core.long[0][i][j] = tmp else: raise RuntimeError("Integer type '%s' not understood."%self._type) def __setitem__(self, key, value): """ Assign value to index. :param key: a tuple of row and column indices :param value: an integer Example:: >>> from fpylll import IntegerMatrix >>> A = IntegerMatrix(10, 10) >>> A.gen_identity(10) >>> A[1,0] = 2 >>> A[1,0] 2 Arbitrary precision integers are supported:: >>> A[0, 0] = 2**2048 The notation ``A[i][j]`` is not supported. This is because ``A[i]`` returns an object of type ``IntegerMatrixRow`` object which is immutable by design. This is to avoid the user confusing such an object with a proper vector.:: >>> A[1][0] = 2 Traceback (most recent call last): ... 
TypeError: 'fpylll.fplll.integer_matrix.IntegerMatrixRow' object does not support item assignment """ cdef int i = 0 cdef int j = 0 if isinstance(key, tuple): i, j = key preprocess_indices(i, j, self._nrows(), self._ncols()) self._set(i, j, value) elif isinstance(key, int): i = key preprocess_indices(i, i, self._nrows(), self._nrows()) if isinstance(value, IntegerMatrixRow) \ and (value).row == i \ and (value).m == self: pass else: raise NotImplementedError else: raise ValueError("Parameter '%s' not understood."%key) def randomize(self, algorithm, **kwds): """Randomize this matrix using ``algorithm``. :param algorithm: see :func:`~IntegerMatrix.random` :seealso: :func:`~IntegerMatrix.random` """ if algorithm == "intrel": bits = int(kwds["bits"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_intrel(bits) elif self._type == ZT_LONG: self._core.long.gen_intrel(bits) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif algorithm == "simdioph": bits = int(kwds["bits"]) bits2 = int(kwds["bits2"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_simdioph(bits, bits2) elif self._type == ZT_LONG: self._core.long.gen_simdioph(bits, bits2) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif algorithm == "uniform": bits = int(kwds["bits"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_uniform(bits) elif self._type == ZT_LONG: self._core.long.gen_uniform(bits) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif algorithm == "ntrulike": if "q" in kwds: q = int(kwds["q"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_ntrulike_withq(q) elif self._type == ZT_LONG: self._core.long.gen_ntrulike_withq(q) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif "bits" in kwds: bits = int(kwds["bits"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_ntrulike(bits) elif self._type == ZT_LONG: 
self._core.long.gen_ntrulike(bits) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() else: raise ValueError("Either 'q' or 'bits' is required.") elif algorithm == "ntrulike2": if "q" in kwds: q = int(kwds["q"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_ntrulike2_withq(q) elif self._type == ZT_LONG: self._core.long.gen_ntrulike2_withq(q) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif "bits" in kwds: bits = int(kwds["bits"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_ntrulike2(bits) elif self._type == ZT_LONG: self._core.long.gen_ntrulike2(bits) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() else: raise ValueError("Either 'q' or 'bits' is required.") elif algorithm == "qary": k = int(kwds["k"]) if "q" in kwds: q = int(kwds["q"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_qary_withq(k, q) elif self._type == ZT_LONG: self._core.long.gen_qary_withq(k, q) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() elif "bits" in kwds: bits = int(kwds["bits"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_qary_prime(k, bits) elif self._type == ZT_LONG: self._core.long.gen_qary_prime(k, bits) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() else: raise ValueError("Either 'q' or 'bits' is required.") elif algorithm == "trg": alpha = float(kwds["alpha"]) sig_on() if self._type == ZT_MPZ: self._core.mpz.gen_trg(alpha) elif self._type == ZT_LONG: self._core.long.gen_trg(alpha) else: raise RuntimeError("Integer type '%s' not understood."%self._type) sig_off() else: raise ValueError("Algorithm '%s' unknown."%algorithm) def gen_identity(self, int nrows=-1): """Generate identity matrix. 
:param nrows: number of rows """ if nrows == -1: nrows = self.nrows if self._type == ZT_MPZ: self._core.mpz.gen_identity(nrows) elif self._type == ZT_LONG: self._core.long.gen_identity(nrows) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def clear(self): """ """ if self._type == ZT_MPZ: return (self._core.mpz).clear() elif self._type == ZT_LONG: return (self._core.long).clear() else: raise RuntimeError("Integer type '%s' not understood."%self._type) def is_empty(self): """ """ if self._type == ZT_MPZ: return bool((self._core.mpz).empty()) elif self._type == ZT_LONG: return bool((self._core.long).empty()) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def resize(self, int rows, int cols): """ :param int rows: :param int cols: """ if self._type == ZT_MPZ: return (self._core.mpz).resize(rows, cols) elif self._type == ZT_LONG: return (self._core.long).resize(rows, cols) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def set_rows(self, int rows): """ :param int rows: """ if self._type == ZT_MPZ: (self._core.mpz).set_rows(rows) elif self._type == ZT_LONG: (self._core.long).set_rows(rows) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def set_cols(self, int cols): """ :param int cols: """ if self._type == ZT_MPZ: (self._core.mpz).set_cols(cols) elif self._type == ZT_LONG: (self._core.long).set_cols(cols) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def swap_rows(self, int r1, int r2): """ :param int r1: :param int r2: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A.swap_rows(0, 1) >>> print(A) [ 3 4 ] [ 0 2 ] """ if self._type == ZT_MPZ: return (self._core.mpz).swap_rows(r1, r2) elif self._type == ZT_LONG: return (self._core.long).swap_rows(r1, r2) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def rotate_left(self, int first, int last): """Row permutation. 
``(M[first],…,M[last])`` becomes ``(M[first+1],…,M[last],M[first])`` :param int first: :param int last: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) """ if self._type == ZT_MPZ: return (self._core.mpz).rotate_left(first, last) elif self._type == ZT_LONG: return (self._core.long).rotate_left(first, last) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def rotate_right(self, int first, int last): """Row permutation. ``(M[first],…,M[last])`` becomes ``(M[last],M[first],…,M[last-1])`` :param int first: :param int last: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) """ if self._type == ZT_MPZ: return (self._core.mpz).rotate_right(first, last) elif self._type == ZT_LONG: return (self._core.long).rotate_right(first, last) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def rotate(self, int first, int middle, int last): """ Rotates the order of the elements in the range [first,last), in such a way that the element pointed by middle becomes the new first element. ``(M[first],…,M[middle-1],M[middle],M[last])`` becomes ``(M[middle],…,M[last],M[first],…,M[middle-1])`` :param int first: first index :param int middle: new first index :param int last: last index (inclusive) >>> A = IntegerMatrix.from_matrix([[0,1,2],[3,4,5],[6,7,8]]) >>> A.rotate(0,0,2) >>> print(A) [ 0 1 2 ] [ 3 4 5 ] [ 6 7 8 ] >>> A = IntegerMatrix.from_matrix([[0,1,2],[3,4,5],[6,7,8]]) >>> A.rotate(0,2,2) >>> print(A) [ 6 7 8 ] [ 0 1 2 ] [ 3 4 5 ] """ if self._type == ZT_MPZ: return (self._core.mpz).rotate(first, middle, last) elif self._type == ZT_LONG: return (self._core.long).rotate(first, middle, last) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def rotate_gram_left(self, int first, int last, int n_valid_rows): """ Transformation needed to update the lower triangular Gram matrix when ``rotateLeft(first, last)`` is done on the basis of the lattice. 
:param int first: :param int last: :param int n_valid_rows: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) """ if self._type == ZT_MPZ: return (self._core.mpz).rotate_gram_left(first, last, n_valid_rows) elif self._type == ZT_LONG: return (self._core.long).rotate_gram_left(first, last, n_valid_rows) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def rotate_gram_right(self, int first, int last, int n_valid_rows): """ Transformation needed to update the lower triangular Gram matrix when ``rotateRight(first, last)`` is done on the basis of the lattice. :param int first: :param int last: :param int n_valid_rows: >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) """ if self._type == ZT_MPZ: return (self._core.mpz).rotate_gram_right(first, last, n_valid_rows) elif self._type == ZT_LONG: return (self._core.long).rotate_gram_right(first, last, n_valid_rows) else: raise RuntimeError("Integer type '%s' not understood."%self._type) def transpose(self): """ Inline transpose. >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> _ = A.transpose() >>> print(A) [ 0 3 ] [ 2 4 ] """ if self._type == ZT_MPZ: (self._core.mpz).transpose() elif self._type == ZT_LONG: (self._core.long).transpose() else: raise RuntimeError("Integer type '%s' not understood."%self._type) return self def get_max_exp(self): """ >>> A = IntegerMatrix.from_matrix([[0,2],[3,4]]) >>> A.get_max_exp() 3 >>> A = IntegerMatrix.from_matrix([[0,2],[3,9]]) >>> A.get_max_exp() 4 """ if self._type == ZT_MPZ: return (self._core.mpz).get_max_exp() elif self._type == ZT_LONG: return (self._core.long).get_max_exp() else: raise RuntimeError("Integer type '%s' not understood."%self._type) # Extensions def __mul__(IntegerMatrix A, IntegerMatrix B): """Naive matrix × matrix products. 
:param IntegerMatrix A: m × n integer matrix A :param IntegerMatrix B: n × k integer matrix B :returns: m × k integer matrix C = A × B >>> from fpylll import FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix(2, 2) >>> A.randomize("uniform", bits=2) >>> print(A) [ 2 0 ] [ 1 3 ] >>> B = IntegerMatrix(2, 2) >>> B.randomize("uniform", bits=2) >>> print(B) [ 3 2 ] [ 3 3 ] >>> print(A*B) [ 6 4 ] [ 12 11 ] >>> print(B*A) [ 8 6 ] [ 9 9 ] """ if A.ncols != B.nrows: raise ValueError("Number of columns of A (%d) does not match number of rows of B (%d)"%(A.ncols, B.nrows)) cdef IntegerMatrix res = IntegerMatrix(A.nrows, B.ncols) cdef int i, j for i in range(A.nrows): for j in range(B.ncols): tmp = res._get(i, j) for k in range(A.ncols): tmp += A._get(i,k) * B._get(k, j) res._set(i, j, tmp) return res def __mod__(IntegerMatrix self, q): """Return A mod q. :param q: a modulus > 0 """ A = self.__copy__() A.mod(q) return A def mod(IntegerMatrix self, q, int start_row=0, int start_col=0, int stop_row=-1, int stop_col=-1): """Apply moduluar reduction modulo `q` to this matrix. 
:param q: modulus :param int start_row: starting row :param int start_col: starting column :param int stop_row: last row (excluding) :param int stop_col: last column (excluding) >>> A = IntegerMatrix(2, 2) >>> A[0,0] = 1001 >>> A[1,0] = 13 >>> A[0,1] = 102 >>> print(A) [ 1001 102 ] [ 13 0 ] >>> A.mod(10, start_row=1, start_col=0) >>> print(A) [ 1001 102 ] [ 3 0 ] >>> A.mod(10) >>> print(A) [ 1 2 ] [ 3 0 ] >>> A = IntegerMatrix(2, 2) >>> A[0,0] = 1001 >>> A[1,0] = 13 >>> A[0,1] = 102 >>> A.mod(10, stop_row=1) >>> print(A) [ 1 2 ] [ 13 0 ] """ preprocess_indices(start_row, start_col, self._nrows(), self._ncols()) preprocess_indices(stop_row, stop_col, self._nrows()+1, self._ncols()+1) cdef mpz_t q_ mpz_init(q_) try: assign_mpz(q_, q) except NotImplementedError, msg: mpz_clear(q_) raise NotImplementedError(msg) cdef mpz_t t1 mpz_init(t1) cdef mpz_t t2 mpz_init(t2) cdef mpz_t q2_ mpz_init(q2_) mpz_fdiv_q_ui(q2_, q_, 2) cdef int i, j for i in range(self._nrows()): for j in range(self._ncols()): if self._type == ZT_MPZ: mpz_set(t1, self._core.mpz[0][i][j].get_data()) elif self._type == ZT_LONG: mpz_set_si(t1, self._core.long[0][i][j].get_data()) else: raise RuntimeError("Integer type '%s' not understood."%self._type) if start_row <= i < stop_row and start_col <= i < stop_col: mpz_mod(t2, t1, q_) if mpz_cmp(t2, q2_) > 0: mpz_sub(t2, t2, q_) if self._type == ZT_MPZ: self._core.mpz[0][i][j].set(t2) elif self._type == ZT_LONG: self._core.long[0][i][j] = mpz_get_si(t2) else: raise RuntimeError("Integer type '%s' not understood."%self._type) mpz_clear(q_) mpz_clear(q2_) mpz_clear(t1) mpz_clear(t2) def __richcmp__(IntegerMatrix self, IntegerMatrix other, int op): """Compare two matrices """ cdef int i, j cdef a, b if op == 2 or op == 3: eq = True if self._nrows() != other.nrows: eq = False elif self._ncols() != other.ncols: eq = False for i in range(self._nrows()): if eq is False: break for j in range(self._ncols()): a = self._get(i, j) b = other._get(i, j) if a != b: eq = 
False break else: raise NotImplementedError("Only != and == are implemented for integer matrices.") if op == 2: return eq elif op == 3: return not eq def apply_transform(self, IntegerMatrix U, int start_row=0): """Apply transformation matrix ``U`` to this matrix starting at row ``start_row``. :param IntegerMatrix U: transformation matrix :param int start_row: start transformation in this row """ cdef int i, j S = self.submatrix(start_row, 0, start_row + U.nrows, self._ncols()) cdef IntegerMatrix B = U*S for i in range(B.nrows): for j in range(B.ncols): tmp = B._get(i, j) self._set(start_row+i, j, tmp) def submatrix(self, a, b, c=None, d=None): """Construct a new submatrix. :param a: either the index of the first row or an iterable of row indices :param b: either the index of the first column or an iterable of column indices :param c: the index of first excluded row (or ``None``) :param d: the index of first excluded column (or ``None``) :returns: :rtype: We illustrate the calling conventions of this function using a 10 x 10 matrix:: >>> from fpylll import IntegerMatrix, FPLLL >>> A = IntegerMatrix(10, 10) >>> FPLLL.set_random_seed(1337) >>> A.randomize("ntrulike", bits=22, q=4194319) >>> print(A) [ 1 0 0 0 0 3021421 752690 1522220 2972677 119630 ] [ 0 1 0 0 0 119630 3021421 752690 1522220 2972677 ] [ 0 0 1 0 0 2972677 119630 3021421 752690 1522220 ] [ 0 0 0 1 0 1522220 2972677 119630 3021421 752690 ] [ 0 0 0 0 1 752690 1522220 2972677 119630 3021421 ] [ 0 0 0 0 0 4194319 0 0 0 0 ] [ 0 0 0 0 0 0 4194319 0 0 0 ] [ 0 0 0 0 0 0 0 4194319 0 0 ] [ 0 0 0 0 0 0 0 0 4194319 0 ] [ 0 0 0 0 0 0 0 0 0 4194319 ] We can either specify start/stop rows and columns:: >>> print(A.submatrix(0,0,2,8)) [ 1 0 0 0 0 3021421 752690 1522220 ] [ 0 1 0 0 0 119630 3021421 752690 ] Or we can give lists of rows, columns explicitly:: >>> print(A.submatrix([0,1,2],range(3,9))) [ 0 0 3021421 752690 1522220 2972677 ] [ 0 0 119630 3021421 752690 1522220 ] [ 0 0 2972677 119630 3021421 752690 ] """ 
cdef int m = 0 cdef int n = 0 cdef int i, j, row, col if c is None and d is None: try: iter(a) rows = a iter(b) cols = b except TypeError: raise ValueError("Inputs to submatrix not understood.") it = iter(rows) try: while True: next(it) m += 1 except StopIteration: pass it = iter(cols) try: while True: next(it) n += 1 except StopIteration: pass A = IntegerMatrix(m, n) i = 0 for row in iter(rows): j = 0 for col in iter(cols): preprocess_indices(row, col, self._nrows(), self._ncols()) A._set(i, j, self._get(row, col)) j += 1 i += 1 return A else: if c < 0: c %= self._nrows() if d < 0: d %= self._ncols() preprocess_indices(a, b, self._nrows(), self._ncols()) preprocess_indices(c, d, self._nrows()+1, self._ncols()+1) if c < a: raise ValueError("Last row (%d) < first row (%d)"%(c, a)) if d < b: raise ValueError("Last column (%d) < first column (%d)"%(d, b)) i = 0 m = c - a n = d - b A = IntegerMatrix(m, n) for row in range(a, c): j = 0 for col in range(b, d): A._set(i, j, self._get(row, col)) j += 1 i += 1 return A def multiply_left(self, v, start=0): """Return ``v*A'`` where ``A'`` is ``A`` reduced to ``len(v)`` rows starting at ``start``. :param v: a tuple-like object :param start: start in row ``start`` """ r = [0]*self._ncols() for i in range(len(v)): for j in range(self._ncols()): r[j] += v[i]*self._get(start+i, j) return tuple(r) @classmethod def from_file(cls, filename, **kwds): """Construct new matrix from file. 
>>> import tempfile >>> A = IntegerMatrix.random(10, "qary", k=5, bits=20) >>> fn = tempfile.mktemp() >>> fh = open(fn, "w") >>> _ = fh.write(str(A)) >>> fh.close() >>> B = IntegerMatrix.from_file(fn) >>> A == B True :param filename: name of file to read from """ A = cls(0, 0, **kwds) with open(filename, 'r') as fh: for i, line in enumerate(fh.readlines()): line = re.match("\[+([^\]]+) *\]", line) if line is None: continue line = line.groups()[0] line = line.strip() line = [e for e in line.split(" ") if e != ''] ncols = len(line) values = map(int, line) if (A)._type == ZT_MPZ: (A)._core.mpz.set_rows(i+1) (A)._core.mpz.set_cols(ncols) elif (A)._type == ZT_LONG: (A)._core.long.set_rows(i+1) (A)._core.long.set_cols(ncols) else: raise RuntimeError("Integer type '%s' not understood."%(A)._type) for j, v in enumerate(values): (A)._set(i, j, v) return A def unpickle_IntegerMatrix(nrows, ncols, l, int_type="mpz"): """Deserialize an integer matrix. :param nrows: number of rows :param ncols: number of columns :param l: list of entries """ return IntegerMatrix.from_iterable(nrows, ncols, l, int_type=int_type) fpylll-0.6.1/src/fpylll/fplll/lll.pxd000066400000000000000000000004451455321202600175250ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .gso cimport MatGSO from .decl cimport lll_reduction_core_t, fplll_mat_gso_data_type_t cdef class LLLReduction: cdef fplll_mat_gso_data_type_t _type cdef lll_reduction_core_t _core cdef readonly MatGSO M cdef double _delta cdef double _eta fpylll-0.6.1/src/fpylll/fplll/lll.pyx000066400000000000000000000671301455321202600175560ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from cysignals.signals cimport sig_on, sig_off from fpylll.gmp.mpz cimport mpz_t from fpylll.mpfr.mpfr cimport mpfr_t from .integer_matrix cimport IntegerMatrix from .fplll cimport LLL_VERBOSE from .fplll cimport LLL_EARLY_RED from .fplll cimport LLL_SIEGEL from .fplll cimport LLL_DEFAULT from .fplll cimport LLLMethod, 
LLL_DEF_ETA, LLL_DEF_DELTA from .fplll cimport LM_WRAPPER, LM_PROVED, LM_HEURISTIC, LM_FAST from .fplll cimport FT_DEFAULT, FT_DOUBLE, FT_LONG_DOUBLE, FT_DD, FT_QD from .fplll cimport ZT_MPZ from .fplll cimport dpe_t from .fplll cimport Z_NR, FP_NR from .fplll cimport lll_reduction as lll_reduction_c from .fplll cimport RED_SUCCESS from .fplll cimport MatGSOInterface as MatGSOInterface_c from .fplll cimport LLLReduction as LLLReduction_c from .fplll cimport get_red_status_str from .fplll cimport is_lll_reduced from .fplll cimport FloatType from fpylll.util cimport check_float_type, check_delta, check_eta, check_precision from fpylll.util import ReductionError from .decl cimport d_t from .decl cimport mat_gso_mpz_d, mat_gso_mpz_ld, mat_gso_mpz_dpe, mat_gso_mpz_mpfr from .decl cimport mat_gso_long_d, mat_gso_long_ld, mat_gso_long_dpe, mat_gso_long_mpfr IF HAVE_LONG_DOUBLE: from .decl cimport ld_t IF HAVE_QD: from .decl cimport mat_gso_mpz_dd, mat_gso_mpz_qd, mat_gso_long_dd, mat_gso_long_qd, dd_t, qd_t from .wrapper import Wrapper cdef class LLLReduction: def __init__(self, MatGSO M, double delta=LLL_DEF_DELTA, double eta=LLL_DEF_ETA, int flags=LLL_DEFAULT): """Construct new LLL object. 
:param MatGSO M: :param double delta: :param double eta: :param int flags: - ``DEFAULT``: - ``VERBOSE``: - ``EARLY_RED``: - ``SIEGEL``: """ check_delta(delta) check_eta(eta) cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[d_t]] *m_mpz_double IF HAVE_LONG_DOUBLE: cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[ld_t]] *m_mpz_ld cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[dpe_t]] *m_mpz_dpe IF HAVE_QD: cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[dd_t]] *m_mpz_dd cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[qd_t]] *m_mpz_qd cdef MatGSOInterface_c[Z_NR[mpz_t], FP_NR[mpfr_t]] *m_mpz_mpfr cdef MatGSOInterface_c[Z_NR[long], FP_NR[d_t]] *m_long_double IF HAVE_LONG_DOUBLE: cdef MatGSOInterface_c[Z_NR[long], FP_NR[ld_t]] *m_long_ld cdef MatGSOInterface_c[Z_NR[long], FP_NR[dpe_t]] *m_long_dpe IF HAVE_QD: cdef MatGSOInterface_c[Z_NR[long], FP_NR[dd_t]] *m_long_dd cdef MatGSOInterface_c[Z_NR[long], FP_NR[qd_t]] *m_long_qd cdef MatGSOInterface_c[Z_NR[long], FP_NR[mpfr_t]] *m_long_mpfr self.M = M if M._type == mat_gso_mpz_d: m_mpz_double = M._core.mpz_d self._type = mat_gso_mpz_d self._core.mpz_d = new LLLReduction_c[Z_NR[mpz_t], FP_NR[d_t]](m_mpz_double[0], delta, eta, flags) elif M._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: m_mpz_ld = M._core.mpz_ld self._type = mat_gso_mpz_ld self._core.mpz_ld = new LLLReduction_c[Z_NR[mpz_t], FP_NR[ld_t]](m_mpz_ld[0], delta, eta, flags) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) elif M._type == mat_gso_mpz_dpe: m_mpz_dpe = M._core.mpz_dpe self._type = mat_gso_mpz_dpe self._core.mpz_dpe = new LLLReduction_c[Z_NR[mpz_t], FP_NR[dpe_t]](m_mpz_dpe[0], delta, eta, flags) elif M._type == mat_gso_mpz_mpfr: m_mpz_mpfr = M._core.mpz_mpfr self._type = mat_gso_mpz_mpfr self._core.mpz_mpfr = new LLLReduction_c[Z_NR[mpz_t], FP_NR[mpfr_t]](m_mpz_mpfr[0], delta, eta, flags) elif M._type == mat_gso_long_d: m_long_double = M._core.long_d self._type = mat_gso_long_d self._core.long_d = new LLLReduction_c[Z_NR[long], FP_NR[d_t]](m_long_double[0], delta, 
eta, flags) elif M._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: m_long_ld = M._core.long_ld self._type = mat_gso_long_ld self._core.long_ld = new LLLReduction_c[Z_NR[long], FP_NR[ld_t]](m_long_ld[0], delta, eta, flags) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) elif M._type == mat_gso_long_dpe: m_long_dpe = M._core.long_dpe self._type = mat_gso_long_dpe self._core.long_dpe = new LLLReduction_c[Z_NR[long], FP_NR[dpe_t]](m_long_dpe[0], delta, eta, flags) elif M._type == mat_gso_long_mpfr: m_long_mpfr = M._core.long_mpfr self._type = mat_gso_long_mpfr self._core.long_mpfr = new LLLReduction_c[Z_NR[long], FP_NR[mpfr_t]](m_long_mpfr[0], delta, eta, flags) else: IF HAVE_QD: if M._type == mat_gso_mpz_dd: m_mpz_dd = M._core.mpz_dd self._type = mat_gso_mpz_dd self._core.mpz_dd = new LLLReduction_c[Z_NR[mpz_t], FP_NR[dd_t]](m_mpz_dd[0], delta, eta, flags) elif M._type == mat_gso_mpz_qd: m_mpz_qd = M._core.mpz_qd self._type = mat_gso_mpz_qd self._core.mpz_qd = new LLLReduction_c[Z_NR[mpz_t], FP_NR[qd_t]](m_mpz_qd[0], delta, eta, flags) elif M._type == mat_gso_long_dd: m_long_dd = M._core.long_dd self._type = mat_gso_long_dd self._core.long_dd = new LLLReduction_c[Z_NR[long], FP_NR[dd_t]](m_long_dd[0], delta, eta, flags) elif M._type == mat_gso_long_qd: m_long_qd = M._core.long_qd self._type = mat_gso_long_qd self._core.long_qd = new LLLReduction_c[Z_NR[long], FP_NR[qd_t]](m_long_qd[0], delta, eta, flags) else: raise RuntimeError("MatGSO object '%s' has no core."%self) ELSE: raise RuntimeError("MatGSO object '%s' has no core."%self) self._delta = delta self._eta = eta def __dealloc__(self): if self._type == mat_gso_mpz_d: del self._core.mpz_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: del self._core.mpz_ld if self._type == mat_gso_mpz_dpe: del self._core.mpz_dpe IF HAVE_QD: if self._type == mat_gso_mpz_dd: del self._core.mpz_dd if self._type == mat_gso_mpz_qd: del self._core.mpz_qd if self._type == mat_gso_mpz_mpfr: del self._core.mpz_mpfr if 
self._type == mat_gso_long_d: del self._core.long_d IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: del self._core.long_ld if self._type == mat_gso_long_dpe: del self._core.long_dpe IF HAVE_QD: if self._type == mat_gso_long_dd: del self._core.long_dd if self._type == mat_gso_long_qd: del self._core.long_qd if self._type == mat_gso_long_mpfr: del self._core.long_mpfr def __reduce__(self): """ Make sure attempts at pickling raise an error until proper pickling is implemented. """ raise NotImplementedError def __call__(self, int kappa_min=0, int kappa_start=0, int kappa_end=-1, int size_reduction_start=0): """LLL reduction. :param int kappa_min: minimal index to go back to :param int kappa_start: index to start processing at :param int kappa_end: end index (exclusive) :param int size_reduction_start: only perform size reductions using vectors starting at this index """ if self.M.d == 0: return if kappa_end == -1: kappa_end = self.M.d cdef int r if self._type == mat_gso_mpz_d: sig_on() self._core.mpz_d.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_d.status sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() self._core.mpz_ld.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_ld.status sig_off() ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() self._core.mpz_dpe.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_dpe.status sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() self._core.mpz_mpfr.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_mpfr.status sig_off() elif self._type == mat_gso_long_d: sig_on() self._core.long_d.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.long_d.status sig_off() elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() self._core.long_ld.lll(kappa_min, kappa_start, kappa_end, 
size_reduction_start) r = self._core.long_ld.status sig_off() ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) elif self._type == mat_gso_long_dpe: sig_on() self._core.long_dpe.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.long_dpe.status sig_off() elif self._type == mat_gso_long_mpfr: sig_on() self._core.long_mpfr.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.long_mpfr.status sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() self._core.mpz_dd.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_dd.status sig_off() elif self._type == mat_gso_mpz_qd: sig_on() self._core.mpz_qd.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.mpz_qd.status sig_off() elif self._type == mat_gso_long_dd: sig_on() self._core.long_dd.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.long_dd.status sig_off() elif self._type == mat_gso_long_qd: sig_on() self._core.long_qd.lll(kappa_min, kappa_start, kappa_end, size_reduction_start) r = self._core.long_qd.status sig_off() else: raise RuntimeError("LLLReduction object '%s' has no core."%self) ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) if r: raise ReductionError( str(get_red_status_str(r)) ) def size_reduction(self, int kappa_min=0, int kappa_end=-1, int size_reduction_start=0): """Size reduction. 
:param int kappa_min: start index :param int kappa_end: end index (exclusive) :param int size_reduction_start: only perform size reductions using vectors starting at this index """ if kappa_end == -1: kappa_end = self.M.d if self._type == mat_gso_mpz_d: sig_on() r = self._core.mpz_d.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_long_d: sig_on() r = self._core.long_d.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_mpz_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.mpz_ld.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) elif self._type == mat_gso_long_ld: IF HAVE_LONG_DOUBLE: sig_on() r = self._core.long_ld.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) elif self._type == mat_gso_mpz_dpe: sig_on() r = self._core.mpz_dpe.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_mpz_mpfr: sig_on() r = self._core.mpz_mpfr.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_long_dpe: sig_on() r = self._core.long_dpe.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_long_mpfr: sig_on() r = self._core.long_mpfr.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() else: IF HAVE_QD: if self._type == mat_gso_mpz_dd: sig_on() r = self._core.mpz_dd.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_mpz_qd: sig_on() r = self._core.mpz_qd.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_long_dd: sig_on() r = self._core.long_dd.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() elif self._type == mat_gso_long_qd: sig_on() r = 
self._core.long_qd.size_reduction(kappa_min, kappa_end, size_reduction_start) sig_off() else: raise RuntimeError("LLLReduction object '%s' has no core."%self) ELSE: raise RuntimeError("LLLReduction object '%s' has no core."%self) if not r: raise ReductionError( str(get_red_status_str(r)) ) @property def final_kappa(self): """FIXME! briefly describe function :returns: :rtype: """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.final_kappa IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.final_kappa if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.final_kappa IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.final_kappa if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.final_kappa if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.final_kappa if self._type == mat_gso_long_d: return self._core.long_d.final_kappa IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.final_kappa if self._type == mat_gso_long_dpe: return self._core.long_dpe.final_kappa IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.final_kappa if self._type == mat_gso_long_qd: return self._core.long_qd.final_kappa if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.final_kappa raise RuntimeError("LLLReduction object '%s' has no core."%self) @property def last_early_red(self): """FIXME! 
briefly describe function :returns: :rtype: """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.last_early_red IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.last_early_red if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.last_early_red IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.last_early_red if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.last_early_red if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.last_early_red if self._type == mat_gso_long_d: return self._core.long_d.last_early_red IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.last_early_red if self._type == mat_gso_long_dpe: return self._core.long_dpe.last_early_red IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.last_early_red if self._type == mat_gso_long_qd: return self._core.long_qd.last_early_red if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.last_early_red raise RuntimeError("LLLReduction object '%s' has no core."%self) @property def zeros(self): """FIXME! 
briefly describe function :returns: :rtype: """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.zeros IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.zeros if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.zeros IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.zeros if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.zeros if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.zeros if self._type == mat_gso_long_d: return self._core.long_d.zeros IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.zeros if self._type == mat_gso_long_dpe: return self._core.long_dpe.zeros IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.zeros if self._type == mat_gso_long_qd: return self._core.long_qd.zeros if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.zeros raise RuntimeError("LLLReduction object '%s' has no core."%self) @property def nswaps(self): """FIXME! 
briefly describe function :returns: :rtype: """ if self._type == mat_gso_mpz_d: return self._core.mpz_d.n_swaps IF HAVE_LONG_DOUBLE: if self._type == mat_gso_mpz_ld: return self._core.mpz_ld.n_swaps if self._type == mat_gso_mpz_dpe: return self._core.mpz_dpe.n_swaps IF HAVE_QD: if self._type == mat_gso_mpz_dd: return self._core.mpz_dd.n_swaps if self._type == mat_gso_mpz_qd: return self._core.mpz_qd.n_swaps if self._type == mat_gso_mpz_mpfr: return self._core.mpz_mpfr.n_swaps if self._type == mat_gso_long_d: return self._core.long_d.n_swaps IF HAVE_LONG_DOUBLE: if self._type == mat_gso_long_ld: return self._core.long_ld.n_swaps if self._type == mat_gso_long_dpe: return self._core.long_dpe.n_swaps IF HAVE_QD: if self._type == mat_gso_long_dd: return self._core.long_dd.n_swaps if self._type == mat_gso_long_qd: return self._core.long_qd.n_swaps if self._type == mat_gso_long_mpfr: return self._core.long_mpfr.n_swaps raise RuntimeError("LLLReduction object '%s' has no core."%self) @property def delta(self): return self._delta @property def eta(self): return self._eta def lll_reduction(IntegerMatrix B, U=None, double delta=LLL_DEF_DELTA, double eta=LLL_DEF_ETA, method=None, float_type=None, int precision=0, int flags=LLL_DEFAULT): u"""Run LLL reduction. :param IntegerMatrix B: Integer matrix, modified in place. :param U: Transformation matrix or ``None`` :param double delta: LLL parameter `0.25 < δ ≤ 1` :param double eta: LLL parameter `0 ≤ η < √δ` :param method: one of ``'wrapper'``, ``'proved'``, ``'heuristic'``, ``'fast'`` or ``None``. :param float_type: an element of `fpylll.config.float_types` or ``None`` :param precision: bit precision to use if ``float_tpe`` is ``'mpfr'`` :param int flags: LLL flags. 
:returns: modified matrix ``B`` """ cdef IntegerMatrix B_ if B._type == ZT_MPZ: B_ = B else: B_ = IntegerMatrix(B, int_type="mpz") check_delta(delta) check_eta(eta) check_precision(precision) cdef LLLMethod method_ if method == "wrapper" or method is None: method_ = LM_WRAPPER elif method == "proved": method_ = LM_PROVED elif method == "heuristic": method_ = LM_HEURISTIC elif method == "fast": method_ = LM_FAST else: raise ValueError("Method '%s' unknown."%method) if float_type is None and method_ == LM_FAST: float_type = "double" if method_ == LM_WRAPPER and check_float_type(float_type) != FT_DEFAULT: raise ValueError("LLL wrapper function requires float_type==None") if method_ == LM_FAST and \ check_float_type(float_type) not in (FT_DOUBLE, FT_LONG_DOUBLE, FT_DD, FT_QD): raise ValueError("LLL fast function requires " "float_type == 'double', 'long double', 'dd' or 'qd'") cdef int r cdef FloatType ft = check_float_type(float_type) if U is not None and isinstance(U, IntegerMatrix): sig_on() r = lll_reduction_c(B_._core.mpz[0], (U)._core.mpz[0], delta, eta, method_, ft, precision, flags) sig_off() else: sig_on() r = lll_reduction_c(B_._core.mpz[0], delta, eta, method_, ft, precision, flags) sig_off() if r: raise ReductionError( str(get_red_status_str(r)) ) if B != B_: B.set_matrix(B_) return B def is_LLL_reduced(M, delta=LLL_DEF_DELTA, eta=LLL_DEF_ETA): """Test if ``M`` is LLL reduced. :param M: either an GSO object of an integer matrix or an integer matrix. :param delta: LLL parameter δ < 1 :param eta: LLL parameter η > 0.5 :returns: Return ``True`` if ``M`` is definitely LLL reduced, ``False`` otherwise. Random matrices are typically not LLL reduced:: >>> from fpylll import IntegerMatrix, LLL >>> A = IntegerMatrix(40, 40) >>> A.randomize('uniform', bits=32) >>> LLL.is_reduced(A) False LLL reduction should produce matrices which are LLL reduced:: >>> LLL.reduction(A) # doctest: +ELLIPSIS >>> LLL.is_reduced(A) True .. 
note:: This function may return ``False`` for LLL reduced matrices if the precision used to compute the GSO is too small. """ check_delta(delta) check_eta(eta) cdef MatGSO M_ if isinstance(M, MatGSO): M_ = M elif isinstance(M, IntegerMatrix): M_ = MatGSO(M) M_.update_gso() else: raise TypeError("Type '%s' not understood."%type(M)) if M_._type == mat_gso_mpz_d: return bool(is_lll_reduced[Z_NR[mpz_t], FP_NR[double]](M_._core.mpz_d[0], delta, eta)) IF HAVE_LONG_DOUBLE: if M_._type == mat_gso_mpz_ld: return bool(is_lll_reduced[Z_NR[mpz_t], FP_NR[longdouble]](M_._core.mpz_ld[0], delta, eta)) IF HAVE_QD: if M_._type == mat_gso_mpz_dd: return bool(is_lll_reduced[Z_NR[mpz_t], FP_NR[dd_t]](M_._core.mpz_dd[0], delta, eta)) if M_._type == mat_gso_mpz_qd: return bool(is_lll_reduced[Z_NR[mpz_t], FP_NR[qd_t]](M_._core.mpz_qd[0], delta, eta)) if M_._type == mat_gso_mpz_mpfr: return bool(is_lll_reduced[Z_NR[mpz_t], FP_NR[mpfr_t]](M_._core.mpz_mpfr[0], delta, eta)) if M_._type == mat_gso_long_d: return bool(is_lll_reduced[Z_NR[long], FP_NR[double]](M_._core.long_d[0], delta, eta)) IF HAVE_LONG_DOUBLE: if M_._type == mat_gso_long_ld: return bool(is_lll_reduced[Z_NR[long], FP_NR[longdouble]](M_._core.long_ld[0], delta, eta)) IF HAVE_QD: if M_._type == mat_gso_long_dd: return bool(is_lll_reduced[Z_NR[long], FP_NR[dd_t]](M_._core.long_dd[0], delta, eta)) if M_._type == mat_gso_long_qd: return bool(is_lll_reduced[Z_NR[long], FP_NR[qd_t]](M_._core.long_qd[0], delta, eta)) if M_._type == mat_gso_long_mpfr: return bool(is_lll_reduced[Z_NR[long], FP_NR[mpfr_t]](M_._core.long_mpfr[0], delta, eta)) raise RuntimeError("MatGSO object '%s' has no core."%M) class LLL: DEFAULT = LLL_DEFAULT VERBOSE = LLL_VERBOSE EARLY_RED = LLL_EARLY_RED SIEGEL = LLL_SIEGEL DEFAULT_DELTA = LLL_DEF_DELTA DEFAULT_ETA = LLL_DEF_ETA Reduction = LLLReduction reduction = staticmethod(lll_reduction) is_reduced = staticmethod(is_LLL_reduced) Wrapper = Wrapper 
fpylll-0.6.1/src/fpylll/fplll/pruner.pxd000066400000000000000000000007101455321202600202500ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .integer_matrix cimport IntegerMatrix from .decl cimport pruner_core_t, fplll_nr_type_t from .fplll cimport PruningParams as PruningParams_c cdef class PruningParams: cdef PruningParams_c _core @staticmethod cdef PruningParams from_cxx(PruningParams_c & p) @staticmethod cdef to_cxx(PruningParams_c& self, PruningParams p) cdef class Pruner: cdef fplll_nr_type_t _type cdef pruner_core_t _core fpylll-0.6.1/src/fpylll/fplll/pruner.pyx000066400000000000000000001140501455321202600203000ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Pruner EXAMPLE:: >>> from fpylll import * >>> FPLLL.set_random_seed(1337) >>> A = [IntegerMatrix.random(10, "qary", bits=10, k=5) for _ in range(20)] >>> M = [GSO.Mat(a) for a in A] >>> _ = [LLL.Reduction(m)() for m in M] >>> radius = sum([m.get_r(0, 0) for m in M])/len(M) >>> pr = Pruning.run(radius, 10000, [m.r() for m in M], 0.4) >>> print(pr) # doctest: +ELLIPSIS PruningParams<7.797437, (1.00,...,0.80), 0.6594> >>> print(Pruning.run(M[0].get_r(0, 0), 2**20, [m.r() for m in M], 0.9, pruning=pr)) PruningParams<1.001130, (1.00,...,0.98), 0.9410> .. moduleauthor:: Martin R. 
Albrecht """ include "fpylll/config.pxi" from libcpp cimport bool from libcpp.vector cimport vector from math import log, exp from cysignals.signals cimport sig_on, sig_off from cython.operator cimport dereference as deref, preincrement as inc from .decl cimport fp_nr_t, mpz_t, dpe_t, mpfr_t from .decl cimport nr_d, nr_dpe, nr_mpfr, pruner_core_t, d_t from .fplll cimport FT_DOUBLE, FT_LONG_DOUBLE, FT_DPE, FT_MPFR, FloatType from .fplll cimport PRUNER_METRIC_PROBABILITY_OF_SHORTEST, PRUNER_METRIC_EXPECTED_SOLUTIONS from .fplll cimport FP_NR, Z_NR from .fplll cimport prune as prune_c from .fplll cimport PruningParams as PruningParams_c from .fplll cimport Pruner as Pruner_c from .fplll cimport PrunerMetric from .fplll cimport svp_probability as svp_probability_c from .fplll cimport PRUNER_CVP, PRUNER_START_FROM_INPUT, PRUNER_GRADIENT, PRUNER_NELDER_MEAD, PRUNER_VERBOSE from .fplll cimport PRUNER_SINGLE, PRUNER_HALF from fpylll.util import adjust_radius_to_gh_bound, precision, FPLLL from fpylll.util cimport check_float_type, check_precision, check_pruner_metric IF HAVE_LONG_DOUBLE: from .decl cimport nr_ld, ld_t IF HAVE_QD: from .decl cimport nr_dd, nr_qd, dd_t, qd_t from .fplll cimport FT_DD, FT_QD cdef class PruningParams: """ Pruning parameters. """ def __init__(self, gh_factor, coefficients, expectation=1.0, metric="probability", detailed_cost=tuple()): """Create new pruning parameters object. 
:param gh_factor: ratio of radius to Gaussian heuristic :param coefficients: a list of pruning coefficients :param expectation: success probability or number of solutions :param metric: either "probability" or "solutions" """ if gh_factor <= 0: raise ValueError("Radius factor must be > 0") cdef PrunerMetric met = check_pruner_metric(metric) if met == PRUNER_METRIC_PROBABILITY_OF_SHORTEST: if expectation <= 0 or expectation > 1: raise ValueError("Probability must be between 0 and 1") self._core.gh_factor = gh_factor self._core.expectation = expectation self._core.metric = met for c in coefficients: self._core.coefficients.push_back(c) for c in detailed_cost: self._core.detailed_cost.push_back(c) @staticmethod cdef PruningParams from_cxx(PruningParams_c& p): """ Load PruningParams object from C++ PruningParams object. .. note:: All data is copied, i.e. `p` can be safely deleted after this function returned. """ cdef PruningParams self = PruningParams(1.0, ()) self._core = p return self @staticmethod cdef to_cxx(PruningParams_c& self, PruningParams p): """ Store pruning object in C++ pruning object. .. note:: All data is copied, i.e. `p` can be safely deleted after this function returned. """ self.gh_factor = p._core.gh_factor self.expectation = p._core.expectation self.metric = p._core.metric for c in p._core.coefficients: self.coefficients.push_back(c) for c in p._core.detailed_cost: self.detailed_cost.push_back(c) @staticmethod def LinearPruningParams(block_size, level): """ Set all pruning coefficients to 1, except the last coefficients, these will be linearly with slope `-1 / block_size`. 
:param block_size: block size :param level: level """ sig_on() cdef PruningParams_c p = PruningParams_c.LinearPruningParams(block_size, level) sig_off() return PruningParams.from_cxx(p) def __reduce__(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> import pickle >>> print(pickle.loads(pickle.dumps(PruningParams(1.0, [1.0, 0.6, 0.3], 1.0)))) PruningParams<1.000000, (1.00,...,0.30), 1.0000> """ return PruningParams, (self.gh_factor, self.coefficients, self.expectation, self.metric, self.detailed_cost) def __str__(self): return "PruningParams<%f, (%.2f,...,%.2f), %.4f>"%(self.gh_factor, self.coefficients[0], self.coefficients[-1], self.expectation) @property def gh_factor(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> pr = PruningParams(1.0, [1.0, 0.6, 0.3], 0.9) >>> pr.gh_factor 1.0 """ return self._core.gh_factor @property def expectation(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> pr = PruningParams(1.0, [1.0, 0.6, 0.3], 0.9) >>> pr.expectation 0.9 """ return self._core.expectation @property def metric(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> pr = PruningParams(1.0, [1.0, 0.6, 0.3], 0.9) >>> pr.metric 'probability' """ if self._core.metric == PRUNER_METRIC_PROBABILITY_OF_SHORTEST: return "probability" elif self._core.metric == PRUNER_METRIC_EXPECTED_SOLUTIONS: return "solutions" else: raise NotImplementedError("Metric %d not understood"%self._core.metric) @property def coefficients(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> pr = PruningParams(1.0, [1.0, 0.6, 0.3], 0.9) >>> pr.coefficients (1.0, 0.6, 0.3) """ cdef list coefficients = [] cdef vector[double].iterator it = self._core.coefficients.begin() while it != self._core.coefficients.end(): coefficients.append(deref(it)) inc(it) return tuple(coefficients) @property def detailed_cost(self): """ :: >>> from fpylll.fplll.pruner import PruningParams >>> pr = PruningParams(1.0, [1.0, 0.6, 0.3], 0.9) >>> 
pr.detailed_cost () """ cdef list detailed_cost = [] cdef vector[double].iterator it = self._core.detailed_cost.begin() while it != self._core.detailed_cost.end(): detailed_cost.append(deref(it)) inc(it) return tuple(detailed_cost) cdef class Pruner: def __init__(self, double enumeration_radius, double preproc_cost, gso_r, double target, metric="probability", int flags=PRUNER_GRADIENT, float_type="double"): """ :param enumeration_radius: target squared enumeration radius :param preproc_cost: cost of preprocessing :param gso_r: r vector of GSO :param target: overall targeted success probability or number of solutions :param metric: "probability" or "solutions" :param flags: flags :param float_type: floating point type to use EXAMPLE:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 1, metric=Pruning.EXPECTED_SOLUTIONS) .. 
note :: Preprocessing cost should be expressed in terms of nodes in an enumeration (~100 CPU cycles per node) """ cdef FloatType float_type_ = check_float_type(float_type) cdef PrunerMetric metric_ = check_pruner_metric(metric) cdef fp_nr_t enumeration_radius_ cdef fp_nr_t preproc_cost_ cdef fp_nr_t target_ if preproc_cost < 1: raise ValueError("Preprocessing cost must be at least 1 but got %f"%preproc_cost) if metric == PRUNER_METRIC_PROBABILITY_OF_SHORTEST: if target <= 0 or target >= 1.0: raise ValueError("Probability must be between 0 and 1 (exclusive) but got %f"%target) if metric == PRUNER_METRIC_EXPECTED_SOLUTIONS: if target <= 0: raise ValueError("Number of solutions must be > 0 but got %f"%target) cdef vector[vector[double]] gso_r_ d = len(gso_r[0]) for i,m in enumerate(gso_r): gso_r_.push_back(vector[double]()) if len(m) != d: raise ValueError("Lengths of all vectors must match.") for e in m: gso_r_[i].push_back(e) if float_type_ == FT_DOUBLE: self._type = nr_d enumeration_radius_.d = enumeration_radius preproc_cost_.d = preproc_cost target_.d = target self._core.d = new Pruner_c[FP_NR[d_t]](enumeration_radius_.d, preproc_cost_.d, gso_r_, target_.d, metric_, flags) elif float_type_ == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: self._type = nr_ld enumeration_radius_.ld = enumeration_radius preproc_cost_.ld = preproc_cost target_.ld = target self._core.ld = new Pruner_c[FP_NR[ld_t]](enumeration_radius_.ld, preproc_cost_.ld, gso_r_, target_.ld, metric_, flags) ELSE: raise ValueError("Float type '%s' not understood." 
% float_type) elif float_type_ == FT_DPE: self._type = nr_dpe enumeration_radius_.dpe = enumeration_radius preproc_cost_.dpe = preproc_cost target_.dpe = target self._core.dpe = new Pruner_c[FP_NR[dpe_t]](enumeration_radius_.dpe, preproc_cost_.dpe, gso_r_, target_.dpe, metric_, flags) elif float_type_ == FT_MPFR: self._type = nr_mpfr enumeration_radius_.mpfr = enumeration_radius preproc_cost_.mpfr = preproc_cost target_.mpfr = target self._core.mpfr = new Pruner_c[FP_NR[mpfr_t]](enumeration_radius_.mpfr, preproc_cost_.mpfr, gso_r_, target_.mpfr, metric_, flags) else: IF HAVE_QD: if float_type_ == FT_DD: self._type = nr_dd enumeration_radius_.dd = enumeration_radius preproc_cost_.dd = preproc_cost target_.dd = target self._core.dd = new Pruner_c[FP_NR[dd_t]](enumeration_radius_.dd, preproc_cost_.dd, gso_r_, target_.dd, metric_, flags) elif float_type_ == FT_QD: self._type = nr_qd enumeration_radius_.qd = enumeration_radius preproc_cost_.qd = preproc_cost target_.qd = target self._core.qd = new Pruner_c[FP_NR[qd_t]](enumeration_radius_.qd, preproc_cost_.qd, gso_r_, target_.qd, metric_, flags) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) def __dealloc__(self): if self._type == nr_d: del self._core.d IF HAVE_LONG_DOUBLE: if self._type == nr_ld: del self._core.ld if self._type == nr_dpe: del self._core.dpe IF HAVE_QD: if self._type == nr_dd: del self._core.dd if self._type == nr_qd: del self._core.qd if self._type == nr_mpfr: del self._core.mpfr def optimize_coefficients(self, pr): """ Optimize pruning coefficients. >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(60, "qary", bits=20, k=30) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(0.9*M.get_r(0,0), 2**40, [M.r()], 0.51, metric=Pruning.PROBABILITY_OF_SHORTEST) >>> c = pr.optimize_coefficients([1. 
for _ in range(M.d)]) >>> pr.measure_metric(c) # doctest: +ELLIPSIS 0.002711... >>> pr = Pruning.Pruner(0.9*M.get_r(0,0), 2**2, [M.r()], 1.0, metric=Pruning.EXPECTED_SOLUTIONS) >>> c = pr.optimize_coefficients([1. for _ in range(M.d)]) >>> pr.measure_metric(c) # doctest: +ELLIPSIS 0.990517... >>> pr = Pruning.Pruner(0.5*M.get_r(0,0), 2**40, [M.r()], 0.51, metric=Pruning.PROBABILITY_OF_SHORTEST, flags=Pruning.SINGLE) >>> c = pr.optimize_coefficients([1. for _ in range(M.d)]) >>> pr.measure_metric(c) # doctest: +ELLIPSIS 0.515304... >>> pr = Pruning.Pruner(0.9*M.get_r(0,0), 2**2, [M.r()], 1.0, metric=Pruning.EXPECTED_SOLUTIONS, flags=Pruning.SINGLE) >>> c = pr.optimize_coefficients([1. for _ in range(M.d)]) >>> pr.measure_metric(c) # doctest: +ELLIPSIS 1.043578... """ cdef vector[double] pr_ cdef bool called = False d = len(pr) for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() self._core.d.optimize_coefficients(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() self._core.ld.optimize_coefficients(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() self._core.dpe.optimize_coefficients(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() self._core.dd.optimize_coefficients(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() self._core.qd.optimize_coefficients(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() self._core.mpfr.optimize_coefficients(pr_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) pr = [] for i in range(d): pr.append(pr_[i]) return tuple(pr) def optimize_coefficients_evec(self, pr): """ Optimize using "even" coefficients. Run the optimization process, successively using the algorithm activated using using half coefficients: the input ``pr`` has length ``n``; but only the even indices in the vector will be used in the optimization. 
In the end, we have ``pr_i = pr_{i+1}``. This function only optimizes the overall enumeration time where the target function is: ``single_enum_cost(pr) * trials + preproc_cost * (trials - 1.0)`` :param pr: input pruning parameters EXAMPLE:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients_evec([1. for _ in range(M.d)]) >>> c[0:10] # doctest: +ELLIPSIS (1.0, 1.0, 0.98, 0.98, 0.98, 0.98, 0.9637..., 0.9637..., 0.9591..., 0.9591...) """ cdef vector[double] pr_ cdef bool called = False d = len(pr) for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() self._core.d.optimize_coefficients_evec(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() self._core.ld.optimize_coefficients_evec(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() self._core.dpe.optimize_coefficients_evec(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() self._core.dd.optimize_coefficients_evec(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() self._core.qd.optimize_coefficients_evec(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() self._core.mpfr.optimize_coefficients_evec(pr_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) pr = [] for i in range(d): pr.append(pr_[i]) return tuple(pr) def optimize_coefficients_full(self, pr): """ Optimize pruning coefficients using all the coefficients. Run the optimization process, successively using the algorithm activated using using full coefficients. That is, we do not have the constraint pr_i = pr_{i+1} in this function. 
Note that this function (and `optimize_coefficients_full_core()`) only optimizes the overall enumeration time where the target function is: ``single_enum_cost(pr) * trials + preproc_cost * (trials - 1.0)`` :param pr: input pruning parameters EXAMPLE:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients_full([1. for _ in range(M.d)]) >>> c[0:10] # doctest: +ELLIPSIS (1.0, 1.0, 0.98, 0.98, 0.98, 0.98, 0.9608..., 0.9607..., 0.9574..., 0.9572...) .. note :: Basis shape and other parameters must have been set beforehand. """ cdef vector[double] pr_ cdef bool called = False d = len(pr) for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() self._core.d.optimize_coefficients_full(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() self._core.ld.optimize_coefficients_full(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() self._core.dpe.optimize_coefficients_full(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() self._core.dd.optimize_coefficients_full(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() self._core.qd.optimize_coefficients_full(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() self._core.mpfr.optimize_coefficients_full(pr_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) pr = [] for i in range(d): pr.append(pr_[i]) return tuple(pr) def optimize_coefficients_cost_vary_prob(self, pr): """ Optimize the pruning coefficients with respect to the overall enumeration time. 
The target function is: ``single_enum_cost(pr) * trials + preproc_cost * (trials - 1.0)``; EXAMPLE:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients_cost_vary_prob([1. for _ in range(M.d)]) >>> c[0:10] # doctest: +ELLIPSIS (1.0, 1.0, 0.999..., 0.999..., 0.995..., 0.993..., 0.977..., 0.962..., 0.936..., 0.913...) """ cdef vector[double] pr_ cdef bool called = False d = len(pr) for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() self._core.d.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() self._core.ld.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() self._core.dpe.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() self._core.dd.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() self._core.qd.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() self._core.mpfr.optimize_coefficients_cost_vary_prob(pr_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) pr = [] for i in range(d): pr.append(pr_[i]) return tuple(pr) def optimize_coefficients_cost_fixed_prob(self, pr): """ Optimize pruning coefficients with respect to the single enumeration. Main interface to optimize the single enumeration time with the constraint such that the succ. prob (or expected solutions) is fixed (and given) from input to the Pruner constructor. 
EXAMPLE:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients_cost_fixed_prob([1. for _ in range(M.d)]) >>> c[0:10] # doctest: +ELLIPSIS (1.0, 1.0, 0.98, 0.98, 0.98, 0.98, 0.962..., 0.944..., 0.944..., 0.944...) """ cdef vector[double] pr_ cdef bool called = False d = len(pr) for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() self._core.d.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() self._core.ld.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() self._core.dpe.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() self._core.dd.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() self._core.qd.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() self._core.mpfr.optimize_coefficients_cost_fixed_prob(pr_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) pr = [] for i in range(d): pr.append(pr_[i]) return tuple(pr) def single_enum_cost(self, pr, detailed_cost=False): """ Compute the cost of a single enumeration:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients([1. for _ in range(M.d)]) >>> cost, details = pr.single_enum_cost(c, True) >>> cost # doctest: +ELLIPSIS 14980.48... 
>>> details[0:10] # doctest: +ELLIPSIS (0.134901..., 0.3048..., 0.81588..., 1.945..., 4.5903..., 11.51..., 16.048..., 41.7115..., 48.03..., 116.986...) """ cdef vector[double] pr_ cdef vector[double] detailed_cost_ cdef bool called = False cost = 0.0 d = len(pr) for e in pr: pr_.push_back(e) detailed_cost_.push_back(0.0) # TODO: don't just return doubles if self._type == nr_d: sig_on() cost = self._core.d.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() cost = self._core.ld.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() if self._type == nr_dpe: sig_on() cost = self._core.dpe.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() cost = self._core.dd.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() elif self._type == nr_qd: sig_on() cost = self._core.qd.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() if self._type == nr_mpfr: sig_on() cost = self._core.mpfr.single_enum_cost(pr_, &detailed_cost_) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) if detailed_cost: detailed_cost = [] for i in range(d): detailed_cost.append(detailed_cost_[i]) return cost, tuple(detailed_cost) else: return cost def repeated_enum_cost(self, pr): """ Compute the cost of r enumeration and (r-1) preprocessing, where r is the required number of retrials to reach target:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients([1. for _ in range(M.d)]) >>> pr.repeated_enum_cost(c) # doctest: +ELLIPSIS 15626.98... 
""" cdef vector[double] pr_ cdef bool called = False cost = 0.0 for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() cost = self._core.d.repeated_enum_cost(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() cost = self._core.ld.repeated_enum_cost(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() cost = self._core.dpe.repeated_enum_cost(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() cost = self._core.dd.repeated_enum_cost(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() cost = self._core.qd.repeated_enum_cost(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() cost = self._core.mpfr.repeated_enum_cost(pr_,) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) return cost def measure_metric(self, pr): """ Compute the success probability of expected number of solutions of a single enumeration:: >>> from fpylll import IntegerMatrix, GSO, LLL, Pruning, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(40, "qary", bits=20, k=20) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A) >>> _ = M.update_gso() >>> pr = Pruning.Pruner(M.get_r(0,0), 2**20, [M.r()], 0.51) >>> c = pr.optimize_coefficients([1. 
for _ in range(M.d)]) """ cdef vector[double] pr_ cdef bool called = False r = 0.0 for e in pr: pr_.push_back(e) # TODO: don't just return doubles if self._type == nr_d: sig_on() r = self._core.d.measure_metric(pr_) called = True sig_off() IF HAVE_LONG_DOUBLE: if self._type == nr_ld: sig_on() r = self._core.ld.measure_metric(pr_) called = True sig_off() if self._type == nr_dpe: sig_on() r = self._core.dpe.measure_metric(pr_) called = True sig_off() IF HAVE_QD: if self._type == nr_dd: sig_on() r = self._core.dd.measure_metric(pr_) called = True sig_off() elif self._type == nr_qd: sig_on() r = self._core.qd.measure_metric(pr_) called = True sig_off() if self._type == nr_mpfr: sig_on() r = self._core.mpfr.measure_metric(pr_,) called = True sig_off() if not called: raise RuntimeError("Pruner object '%s' has no core."%self) return r def prune(double enumeration_radius, double preproc_cost, gso_r, double target, metric="probability", int flags=PRUNER_GRADIENT, pruning=None, float_type="double"): """Return optimal pruning parameters. 
:param enumeration_radius: target squared enumeration radius :param preproc_cost: cost of preprocessing :param gso_: list (of lists) with r coefficients :param target: overall targeted success probability or number of solutions :param metric: "probability" or "solutions" :param flags: flags :param pruning: write output here, pass ``None`` for creating a new one :param float_type: floating point type to use EXAMPLE:: >>> from fpylll import IntegerMatrix, LLL, GSO, FPLLL >>> from fpylll import FPLLL >>> from fpylll import Pruning >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(20, "qary", bits=20, k=10) >>> M = GSO.Mat(A) >>> LLL.Reduction(M)() >>> _ = FPLLL.set_precision(128) >>> R = [M.get_r(i,i) for i in range(0, 20)] >>> pr0 = Pruning.run(R[0], 2**20, [R], 0.5, float_type="double") >>> pr1 = Pruning.run(R[0], 2**20, [R], 0.5, float_type="mpfr") >>> pr0.coefficients[10], pr1.coefficients[10] # doctest: +ELLIPSIS (0.6266..., 0.6266...) >>> pr0 = Pruning.run(R[0], 2**10, [R], 0.5, flags=Pruning.GRADIENT, float_type="double") >>> pr1 = Pruning.run(R[0], 2**10, [R], 0.5, flags=Pruning.NELDER_MEAD, float_type="mpfr") >>> pr0.coefficients[10], pr1.coefficients[10] # doctest: +ELLIPSIS (0.70722482938..., 0.824291475...) .. 
note :: Preprocessing cost should be expressed in terms of nodes in an enumeration (~100 CPU cycles per node) """ if preproc_cost < 1: raise ValueError("Preprocessing cost must be at least 1 but got %f"%preproc_cost) if metric == PRUNER_METRIC_PROBABILITY_OF_SHORTEST: if target <= 0 or target >= 1.0: raise ValueError("Probability must be between 0 and 1 (exclusive) but got %f"%target) if metric == PRUNER_METRIC_EXPECTED_SOLUTIONS: if target <= 0: raise ValueError("Number of solutions must be > 0 but got %f"%target) cdef FloatType ft = check_float_type(float_type) metric = check_pruner_metric(metric) try: gso_r[0][0] except (AttributeError, TypeError): gso_r = [gso_r] if pruning is None: pruning = PruningParams(1.0, [], 1.0) elif not isinstance(pruning, PruningParams): raise TypeError("First parameter must be of type PruningParams or None but got type '%s'"%type(pruning)) cdef vector[vector[double]] gso_r_ d = len(gso_r[0]) for i,m in enumerate(gso_r): gso_r_.push_back(vector[double]()) if len(m) != d: raise ValueError("Lengths of all vectors must match.") for e in m: gso_r_[i].push_back(e) if ft == FT_DOUBLE: sig_on() prune_c[FP_NR[double]]((pruning)._core, enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning IF HAVE_LONG_DOUBLE: if ft == FT_LONG_DOUBLE: sig_on() prune_c[FP_NR[longdouble]]((pruning)._core, enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning if ft == FT_DPE: sig_on() prune_c[FP_NR[dpe_t]]((pruning)._core, enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning if ft == FT_MPFR: sig_on() prune_c[FP_NR[mpfr_t]]((pruning)._core, enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning IF HAVE_QD: if ft == FT_DD: sig_on() prune_c[FP_NR[dd_t]]((pruning)._core, enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning elif ft == FT_QD: sig_on() prune_c[FP_NR[qd_t]]((pruning)._core, 
enumeration_radius, preproc_cost, gso_r_, target, metric, flags) sig_off() return pruning def svp_probability(pr, float_type="double"): """Return probability of success for enumeration with given set of pruning parameters. :param pr: pruning parameters, either PruningParams object or list of floating point numbers :param float_type: floating point type used internally """ cdef FloatType ft = check_float_type(float_type) if not isinstance(pr, PruningParams): pr = PruningParams(1.0, pr, 1.0) if ft == FT_DOUBLE: return svp_probability_c[FP_NR[double]]((pr)._core.coefficients).get_d() IF HAVE_LONG_DOUBLE: if ft == FT_LONG_DOUBLE: return svp_probability_c[FP_NR[longdouble]]((pr)._core.coefficients).get_d() if ft == FT_DPE: return svp_probability_c[FP_NR[dpe_t]]((pr)._core.coefficients).get_d() if ft == FT_MPFR: return svp_probability_c[FP_NR[mpfr_t]]((pr)._core.coefficients).get_d() IF HAVE_QD: if ft == FT_DD: return svp_probability_c[FP_NR[dd_t]]((pr)._core.coefficients).get_d() elif ft == FT_QD: return svp_probability_c[FP_NR[qd_t]]((pr)._core.coefficients).get_d() raise ValueError("Float type '%s' not understood."%float_type) class Pruning: Pruner = Pruner PruningParams = PruningParams LinearPruningParams = PruningParams.LinearPruningParams run = staticmethod(prune) CVP = PRUNER_CVP START_FROM_INPUT = PRUNER_START_FROM_INPUT GRADIENT = PRUNER_GRADIENT NELDER_MEAD = PRUNER_NELDER_MEAD VERBOSE = PRUNER_VERBOSE ZEALOUS = PRUNER_GRADIENT | PRUNER_NELDER_MEAD SINGLE = PRUNER_SINGLE HALF = PRUNER_HALF PROBABILITY_OF_SHORTEST = PRUNER_METRIC_PROBABILITY_OF_SHORTEST EXPECTED_SOLUTIONS = PRUNER_METRIC_EXPECTED_SOLUTIONS fpylll-0.6.1/src/fpylll/fplll/svpcvp.pyx000066400000000000000000000177401455321202600203160ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Shortest and Closest Vectors. .. moduleauthor:: Martin R. 
Albrecht """ include "fpylll/config.pxi" import warnings from cysignals.signals cimport sig_on, sig_off from libcpp.vector cimport vector from fpylll.gmp.mpz cimport mpz_t from .fplll cimport Z_NR, ZT_MPZ from .fplll cimport SVP_DEFAULT, CVP_DEFAULT from .fplll cimport SVP_VERBOSE, CVP_VERBOSE from .fplll cimport SVP_OVERRIDE_BND from .fplll cimport SVPM_PROVED, SVPM_FAST from .fplll cimport SVPMethod from .fplll cimport CVPM_PROVED, CVPM_FAST from .fplll cimport CVPMethod from .fplll cimport shortest_vector_pruning from .fplll cimport shortest_vector as shortest_vector_c from .fplll cimport closest_vector as closest_vector_c from .fplll cimport vector_matrix_product from .fplll cimport FPLLL_MAX_ENUM_DIM as MAX_ENUM_DIM from .gso import GSO from .lll import LLL from .bkz import BKZ from .bkz_param import load_strategies_json from fpylll.algorithms.bkz2 import BKZReduction from .pruner import Pruning from fpylll.io cimport assign_Z_NR_mpz, mpz_get_python from fpylll.io import SuppressStream from fpylll.util import ReductionError from fpylll.algorithms.babai import babai from .integer_matrix cimport IntegerMatrix def shortest_vector(IntegerMatrix B, method="fast", int flags=SVP_DEFAULT, pruning=True, preprocess=True, max_aux_solutions=0): """Return a shortest vector. The result is guaranteed if ``method`` is "proved". :param B: A lattice basis :param method: One of "fast" or "proved". :param int flags: :param pruning: If ``True`` pruning parameters are computed by this function. :param preprocess: Blocksize used for preprocessing; if ``True`` a block size is picked. :param max_aux_solutions: maximum number of additional short-ish solutions to return """ d = B.nrows if pruning is True and d <= 20: pruning = None # HACK: pruning in small dimensions can go wrong. 
if d > MAX_ENUM_DIM: raise NotImplementedError("This build of FPLLL is configured with a maximum enumeration dimension of %d."%MAX_ENUM_DIM) if B._type != ZT_MPZ: raise NotImplementedError("Only integer matrices over GMP integers (mpz_t) are supported.") cdef SVPMethod method_ if method == "proved": method_ = SVPM_PROVED if pruning is True: pruning = None if pruning is not None: raise ValueError("Method 'proved' is incompatible with providing pruning parameters.") elif method == "fast": method_ = SVPM_FAST else: raise ValueError("Method '{}' unknown".format(method)) cdef int r = 0 s = load_strategies_json(BKZ.DEFAULT_STRATEGY)[-1] if preprocess is True and d > s.block_size: preprocess = max(min(d-10, s.block_size), 2) if preprocess == 2: # just run LLL B = LLL.reduction(B) elif preprocess is True: # automatic choice bkz_obj = BKZReduction(B) bkz_obj.svp_reduction(0, d, BKZ.EasyParam(d)) elif preprocess and preprocess > 2: # make something work preprocess = max(min(d-10, preprocess), 2) bkz_obj = BKZReduction(B) bkz_obj(BKZ.EasyParam(preprocess)) if pruning is True: M = GSO.Mat(B) M.update_gso() for cost in (10, 20, 30, 40, 50): try: with SuppressStream(): pruning = Pruning.run(M.get_r(0, 0), 2**cost, M.r(), 0.99, flags=Pruning.SINGLE|Pruning.GRADIENT) pruning = pruning.coefficients break except RuntimeError: pass if pruning is True: # it didn't work warnings.warn("Pruning failed, proceeding without it.", RuntimeWarning) pruning = [1]*d cdef vector[Z_NR[mpz_t]] sol_coord cdef vector[Z_NR[mpz_t]] solution cdef vector[double] pruning_ cdef vector[vector[Z_NR[mpz_t]]] auxsol_coord cdef vector[double] auxsol_dist if pruning: if len(pruning) != B.nrows: raise ValueError("PruningParams vector must have length %d but got %d."%(B.nrows, len(pruning))) pruning_.resize(B.nrows) for i in range(len(pruning)): pruning_[i] = pruning[i] if max_aux_solutions == 0: sig_on() r = shortest_vector_pruning(B._core.mpz[0], sol_coord, pruning_, flags) sig_off() else: sig_on() r = 
shortest_vector_pruning(B._core.mpz[0], sol_coord, auxsol_coord, auxsol_dist, max_aux_solutions, pruning_, flags) sig_off() else: sig_on() r = shortest_vector_c(B._core.mpz[0], sol_coord, method_, flags) sig_off() if r: raise ReductionError("SVP solver returned an error ({:d})".format(r)) vector_matrix_product(solution, sol_coord, B._core.mpz[0]) cdef list v = [] for i in range(solution.size()): v.append(mpz_get_python(solution[i].get_data())) cdef list aux = [] if max_aux_solutions > 0: for j in range(auxsol_dist.size()): vector_matrix_product(solution, auxsol_coord[j], B._core.mpz[0]) aux_sol = [] for i in range(solution.size()): aux_sol.append(mpz_get_python(solution[i].get_data())) aux.append(tuple(aux_sol)) return tuple(v), tuple(aux) else: return tuple(v) class SVP: shortest_vector = staticmethod(shortest_vector) DEFAULT = SVP_DEFAULT VERBOSE = SVP_VERBOSE OVERRIDE_BND = SVP_OVERRIDE_BND def closest_vector(IntegerMatrix B, t, method="fast", int flags=CVP_DEFAULT): """Return a closest vector. The basis must be LLL-reduced with delta=``LLL.DEFAULT_DELTA`` and eta=``LLL.DEFAULT_ETA``. The result is guaranteed if method = "proved", default is "fast". :param IntegerMatrix B: Input lattice basis. :param t: Target point (∈ ZZ^n) :param method: One of "fast" or "proved". :param int flags: Either ``CVP.DEFAULT`` or ``CVP.VERBOSE``. :returns coordinates of the solution vector: EXAMPLE :: >>> from fpylll import * >>> FPLLL.set_random_seed(42) >>> A = IntegerMatrix.random(5, 'uniform', bits=10) >>> lll = LLL.reduction(A) >>> t = (94, -42, 123, 512, -1337) >>> print (CVP.closest_vector(A, t)) (-34, 109, 204, 360, -1548) >>> from fpylll import * >>> n = 10 >>> B = IntegerMatrix(n, n + 1) >>> B.randomize("intrel", bits=100) >>> v_opt = B.multiply_left([1,0,1,0,1,1,0,0,1,1]) >>> s = v_opt[0] # s = , where a is vector of knapsack values. 
>>> t = [s] + (n * [0]) >>> _ = LLL.reduction(B) >>> v = CVP.closest_vector(B, t) >>> v[0] == t[0] True >>> v[1:] (1, 0, 1, 0, 1, 1, 0, 0, 1, 1) """ if B.nrows > MAX_ENUM_DIM: raise NotImplementedError("This build of FPLLL is configured with a maximum enumeration dimension of %d."%MAX_ENUM_DIM) if B._type != ZT_MPZ: raise NotImplementedError("Only integer matrices over GMP integers (mpz_t) are supported.") cdef CVPMethod method_ if method == "proved": method_ = CVPM_PROVED elif method == "fast": method_ = CVPM_FAST else: raise ValueError("Method '{}' unknown".format(method)) cdef int r = 0 cdef vector[Z_NR[mpz_t]] int_target cdef vector[Z_NR[mpz_t]] sol_coord cdef vector[Z_NR[mpz_t]] solution int_target.resize(len(t)) for i in range(len(t)): assign_Z_NR_mpz(int_target[i], t[i]) sig_on() r = closest_vector_c(B._core.mpz[0], int_target, sol_coord, method_, flags) sig_off() if r: raise ReductionError("CVP solver returned an error ({:d})".format(r)) vector_matrix_product(solution, sol_coord, B._core.mpz[0]) cdef list v = [] for i in range(solution.size()): v.append(mpz_get_python(solution[i].get_data())) return tuple(v) class CVP: babai = staticmethod(babai) closest_vector = staticmethod(closest_vector) DEFAULT = CVP_DEFAULT VERBOSE = CVP_VERBOSE fpylll-0.6.1/src/fpylll/fplll/wrapper.pxd000066400000000000000000000004371455321202600204230ustar00rootroot00000000000000# -*- coding: utf-8 -*- from .fplll cimport Wrapper as Wrapper_c from .integer_matrix cimport IntegerMatrix cdef class Wrapper: cdef Wrapper_c *_core cdef object _called cdef readonly IntegerMatrix B cdef readonly IntegerMatrix U cdef readonly IntegerMatrix UinvT fpylll-0.6.1/src/fpylll/fplll/wrapper.pyx000066400000000000000000000040231455321202600204430ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from cysignals.signals cimport sig_on, sig_off from .fplll cimport Matrix, Z_NR, mpz_t, ZT_MPZ from .fplll cimport LLL_DEF_ETA, LLL_DEF_DELTA, LLL_DEFAULT from .fplll cimport 
get_red_status_str from fpylll.util import ReductionError cdef class Wrapper: def __init__(self, IntegerMatrix B, double delta=LLL_DEF_DELTA, double eta=LLL_DEF_ETA, int flags=LLL_DEFAULT): """FIXME! briefly describe function :param IntegerMatrix B: :param double delta: :param double eta: :param int flags: >>> from fpylll import LLL, IntegerMatrix >>> A = IntegerMatrix(50, 50) >>> A.randomize("ntrulike", bits=100, q=1023) >>> W = LLL.Wrapper(A) """ if B._type != ZT_MPZ: raise NotImplementedError("Only integer matrices over GMP integers (mpz_t) are supported.") self.B = B # TODO: Don't hardcode this self.U = IntegerMatrix(0,0) self.UinvT = IntegerMatrix(0,0) self._core = new Wrapper_c((self.B._core.mpz)[0], (self.U._core.mpz)[0], (self.UinvT._core.mpz)[0], delta, eta, flags) self._called = False def __dealloc__(self): del self._core def __reduce__(self): """ Make sure attempts at pickling raise an error until proper pickling is implemented. """ raise NotImplementedError def __call__(self): """Run LLL. 
:returns: :rtype: >>> from fpylll import LLL, IntegerMatrix, GSO >>> A = IntegerMatrix(40, 40) >>> A.randomize("ntrulike", bits=10, q=1023) >>> W = LLL.Wrapper(A) >>> W() """ if self._called: raise ValueError("lll() may only be called once.") sig_on() self._core.lll() sig_off() self._called = True @property def status(self): return self._core.status fpylll-0.6.1/src/fpylll/gmp/000077500000000000000000000000001455321202600156745ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/gmp/__init__.py000066400000000000000000000000001455321202600177730ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/gmp/all.pxd000066400000000000000000000002441455321202600171610ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 from .types cimport * from .random cimport * from .mpz cimport * from .mpq cimport * from .pylong cimport * fpylll-0.6.1/src/fpylll/gmp/misc.pxd000066400000000000000000000003751455321202600173510ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp cdef extern from "gmp.h": void mp_set_memory_functions( void *(*) (size_t), void *(*) (void *, size_t, size_t), void (*) (void *, size_t)) fpylll-0.6.1/src/fpylll/gmp/mpf.pxd000066400000000000000000000071231455321202600171760ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp from .types cimport * cdef extern from "gmp.h": ### Floating-point Functions ### # Initialization Functions void mpf_set_default_prec (unsigned long int prec) unsigned long int mpf_get_default_prec () void mpf_init (mpf_t x) void mpf_init2 (mpf_t x, unsigned long int prec) void mpf_clear (mpf_t x) unsigned long int mpf_get_prec (mpf_t op) void mpf_set_prec (mpf_t rop, unsigned long int prec) void mpf_set_prec_raw (mpf_t rop, unsigned long int prec) # Assignment Functions void mpf_set (mpf_t rop, mpf_t op) void mpf_set_ui (mpf_t rop, unsigned long int op) void mpf_set_si (mpf_t rop, signed long 
int op) void mpf_set_d (mpf_t rop, double op) void mpf_set_z (mpf_t rop, mpz_t op) void mpf_set_q (mpf_t rop, mpq_t op) int mpf_set_str (mpf_t rop, char *str, int base) void mpf_swap (mpf_t rop1, mpf_t rop2) # Combined Initialization and Assignment Functions void mpf_init_set (mpf_t rop, mpf_t op) void mpf_init_set_ui (mpf_t rop, unsigned long int op) void mpf_init_set_si (mpf_t rop, signed long int op) void mpf_init_set_d (mpf_t rop, double op) int mpf_init_set_str (mpf_t rop, char *str, int base) # Conversion Functions double mpf_get_d (mpf_t op) double mpf_get_d_2exp (signed long int *exp, mpf_t op) long mpf_get_si (mpf_t op) unsigned long mpf_get_ui (mpf_t op) char * mpf_get_str (char *str, mp_exp_t *expptr, int base, size_t n_digits, mpf_t op) # Arithmetic Functions void mpf_add (mpf_t rop, mpf_t op1, mpf_t op2) void mpf_add_ui (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_sub (mpf_t rop, mpf_t op1, mpf_t op2) void mpf_ui_sub (mpf_t rop, unsigned long int op1, mpf_t op2) void mpf_sub_ui (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_mul (mpf_t rop, mpf_t op1, mpf_t op2) void mpf_mul_ui (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_div (mpf_t rop, mpf_t op1, mpf_t op2) void mpf_ui_div (mpf_t rop, unsigned long int op1, mpf_t op2) void mpf_div_ui (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_sqrt (mpf_t rop, mpf_t op) void mpf_sqrt_ui (mpf_t rop, unsigned long int op) void mpf_pow_ui (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_neg (mpf_t rop, mpf_t op) void mpf_abs (mpf_t rop, mpf_t op) void mpf_mul_2exp (mpf_t rop, mpf_t op1, unsigned long int op2) void mpf_div_2exp (mpf_t rop, mpf_t op1, unsigned long int op2) # Comparison Functions int mpf_cmp (mpf_t op1, mpf_t op2) int mpf_cmp_d (mpf_t op1, double op2) int mpf_cmp_ui (mpf_t op1, unsigned long int op2) int mpf_cmp_si (mpf_t op1, signed long int op2) int mpf_eq (mpf_t op1, mpf_t op2, unsigned long int op3) void mpf_reldiff (mpf_t rop, mpf_t op1, mpf_t op2) int 
mpf_sgn (mpf_t op) # Input and Output Functions # size_t mpf_out_str (file *stream, int base, size_t n_digits, mpf_t op) # size_t mpf_inp_str (mpf_t rop, file *stream, int base) # Miscellaneous Functions void mpf_ceil (mpf_t rop, mpf_t op) void mpf_floor (mpf_t rop, mpf_t op) void mpf_trunc (mpf_t rop, mpf_t op) bint mpf_integer_p (mpf_t op) bint mpf_fits_ulong_p (mpf_t op) bint mpf_fits_slong_p (mpf_t op) bint mpf_fits_uint_p (mpf_t op) bint mpf_fits_sint_p (mpf_t op) bint mpf_fits_ushort_p (mpf_t op) bint mpf_fits_sshort_p (mpf_t op) void mpf_urandomb (mpf_t rop, gmp_randstate_t state, unsigned long int nbits) void mpf_random2 (mpf_t rop, mp_size_t max_size, mp_exp_t exp) fpylll-0.6.1/src/fpylll/gmp/mpn.pxd000066400000000000000000000100641455321202600172040ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp from .types cimport * cdef extern from "gmp.h": ### Low-level Functions ### mp_limb_t mpn_add_n (mp_limb_t *rp, mp_limb_t *s1p, mp_limb_t *s2p, mp_size_t n) mp_limb_t mpn_add_1 (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t n, mp_limb_t s2limb) mp_limb_t mpn_add (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n) mp_limb_t mpn_sub_n (mp_limb_t *rp, mp_limb_t *s1p, mp_limb_t *s2p, mp_size_t n) mp_limb_t mpn_sub_1 (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t n, mp_limb_t s2limb) mp_limb_t mpn_sub (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n) void mpn_mul_n (mp_limb_t *rp, mp_limb_t *s1p, mp_limb_t *s2p, mp_size_t n) mp_limb_t mpn_mul_1 (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t n, mp_limb_t s2limb) mp_limb_t mpn_addmul_1 (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t n, mp_limb_t s2limb) mp_limb_t mpn_submul_1 (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t n, mp_limb_t s2limb) mp_limb_t mpn_mul (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n) void mpn_tdiv_qr (mp_limb_t *qp, mp_limb_t *rp, mp_size_t qxn, mp_limb_t *np, mp_size_t nn, 
mp_limb_t *dp, mp_size_t dn) mp_limb_t mpn_divrem (mp_limb_t *r1p, mp_size_t qxn, mp_limb_t *rs2p, mp_size_t rs2n, mp_limb_t *s3p, mp_size_t s3n) mp_limb_t mpn_divrem_1 (mp_limb_t *r1p, mp_size_t qxn, mp_limb_t *s2p, mp_size_t s2n, mp_limb_t s3limb) mp_limb_t mpn_divmod_1 (mp_limb_t *r1p, mp_limb_t *s2p, mp_size_t s2n, mp_limb_t s3limb) mp_limb_t mpn_divmod (mp_limb_t *r1p, mp_limb_t *rs2p, mp_size_t rs2n, mp_limb_t *s3p, mp_size_t s3n) mp_limb_t mpn_divexact_by3 (mp_limb_t *rp, mp_limb_t *sp, mp_size_t n) mp_limb_t mpn_divexact_by3c (mp_limb_t *rp, mp_limb_t *sp, mp_size_t n, mp_limb_t carry) mp_limb_t mpn_mod_1 (mp_limb_t *s1p, mp_size_t s1n, mp_limb_t s2limb) mp_limb_t mpn_bdivmod (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n, unsigned long int d) mp_limb_t mpn_lshift (mp_limb_t *rp, mp_limb_t *sp, mp_size_t n, unsigned int count) mp_limb_t mpn_rshift (mp_limb_t *rp, mp_limb_t *sp, mp_size_t n, unsigned int count) int mpn_cmp (mp_limb_t *s1p, mp_limb_t *s2p, mp_size_t n) mp_size_t mpn_gcd (mp_limb_t *rp, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n) mp_limb_t mpn_gcd_1 (mp_limb_t *s1p, mp_size_t s1n, mp_limb_t s2limb) mp_size_t mpn_gcdext (mp_limb_t *r1p, mp_limb_t *r2p, mp_size_t *r2n, mp_limb_t *s1p, mp_size_t s1n, mp_limb_t *s2p, mp_size_t s2n) mp_size_t mpn_sqrtrem (mp_limb_t *r1p, mp_limb_t *r2p, mp_limb_t *sp, mp_size_t n) mp_size_t mpn_get_str (unsigned char *str, int base, mp_limb_t *s1p, mp_size_t s1n) mp_size_t mpn_set_str (mp_limb_t *rp, unsigned char *str, size_t strsize, int base) unsigned long int mpn_scan0 (mp_limb_t *s1p, unsigned long int bit) unsigned long int mpn_scan1 (mp_limb_t *s1p, unsigned long int bit) void mpn_random (mp_limb_t *r1p, mp_size_t r1n) void mpn_random2 (mp_limb_t *r1p, mp_size_t r1n) unsigned long int mpn_popcount (mp_limb_t *s1p, mp_size_t n) unsigned long int mpn_hamdist (mp_limb_t *s1p, mp_limb_t *s2p, mp_size_t n) int mpn_perfect_square_p (mp_limb_t *s1p, mp_size_t n) void 
mpn_and_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_andn_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_nand_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_ior_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_iorn_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_nior_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_xor_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_xnor_n(mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t n) void mpn_com(mp_ptr rp, mp_srcptr sp, mp_size_t n) void mpn_copyi(mp_ptr rp, mp_srcptr s1p, mp_size_t n) void mpn_copyd(mp_ptr rp, mp_srcptr s1p, mp_size_t n) void mpn_zero(mp_ptr rp, mp_size_t n) fpylll-0.6.1/src/fpylll/gmp/mpq.pxd000066400000000000000000000042501455321202600172070ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp from .types cimport * cdef extern from "gmp.h": ### Rational Functions ### void mpq_canonicalize (mpq_t op) # Initialization and Assignment Functions void mpq_init (mpq_t dest_rational) void mpq_clear (mpq_t rational_number) void mpq_set (mpq_t rop, mpq_t op) void mpq_set_z (mpq_t rop, mpz_t op) void mpq_set_ui (mpq_t rop, unsigned long int op1, unsigned long int op2) void mpq_set_si (mpq_t rop, signed long int op1, unsigned long int op2) int mpq_set_str (mpq_t rop, char *str, int base) void mpq_swap (mpq_t rop1, mpq_t rop2) # Conversion Functions double mpq_get_d (mpq_t op) void mpq_set_d (mpq_t rop, double op) void mpq_set_f (mpq_t rop, mpf_t op) char * mpq_get_str (char *str, int base, mpq_t op) # Arithmetic Functions void mpq_add (mpq_t sum, mpq_t addend1, mpq_t addend2) void mpq_sub (mpq_t difference, mpq_t minuend, mpq_t subtrahend) void mpq_mul (mpq_t product, mpq_t multiplier, mpq_t multiplicand) void mpq_mul_2exp (mpq_t rop, mpq_t op1, unsigned long int op2) void mpq_div (mpq_t quotient, mpq_t dividend, mpq_t divisor) void 
mpq_div_2exp (mpq_t rop, mpq_t op1, unsigned long int op2) void mpq_neg (mpq_t negated_operand, mpq_t operand) void mpq_abs (mpq_t rop, mpq_t op) void mpq_inv (mpq_t inverted_number, mpq_t number) # Comparison Functions int mpq_cmp (mpq_t op1, mpq_t op2) int mpq_cmp_ui (mpq_t op1, unsigned long int num2, unsigned long int den2) int mpq_cmp_si (mpq_t op1, long int num2, unsigned long int den2) int mpq_sgn (mpq_t op) int mpq_equal (mpq_t op1, mpq_t op2) # Applying Integer Functions to Rationals mpz_t mpq_numref (mpq_t op) mpz_t mpq_denref (mpq_t op) void mpq_get_num (mpz_t numerator, mpq_t rational) void mpq_get_den (mpz_t denominator, mpq_t rational) void mpq_set_num (mpq_t rational, mpz_t numerator) void mpq_set_den (mpq_t rational, mpz_t denominator) # Input and Output Functions # size_t mpq_out_str (file *stream, int base, mpq_t op) # size_t mpq_inp_str (mpq_t rop, file *stream, int base) fpylll-0.6.1/src/fpylll/gmp/mpz.pxd000066400000000000000000000223141455321202600172210ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp from .types cimport * from libc.stdio cimport FILE from libc.stdint cimport intmax_t, uintmax_t cdef extern from "gmp.h": ### Integer Functions ### # Initialization Functions void mpz_init (mpz_t integer) void mpz_init2 (mpz_t integer, unsigned long n) void mpz_clear (mpz_t integer) void mpz_realloc2 (mpz_t integer, unsigned long n) # Assignment Functions void mpz_set (mpz_t rop, mpz_t op) void mpz_set_ui (mpz_t rop, unsigned long int op) void mpz_set_si (mpz_t rop, signed long int op) void mpz_set_ux (mpz_t rop, uintmax_t op) void mpz_set_sx (mpz_t rop, intmax_t op) void mpz_set_d (mpz_t rop, double op) void mpz_set_q (mpz_t rop, mpq_t op) void mpz_set_f (mpz_t rop, mpf_t op) int mpz_set_str (mpz_t rop, char *str, int base) void mpz_swap (mpz_t rop1, mpz_t rop2) # Combined Initialization and Assignment Functions void mpz_init_set (mpz_t rop, mpz_t op) void mpz_init_set_ui (mpz_t 
rop, unsigned long int op) void mpz_init_set_si (mpz_t rop, signed long int op) void mpz_init_set_ux (mpz_t rop, uintmax_t op) void mpz_init_set_sx (mpz_t rop, intmax_t op) void mpz_init_set_d (mpz_t rop, double op) int mpz_init_set_str (mpz_t rop, char *str, int base) # Conversion Functions unsigned long int mpz_get_ui (mpz_t op) signed long int mpz_get_si (mpz_t op) uintmax_t mpz_get_ux (mpz_t op) intmax_t mpz_get_sx (mpz_t op) double mpz_get_d (mpz_t op) double mpz_get_d_2exp (long int *exp, mpz_t op) char * mpz_get_str (char *str, int base, mpz_t op) # Arithmetic Functions void mpz_add (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_add_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_sub (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_sub_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_ui_sub (mpz_t rop, unsigned long int op1, mpz_t op2) void mpz_mul (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_mul_si (mpz_t rop, mpz_t op1, long int op2) void mpz_mul_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_addmul (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_addmul_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_submul (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_submul_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_mul_2exp (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_neg (mpz_t rop, mpz_t op) void mpz_abs (mpz_t rop, mpz_t op) # Division Functions void mpz_cdiv_q (mpz_t q, mpz_t n, mpz_t d) void mpz_cdiv_r (mpz_t r, mpz_t n, mpz_t d) void mpz_cdiv_qr (mpz_t q, mpz_t r, mpz_t n, mpz_t d) unsigned long int mpz_cdiv_q_ui (mpz_t q, mpz_t n, unsigned long int d) unsigned long int mpz_cdiv_r_ui (mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_cdiv_qr_ui (mpz_t q, mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_cdiv_ui (mpz_t n, unsigned long int d) void mpz_cdiv_q_2exp (mpz_t q, mpz_t n, unsigned long int b) void mpz_cdiv_r_2exp (mpz_t r, mpz_t n, unsigned long int b) void mpz_fdiv_q (mpz_t q, mpz_t n, 
mpz_t d) void mpz_fdiv_r (mpz_t r, mpz_t n, mpz_t d) void mpz_fdiv_qr (mpz_t q, mpz_t r, mpz_t n, mpz_t d) unsigned long int mpz_fdiv_q_ui (mpz_t q, mpz_t n, unsigned long int d) unsigned long int mpz_fdiv_r_ui (mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_fdiv_qr_ui (mpz_t q, mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_fdiv_ui (mpz_t n, unsigned long int d) void mpz_fdiv_q_2exp (mpz_t q, mpz_t n, unsigned long int b) void mpz_fdiv_r_2exp (mpz_t r, mpz_t n, unsigned long int b) void mpz_tdiv_q (mpz_t q, mpz_t n, mpz_t d) void mpz_tdiv_r (mpz_t r, mpz_t n, mpz_t d) void mpz_tdiv_qr (mpz_t q, mpz_t r, mpz_t n, mpz_t d) unsigned long int mpz_tdiv_q_ui (mpz_t q, mpz_t n, unsigned long int d) unsigned long int mpz_tdiv_r_ui (mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_tdiv_qr_ui (mpz_t q, mpz_t r, mpz_t n, unsigned long int d) unsigned long int mpz_tdiv_ui (mpz_t n, unsigned long int d) void mpz_tdiv_q_2exp (mpz_t q, mpz_t n, unsigned long int b) void mpz_tdiv_r_2exp (mpz_t r, mpz_t n, unsigned long int b) void mpz_mod (mpz_t r, mpz_t n, mpz_t d) unsigned long int mpz_mod_ui (mpz_t r, mpz_t n, unsigned long int d) void mpz_divexact (mpz_t q, mpz_t n, mpz_t d) void mpz_divexact_ui (mpz_t q, mpz_t n, unsigned long d) bint mpz_divisible_p (mpz_t n, mpz_t d) bint mpz_divisible_ui_p (mpz_t n, unsigned long int d) bint mpz_divisible_2exp_p (mpz_t n, unsigned long int b) bint mpz_congruent_p (mpz_t n, mpz_t c, mpz_t d) bint mpz_congruent_ui_p (mpz_t n, unsigned long int c, unsigned long int d) bint mpz_congruent_2exp_p (mpz_t n, mpz_t c, unsigned long int b) # Exponentiation Functions void mpz_powm (mpz_t rop, mpz_t base, mpz_t exp, mpz_t mod) void mpz_powm_ui (mpz_t rop, mpz_t base, unsigned long int exp, mpz_t mod) void mpz_pow_ui (mpz_t rop, mpz_t base, unsigned long int exp) void mpz_ui_pow_ui (mpz_t rop, unsigned long int base, unsigned long int exp) # Root Extraction Functions int mpz_root (mpz_t rop, mpz_t op, unsigned 
long int n) void mpz_rootrem (mpz_t root, mpz_t rem, mpz_t u, unsigned long int n) void mpz_sqrt (mpz_t rop, mpz_t op) void mpz_sqrtrem (mpz_t rop1, mpz_t rop2, mpz_t op) bint mpz_perfect_power_p (mpz_t op) bint mpz_perfect_square_p (mpz_t op) # Number Theoretic Functions bint mpz_probab_prime_p (mpz_t n, int reps) void mpz_nextprime (mpz_t rop, mpz_t op) void mpz_gcd (mpz_t rop, mpz_t op1, mpz_t op2) unsigned long int mpz_gcd_ui (mpz_t rop, mpz_t op1, unsigned long int op2) void mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, mpz_t a, mpz_t b) void mpz_lcm (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_lcm_ui (mpz_t rop, mpz_t op1, unsigned long op2) int mpz_invert (mpz_t rop, mpz_t op1, mpz_t op2) int mpz_jacobi (mpz_t a, mpz_t b) int mpz_legendre (mpz_t a, mpz_t p) int mpz_kronecker (mpz_t a, mpz_t b) int mpz_kronecker_si (mpz_t a, long b) int mpz_kronecker_ui (mpz_t a, unsigned long b) int mpz_si_kronecker (long a, mpz_t b) int mpz_ui_kronecker (unsigned long a, mpz_t b) unsigned long int mpz_remove (mpz_t rop, mpz_t op, mpz_t f) void mpz_fac_ui (mpz_t rop, unsigned long int op) void mpz_bin_ui (mpz_t rop, mpz_t n, unsigned long int k) void mpz_bin_uiui (mpz_t rop, unsigned long int n, unsigned long int k) void mpz_fib_ui (mpz_t fn, unsigned long int n) void mpz_fib2_ui (mpz_t fn, mpz_t fnsub1, unsigned long int n) void mpz_lucnum_ui (mpz_t ln, unsigned long int n) void mpz_lucnum2_ui (mpz_t ln, mpz_t lnsub1, unsigned long int n) # Comparison Functions int mpz_cmp (mpz_t op1, mpz_t op2) int mpz_cmp_d (mpz_t op1, double op2) int mpz_cmp_si (mpz_t op1, signed long int op2) int mpz_cmp_ui (mpz_t op1, unsigned long int op2) int mpz_cmpabs (mpz_t op1, mpz_t op2) int mpz_cmpabs_d (mpz_t op1, double op2) int mpz_cmpabs_ui (mpz_t op1, unsigned long int op2) int mpz_sgn (mpz_t op) # Logical and Bit Manipulation Functions void mpz_and (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_ior (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_xor (mpz_t rop, mpz_t op1, mpz_t op2) void mpz_com (mpz_t 
rop, mpz_t op) unsigned long int mpz_popcount (mpz_t op) unsigned long int mpz_hamdist (mpz_t op1, mpz_t op2) unsigned long int mpz_scan0 (mpz_t op, unsigned long int starting_bit) unsigned long int mpz_scan1 (mpz_t op, unsigned long int starting_bit) void mpz_setbit (mpz_t rop, unsigned long int bit_index) void mpz_clrbit (mpz_t rop, unsigned long int bit_index) void mpz_combit (mpz_t rop, unsigned long int bit_index) int mpz_tstbit (mpz_t op, unsigned long int bit_index) # Input and Output Functions size_t mpz_out_str (FILE *stream, int base, mpz_t op) size_t mpz_inp_str (mpz_t rop, FILE *stream, int base) size_t mpz_out_raw (FILE *stream, mpz_t op) size_t mpz_inp_raw (mpz_t rop, FILE *stream) # Random Number Functions void mpz_urandomb (mpz_t rop, gmp_randstate_t state, unsigned long int n) void mpz_urandomm (mpz_t rop, gmp_randstate_t state, mpz_t n) void mpz_rrandomb (mpz_t rop, gmp_randstate_t state, unsigned long int n) void mpz_random (mpz_t rop, mp_size_t max_size) void mpz_random2 (mpz_t rop, mp_size_t max_size) # Integer Import and Export void mpz_import (mpz_t rop, size_t count, int order, int size, int endian, size_t nails, void *op) void * mpz_export (void *rop, size_t *countp, int order, int size, int endian, size_t nails, mpz_t op) # Miscellaneous Functions bint mpz_fits_ulong_p (mpz_t op) bint mpz_fits_slong_p (mpz_t op) bint mpz_fits_uint_p (mpz_t op) bint mpz_fits_sint_p (mpz_t op) bint mpz_fits_ushort_p (mpz_t op) bint mpz_fits_sshort_p (mpz_t op) bint mpz_odd_p (mpz_t op) bint mpz_even_p (mpz_t op) size_t mpz_sizeinbase (mpz_t op, int base) # Special Functions void * _mpz_realloc (mpz_t integer, mp_size_t new_alloc) mp_limb_t mpz_getlimbn (mpz_t op, mp_size_t n) size_t mpz_size (mpz_t op) fpylll-0.6.1/src/fpylll/gmp/pycore_long.h000066400000000000000000000042221455321202600203650ustar00rootroot00000000000000#include "Python.h" #include #if PY_VERSION_HEX >= 0x030C00A5 #define ob_digit(o) (((PyLongObject*)o)->long_value.ob_digit) #else #define 
ob_digit(o) (((PyLongObject*)o)->ob_digit) #endif #if PY_VERSION_HEX >= 0x030C00A7 // taken from cpython:Include/internal/pycore_long.h @ 3.12 /* Long value tag bits: * 0-1: Sign bits value = (1-sign), ie. negative=2, positive=0, zero=1. * 2: Reserved for immortality bit * 3+ Unsigned digit count */ #define SIGN_MASK 3 #define SIGN_ZERO 1 #define SIGN_NEGATIVE 2 #define NON_SIZE_BITS 3 static inline bool _PyLong_IsZero(const PyLongObject *op) { return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO; } static inline bool _PyLong_IsNegative(const PyLongObject *op) { return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE; } static inline bool _PyLong_IsPositive(const PyLongObject *op) { return (op->long_value.lv_tag & SIGN_MASK) == 0; } static inline Py_ssize_t _PyLong_DigitCount(const PyLongObject *op) { assert(PyLong_Check(op)); return op->long_value.lv_tag >> NON_SIZE_BITS; } #define TAG_FROM_SIGN_AND_SIZE(sign, size) ((1 - (sign)) | ((size) << NON_SIZE_BITS)) static inline void _PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size) { assert(size >= 0); assert(-1 <= sign && sign <= 1); assert(sign != 0 || size == 0); op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, (size_t)size); } #else // fallback for < 3.12 static inline bool _PyLong_IsZero(const PyLongObject *op) { return Py_SIZE(op) == 0; } static inline bool _PyLong_IsNegative(const PyLongObject *op) { return Py_SIZE(op) < 0; } static inline bool _PyLong_IsPositive(const PyLongObject *op) { return Py_SIZE(op) > 0; } static inline Py_ssize_t _PyLong_DigitCount(const PyLongObject *op) { Py_ssize_t size = Py_SIZE(op); return size < 0 ? -size : size; } static inline void _PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size) { #if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION < 9) // The function Py_SET_SIZE is defined starting with python 3.9. Py_SIZE(o) = size; #else Py_SET_SIZE(op, sign < 0 ? 
-size : size); #endif } #endif fpylll-0.6.1/src/fpylll/gmp/pycore_long.pxd000066400000000000000000000005341455321202600207330ustar00rootroot00000000000000from cpython.longintrepr cimport py_long, digit cdef extern from "pycore_long.h": digit* ob_digit(py_long o) bint _PyLong_IsZero(py_long o) bint _PyLong_IsNegative(py_long o) bint _PyLong_IsPositive(py_long o) Py_ssize_t _PyLong_DigitCount(py_long o) void _PyLong_SetSignAndDigitCount(py_long o, int sign, Py_ssize_t size) fpylll-0.6.1/src/fpylll/gmp/pylong.pxd000066400000000000000000000005701455321202600177230ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 """ Various functions to deal with conversion mpz <-> Python int/long """ from cpython.longintrepr cimport py_long from fpylll.gmp.types cimport * cdef mpz_get_pylong(mpz_srcptr z) cdef mpz_get_pyintlong(mpz_srcptr z) cdef int mpz_set_pylong(mpz_ptr z, py_long L) except -1 cdef Py_hash_t mpz_pythonhash(mpz_srcptr z) fpylll-0.6.1/src/fpylll/gmp/pylong.pyx000066400000000000000000000061041455321202600177470ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 """ Various functions to deal with conversion mpz <-> Python int/long For doctests, see :class:`Integer`. AUTHORS: - Gonzalo Tornaria (2006): initial version - David Harvey (2007-08-18): added ``mpz_get_pyintlong`` function (:trac:`440`) - Jeroen Demeyer (2015-02-24): moved from c_lib, rewritten using ``mpz_export`` and ``mpz_import`` (:trac:`17853`) """ #***************************************************************************** # Copyright (C) 2015 Jeroen Demeyer # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. 
# http://www.gnu.org/licenses/ #***************************************************************************** from cpython.int cimport PyInt_FromLong from cpython.long cimport PyLong_CheckExact, PyLong_FromLong from cpython.longintrepr cimport _PyLong_New, digit, PyLong_SHIFT from .pycore_long cimport (ob_digit, _PyLong_IsZero, _PyLong_IsNegative, _PyLong_IsPositive, _PyLong_DigitCount, _PyLong_SetSignAndDigitCount) from .mpz cimport * # Unused bits in every PyLong digit cdef size_t PyLong_nails = 8*sizeof(digit) - PyLong_SHIFT cdef mpz_get_pylong_large(mpz_srcptr z): """ Convert a non-zero ``mpz`` to a Python ``long``. """ cdef size_t nbits = mpz_sizeinbase(z, 2) cdef size_t pylong_size = (nbits + PyLong_SHIFT - 1) // PyLong_SHIFT cdef py_long L = _PyLong_New(pylong_size) mpz_export(ob_digit(L), NULL, -1, sizeof(digit), 0, PyLong_nails, z) _PyLong_SetSignAndDigitCount(L, mpz_sgn(z), pylong_size) return L cdef mpz_get_pylong(mpz_srcptr z): """ Convert an ``mpz`` to a Python ``long``. """ if mpz_fits_slong_p(z): return PyLong_FromLong(mpz_get_si(z)) return mpz_get_pylong_large(z) cdef mpz_get_pyintlong(mpz_srcptr z): """ Convert an ``mpz`` to a Python ``int`` if possible, or a ``long`` if the value is too large. """ if mpz_fits_slong_p(z): return PyInt_FromLong(mpz_get_si(z)) return mpz_get_pylong_large(z) cdef int mpz_set_pylong(mpz_ptr z, py_long L) except -1: """ Convert a Python ``long`` `L` to an ``mpz``. """ cdef Py_ssize_t pylong_size = _PyLong_DigitCount(L) mpz_import(z, pylong_size, -1, sizeof(digit), 0, PyLong_nails, ob_digit(L)) if _PyLong_IsNegative(L): mpz_neg(z, z) cdef Py_hash_t mpz_pythonhash(mpz_srcptr z): """ Hash an ``mpz``, where the hash value is the same as the hash value of the corresponding Python ``long``. 
""" # Add all limbs, adding 1 for every carry cdef mp_limb_t h1 = 0 cdef mp_limb_t h0 cdef size_t i, n n = mpz_size(z) for i in range(n): h0 = h1 h1 += mpz_getlimbn(z, i) # Add 1 on overflow if h1 < h0: h1 += 1 cdef Py_hash_t h = h1 if mpz_sgn(z) < 0: h = -h if h == -1: return -2 return h fpylll-0.6.1/src/fpylll/gmp/random.pxd000066400000000000000000000017711455321202600176770ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 # distutils: libraries = gmp from .types cimport * cdef extern from "gmp.h": ### Random Number Functions ### # Random State Initialization void gmp_randinit_default (gmp_randstate_t state) int gmp_randinit_mt (gmp_randstate_t state) void gmp_randinit_lc_2exp (gmp_randstate_t state, mpz_t a, unsigned long c, unsigned long m2exp) int gmp_randinit_lc_2exp_size (gmp_randstate_t state, unsigned long size) int gmp_randinit_set (gmp_randstate_t rop, gmp_randstate_t op) # void gmp_randinit (gmp_randstate_t state, gmp_randalg_t alg, ...) void gmp_randclear (gmp_randstate_t state) # Random State Seeding void gmp_randseed (gmp_randstate_t state, mpz_t seed) void gmp_randseed_ui (gmp_randstate_t state, unsigned long int seed) # Random State Miscellaneous unsigned long gmp_urandomb_ui (gmp_randstate_t state, unsigned long n) unsigned long gmp_urandomm_ui (gmp_randstate_t state, unsigned long n) fpylll-0.6.1/src/fpylll/gmp/types.pxd000066400000000000000000000027311455321202600175600ustar00rootroot00000000000000# copied/adapted from Sage development tree version 6.9 from libc.stdio cimport FILE cdef extern from "gmp.h": # GMP's configuration of how many bits are stuffed into a limb cdef unsigned int GMP_LIMB_BITS cdef int mp_bits_per_limb # Underlying typedefs ctypedef unsigned long mp_limb_t ctypedef long mp_limb_signed_t ctypedef unsigned long mp_bitcnt_t ctypedef long mp_size_t ctypedef long mp_exp_t ctypedef mp_limb_t* mp_ptr ctypedef mp_limb_t* mp_srcptr # This internal structure is not guaranteed to stay the same with 
# future releases of GMP or MPIR. ctypedef struct __mpz_struct: int _mp_alloc int _mp_size mp_ptr _mp_d ctypedef struct __mpq_struct: __mpz_struct _mp_num __mpz_struct _mp_den ctypedef struct __mpf_struct: int _mp_prec int _mp_size mp_exp_t _mp_exp mp_limb_t *_mp_d ctypedef struct __gmp_randstate_struct: pass # User facing types ctypedef __mpz_struct mpz_t[1] ctypedef __mpq_struct mpq_t[1] ctypedef __mpf_struct mpf_t[1] ctypedef __gmp_randstate_struct gmp_randstate_t[1] ctypedef __mpz_struct *mpz_ptr ctypedef __mpq_struct *mpq_ptr ctypedef __mpf_struct *mpf_ptr ctypedef __mpz_struct *mpz_srcptr ctypedef __mpq_struct *mpq_srcptr ctypedef __mpf_struct *mpf_srcptr # Cython doesn't want to take the address of an mpz_t cdef mpz_t* address_of_mpz "&"(mpz_t x) fpylll-0.6.1/src/fpylll/io.pxd000066400000000000000000000011331455321202600162330ustar00rootroot00000000000000include "fpylll/config.pxi" from .fplll.fplll cimport Z_NR from .gmp.mpz cimport mpz_t from .gmp.types cimport mpz_srcptr from .fplll.decl cimport fp_nr_t, vector_fp_nr_t, vector_z_nr_t from fplll.fplll cimport FloatType, IntType cdef int assign_Z_NR_mpz(Z_NR[mpz_t]& t, value) except -1 cdef int assign_mpz(mpz_t& t, value) except -1 cdef object mpz_get_python(mpz_srcptr z) cdef void vector_fp_nr_barf(vector_fp_nr_t &out, object inp, FloatType float_type) cdef object vector_fp_nr_slurp(vector_fp_nr_t &inp, FloatType float_type) cdef object vector_z_nr_slurp(vector_z_nr_t &inp, IntType int_type) fpylll-0.6.1/src/fpylll/io.pyx000066400000000000000000000134701455321202600162670ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" import sys import os from cpython.int cimport PyInt_AS_LONG from fpylll.gmp.mpz cimport mpz_init, mpz_clear, mpz_set from fpylll.gmp.pylong cimport mpz_get_pyintlong, mpz_set_pylong from .gmp.mpz cimport mpz_t, mpz_set_si, mpz_set from cpython.version cimport PY_MAJOR_VERSION from fplll.fplll cimport FT_DEFAULT, FT_DOUBLE, FT_LONG_DOUBLE, FT_DPE, FT_MPFR from 
fplll.fplll cimport ZT_MPZ, ZT_LONG # Note: this uses fpylll's numpy and not the global numpy package. IF HAVE_NUMPY: from .numpy import is_numpy_integer IF HAVE_QD: from fpylll.fplll.fplll cimport FT_DD, FT_QD try: from sage.rings.integer import Integer have_sage = True except Exception: have_sage = False cdef int assign_Z_NR_mpz(Z_NR[mpz_t]& t, value) except -1: """ Assign Python integer to Z_NR[mpz_t] """ cdef mpz_t tmp mpz_init(tmp) try: assign_mpz(tmp, value) t.set(tmp) finally: mpz_clear(tmp) cdef int assign_mpz(mpz_t& t, value) except -1: """ Assign Python integer to Z_NR[mpz_t] """ if isinstance(value, int) and PY_MAJOR_VERSION == 2: mpz_set_si(t, PyInt_AS_LONG(value)) return 0 if isinstance(value, long): mpz_set_pylong(t, value) return 0 if have_sage: if isinstance(value, Integer): value = long(value) mpz_set_pylong(t, value) return 0 IF HAVE_NUMPY: if is_numpy_integer(value): value = long(value) mpz_set_pylong(t, value) return 0 raise NotImplementedError("Type '%s' not supported"%type(value)) cdef object mpz_get_python(mpz_srcptr z): r = mpz_get_pyintlong(z) if have_sage: return Integer(r) else: return r cdef void vector_fp_nr_barf(vector_fp_nr_t &out, object inp, FloatType float_type): cdef fp_nr_t tmp cdef bytes py_bytes if float_type == FT_DOUBLE: for entry in inp: # this is slow but we want to cover all kinds of Python types here py_bytes = str(entry).encode() tmp.d = py_bytes out.d.push_back(tmp.d) elif float_type == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: for entry in inp: py_bytes = str(entry).encode() tmp.ld = py_bytes out.ld.push_back(tmp.ld) ELSE: raise ValueError("Float type '%s' not understood."%float_type) elif float_type == FT_DPE: for entry in inp: py_bytes = str(entry).encode() tmp.dpe = py_bytes out.dpe.push_back(tmp.dpe) elif float_type == FT_MPFR: for entry in inp: py_bytes = str(entry).encode() tmp.mpfr = py_bytes out.mpfr.push_back(tmp.mpfr) else: IF HAVE_QD: if float_type == FT_DD: for entry in inp: py_bytes = str(entry).encode() tmp.dd 
= py_bytes out.dd.push_back(tmp.dd) elif float_type == FT_QD: for entry in inp: py_bytes = str(entry).encode() tmp.qd = py_bytes out.qd.push_back(tmp.qd) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) cdef object vector_fp_nr_slurp(vector_fp_nr_t &inp, FloatType float_type): out = [] if float_type == FT_DOUBLE: for i in range(inp.d.size()): out.append(inp.d[i].get_d()) elif float_type == FT_LONG_DOUBLE: IF HAVE_LONG_DOUBLE: for i in range(inp.ld.size()): out.append(inp.ld[i].get_d()) ELSE: raise ValueError("Float type '%s' not understood."%float_type) elif float_type == FT_DPE: for i in range(inp.dpe.size()): out.append(inp.dpe[i].get_d()) elif float_type == FT_MPFR: for i in range(inp.mpfr.size()): out.append(inp.mpfr[i].get_d()) else: IF HAVE_QD: if float_type == FT_DD: for i in range(inp.dd.size()): out.append(inp.dd[i].get_d()) elif float_type == FT_QD: for i in range(inp.qd.size()): out.append(inp.qd[i].get_d()) else: raise ValueError("Float type '%s' not understood."%float_type) ELSE: raise ValueError("Float type '%s' not understood."%float_type) return tuple(out) cdef object vector_z_nr_slurp(vector_z_nr_t &inp, IntType int_type): out = [] if int_type == ZT_MPZ: for i in range(inp.mpz.size()): out.append(mpz_get_python(inp.mpz[i].get_data())) elif int_type == ZT_LONG: for i in range(inp.long.size()): out.append(inp.long[i].get_data()) else: raise ValueError("Int type '%s' not understood."%int_type) return tuple(out) class SuppressStream(object): """ Suppress errors (being printed by FPLLL, which are to be expected). 
""" def __init__(self, stream=sys.stderr): try: self.orig_stream_fileno = stream.fileno() self.skip = False except OSError: self.skip = True def __enter__(self): if self.skip: return self.orig_stream_dup = os.dup(self.orig_stream_fileno) self.devnull = open(os.devnull, "w") os.dup2(self.devnull.fileno(), self.orig_stream_fileno) def __exit__(self, type, value, traceback): if self.skip: return os.close(self.orig_stream_fileno) os.dup2(self.orig_stream_dup, self.orig_stream_fileno) os.close(self.orig_stream_dup) self.devnull.close() fpylll-0.6.1/src/fpylll/mpfr/000077500000000000000000000000001455321202600160555ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/mpfr/__init__.py000066400000000000000000000000001455321202600201540ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/mpfr/mpfr.pxd000066400000000000000000000360461455321202600175470ustar00rootroot00000000000000from fpylll.gmp.types cimport * cdef extern from "mpfr.h": ctypedef struct __mpfr_struct: pass ctypedef __mpfr_struct mpfr_t[1] ctypedef __mpfr_struct* mpfr_ptr ctypedef __mpfr_struct* mpfr_srcptr ctypedef enum mpfr_rnd_t: MPFR_RNDN MPFR_RNDZ MPFR_RNDU MPFR_RNDD MPFR_RNDA MPFR_RNDF MPFR_RNDNA GMP_RNDN GMP_RNDZ GMP_RNDU GMP_RNDD ctypedef mpfr_rnd_t mp_rnd_t ctypedef long mp_prec_t int MPFR_PREC_MIN, MPFR_PREC_MAX # Initialization Functions void mpfr_init2 (mpfr_t x, mp_prec_t prec) void mpfr_clear (mpfr_t x) void mpfr_init (mpfr_t x) void mpfr_set_default_prec (mp_prec_t prec) mp_prec_t mpfr_get_default_prec () void mpfr_set_prec (mpfr_t x, mp_prec_t prec) mp_prec_t mpfr_get_prec (mpfr_t x) # Assignment Functions int mpfr_set (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_set_ui (mpfr_t rop, unsigned long int op, mp_rnd_t rnd) int mpfr_set_si (mpfr_t rop, long int op, mp_rnd_t rnd) int mpfr_set_d (mpfr_t rop, double op, mp_rnd_t rnd) int mpfr_set_ld (mpfr_t rop, long double op, mp_rnd_t rnd) # int mpfr_set_decimal64 (mpfr_t rop, _Decimal64 op, mp_rnd_t rnd) int mpfr_set_z (mpfr_t rop, mpz_t op, 
mp_rnd_t rnd) int mpfr_set_q (mpfr_t rop, mpq_t op, mp_rnd_t rnd) # int mpfr_set_f (mpfr_t rop, mpf_t op, mp_rnd_t rnd) int mpfr_set_ui_2exp (mpfr_t rop, unsigned long int op, mp_exp_t e, mp_rnd_t rnd) int mpfr_set_si_2exp (mpfr_t rop, long int op, mp_exp_t e, mp_rnd_t rnd) int mpfr_set_str (mpfr_t rop, char *s, int base, mp_rnd_t rnd) int mpfr_strtofr (mpfr_t rop, char *nptr, char **endptr, int base, mp_rnd_t rnd) void mpfr_set_inf (mpfr_t x, int sign) void mpfr_set_nan (mpfr_t x) void mpfr_set_zero (mpfr_t x, int sign) void mpfr_swap (mpfr_t x, mpfr_t y) # Combined Initialization and Assignment Functions int mpfr_init_set (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_init_set_ui (mpfr_t rop, unsigned long int op, mp_rnd_t rnd) int mpfr_init_set_si (mpfr_t rop, signed long int op, mp_rnd_t rnd) int mpfr_init_set_d (mpfr_t rop, double op, mp_rnd_t rnd) int mpfr_init_set_ld (mpfr_t rop, long double op, mp_rnd_t rnd) int mpfr_init_set_z (mpfr_t rop, mpz_t op, mp_rnd_t rnd) int mpfr_init_set_q (mpfr_t rop, mpq_t op, mp_rnd_t rnd) # int mpfr_init_set_f (mpfr_t rop, mpf_t op, mp_rnd_t rnd) int mpfr_init_set_str (mpfr_t x, char *s, int base, mp_rnd_t rnd) # Conversion Functions double mpfr_get_d (mpfr_t op, mp_rnd_t rnd) long double mpfr_get_ld (mpfr_t op, mp_rnd_t rnd) # _Decimal64 mpfr_get_decimal64 (mpfr_t op, mp_rnd_t rnd) double mpfr_get_d_2exp (long *exp, mpfr_t op, mp_rnd_t rnd) long double mpfr_get_ld_2exp (long *exp, mpfr_t op, mp_rnd_t rnd) long mpfr_get_si (mpfr_t op, mp_rnd_t rnd) unsigned long mpfr_get_ui (mpfr_t op, mp_rnd_t rnd) mp_exp_t mpfr_get_z_exp (mpz_t rop, mpfr_t op) void mpfr_get_z (mpz_t rop, mpfr_t op, mp_rnd_t rnd) # int mpfr_get_f (mpf_t rop, mpfr_t op, mp_rnd_t rnd) char * mpfr_get_str (char *str, mp_exp_t *expptr, int b, size_t n, mpfr_t op, mp_rnd_t rnd) void mpfr_free_str (char *str) bint mpfr_fits_ulong_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_slong_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_uint_p (mpfr_t op, mp_rnd_t rnd) bint 
mpfr_fits_sint_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_ushort_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_sshort_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_intmax_p (mpfr_t op, mp_rnd_t rnd) bint mpfr_fits_uintmax_p (mpfr_t op, mp_rnd_t rnd) # Basic Arithmetic Functions int mpfr_add (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_add_ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_add_si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_add_z (mpfr_t rop, mpfr_t op1, mpz_t op2, mp_rnd_t rnd) int mpfr_add_q (mpfr_t rop, mpfr_t op1, mpq_t op2, mp_rnd_t rnd) int mpfr_sub (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_ui_sub (mpfr_t rop, unsigned long int op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_sub_ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_si_sub (mpfr_t rop, long int op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_sub_si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_sub_z (mpfr_t rop, mpfr_t op1, mpz_t op2, mp_rnd_t rnd) int mpfr_sub_q (mpfr_t rop, mpfr_t op1, mpq_t op2, mp_rnd_t rnd) int mpfr_mul (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_mul_ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_mul_si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_mul_z (mpfr_t rop, mpfr_t op1, mpz_t op2, mp_rnd_t rnd) int mpfr_mul_q (mpfr_t rop, mpfr_t op1, mpq_t op2, mp_rnd_t rnd) int mpfr_sqr (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_div (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_ui_div (mpfr_t rop, unsigned long int op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_div_ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_si_div (mpfr_t rop, long int op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_div_si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_div_z (mpfr_t rop, mpfr_t op1, mpz_t op2, mp_rnd_t rnd) int mpfr_div_q (mpfr_t rop, mpfr_t op1, mpq_t op2, mp_rnd_t rnd) int mpfr_sqrt 
(mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sqrt_ui (mpfr_t rop, unsigned long int op, mp_rnd_t rnd) int mpfr_cbrt (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_root (mpfr_t rop, mpfr_t op, unsigned long int k, mp_rnd_t rnd) int mpfr_pow (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_pow_ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_pow_si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_pow_z (mpfr_t rop, mpfr_t op1, mpz_t op2, mp_rnd_t rnd) int mpfr_ui_pow_ui (mpfr_t rop, unsigned long int op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_ui_pow (mpfr_t rop, unsigned long int op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_neg (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_abs (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_dim (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_mul_2ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_mul_2si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) int mpfr_div_2ui (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_div_2si (mpfr_t rop, mpfr_t op1, long int op2, mp_rnd_t rnd) # Comparison Functions int mpfr_cmp (mpfr_t op1, mpfr_t op2) int mpfr_cmp_ui (mpfr_t op1, unsigned long int op2) int mpfr_cmp_si (mpfr_t op1, signed long int op2) int mpfr_cmp_d (mpfr_t op1, double op2) int mpfr_cmp_ld (mpfr_t op1, long double op2) int mpfr_cmp_z (mpfr_t op1, mpz_t op2) int mpfr_cmp_q (mpfr_t op1, mpq_t op2) # int mpfr_cmp_f (mpfr_t op1, mpf_t op2) int mpfr_cmp_ui_2exp (mpfr_t op1, unsigned long int op2, mp_exp_t e) int mpfr_cmp_si_2exp (mpfr_t op1, long int op2, mp_exp_t e) int mpfr_cmpabs (mpfr_t op1, mpfr_t op2) bint mpfr_nan_p (mpfr_t op) bint mpfr_inf_p (mpfr_t op) bint mpfr_number_p (mpfr_t op) bint mpfr_zero_p (mpfr_t op) bint mpfr_regular_p (mpfr_t op) int mpfr_sgn (mpfr_t op) bint mpfr_greater_p (mpfr_t op1, mpfr_t op2) bint mpfr_greaterequal_p (mpfr_t op1, mpfr_t op2) bint mpfr_less_p (mpfr_t op1, mpfr_t op2) bint 
mpfr_lessequal_p (mpfr_t op1, mpfr_t op2) bint mpfr_lessgreater_p (mpfr_t op1, mpfr_t op2) bint mpfr_equal_p (mpfr_t op1, mpfr_t op2) bint mpfr_unordered_p (mpfr_t op1, mpfr_t op2) # Special Functions int mpfr_log (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_log2 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_log10 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_exp (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_exp2 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_exp10 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_cos (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sin (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_tan (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sec (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_csc (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_cot (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sin_cos (mpfr_t sop, mpfr_t cop, mpfr_t op, mp_rnd_t rnd) int mpfr_acos (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_asin (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_atan (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_atan2 (mpfr_t rop, mpfr_t y, mpfr_t x, mp_rnd_t rnd) int mpfr_cosh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sinh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_tanh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_sech (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_csch (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_coth (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_acosh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_asinh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_atanh (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_fac_ui (mpfr_t rop, unsigned long int op, mp_rnd_t rnd) int mpfr_log1p (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_expm1 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_eint (mpfr_t y, mpfr_t x, mp_rnd_t rnd) int mpfr_li2 (mpfr_t y, mpfr_t x, mp_rnd_t rnd) int mpfr_gamma (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_lngamma (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_lgamma (mpfr_t rop, int *signp, mpfr_t 
op, mp_rnd_t rnd) int mpfr_zeta (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_zeta_ui (mpfr_t rop, unsigned long op, mp_rnd_t rnd) int mpfr_erf (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_erfc (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_j0 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_j1 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_jn (mpfr_t rop, long n, mpfr_t op, mp_rnd_t rnd) int mpfr_y0 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_y1 (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_yn (mpfr_t rop, long n, mpfr_t op, mp_rnd_t rnd) int mpfr_fma (mpfr_t rop, mpfr_t op1, mpfr_t op2, mpfr_t op3, mp_rnd_t rnd) int mpfr_fms (mpfr_t rop, mpfr_t op1, mpfr_t op2, mpfr_t op3, mp_rnd_t rnd) int mpfr_agm (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_hypot (mpfr_t rop, mpfr_t x, mpfr_t y, mp_rnd_t rnd) int mpfr_const_log2 (mpfr_t rop, mp_rnd_t rnd) int mpfr_const_pi (mpfr_t rop, mp_rnd_t rnd) int mpfr_const_euler (mpfr_t rop, mp_rnd_t rnd) int mpfr_const_catalan (mpfr_t rop, mp_rnd_t rnd) void mpfr_free_cache () int mpfr_sum (mpfr_t rop, mpfr_ptr tab[], unsigned long n, mp_rnd_t rnd) # Input and Output Functions # size_t mpfr_out_str (file *stream, int base, size_t n, mpfr_t op, mp_rnd_t rnd) # size_t mpfr_inp_str (mpfr_t rop, file *stream, int base, mp_rnd_t rnd) # Integer Related Functions int mpfr_rint (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_ceil (mpfr_t rop, mpfr_t op) int mpfr_floor (mpfr_t rop, mpfr_t op) int mpfr_round (mpfr_t rop, mpfr_t op) int mpfr_trunc (mpfr_t rop, mpfr_t op) int mpfr_rint_ceil (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_rint_floor (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_rint_round (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_rint_trunc (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_frac (mpfr_t rop, mpfr_t op, mp_rnd_t rnd) int mpfr_remainder (mpfr_t r, mpfr_t x, mpfr_t y, mp_rnd_t rnd) int mpfr_remquo (mpfr_t r, long* q, mpfr_t x, mpfr_t y, mp_rnd_t rnd) bint mpfr_integer_p (mpfr_t op) # Miscellaneous 
Functions void mpfr_nexttoward (mpfr_t x, mpfr_t y) void mpfr_nextabove (mpfr_t x) void mpfr_nextbelow (mpfr_t x) int mpfr_min (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_max (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_urandomb (mpfr_t rop, gmp_randstate_t state) void mpfr_random (mpfr_t rop) void mpfr_random2 (mpfr_t rop, mp_size_t size, mp_exp_t exp) mp_exp_t mpfr_get_exp (mpfr_t x) int mpfr_set_exp (mpfr_t x, mp_exp_t e) int mpfr_signbit (mpfr_t op) int mpfr_setsign (mpfr_t rop, mpfr_t op, int s, mp_rnd_t rnd) int mpfr_copysign (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) char * mpfr_get_version () long MPFR_VERSION_NUM (major, minor, patchlevel) char * mpfr_get_patches () # Printf-Like Functions int mpfr_printf (const char*, ...) int mpfr_asprintf (char**, const char*, ...) int mpfr_sprintf (char**, const char*, ...) int mpfr_snprintf (char*, size_t, const char*, ...) # Rounding Mode Related Functions void mpfr_set_default_rounding_mode (mp_rnd_t rnd) mp_rnd_t mpfr_get_default_rounding_mode () int mpfr_prec_round (mpfr_t x, mp_prec_t prec, mp_rnd_t rnd) int mpfr_round_prec (mpfr_t x, mp_rnd_t rnd, mp_prec_t prec) char * mpfr_print_rnd_mode (mp_rnd_t rnd) # Exception Related Functions mp_exp_t mpfr_get_emin () mp_exp_t mpfr_get_emax () int mpfr_set_emin (mp_exp_t exp) int mpfr_set_emax (mp_exp_t exp) mp_exp_t mpfr_get_emin_min () mp_exp_t mpfr_get_emin_max () mp_exp_t mpfr_get_emax_min () mp_exp_t mpfr_get_emax_max () int mpfr_check_range (mpfr_t x, int t, mp_rnd_t rnd) int mpfr_subnormalize (mpfr_t x, int t, mp_rnd_t rnd) void mpfr_clear_underflow () void mpfr_clear_overflow () void mpfr_clear_nanflag () void mpfr_clear_inexflag () void mpfr_clear_erangeflag () void mpfr_set_underflow () void mpfr_set_overflow () void mpfr_set_nanflag () void mpfr_set_inexflag () void mpfr_set_erangeflag () void mpfr_clear_flags () bint mpfr_underflow_p () bint mpfr_overflow_p () bint mpfr_nanflag_p () bint mpfr_inexflag_p () bint 
mpfr_erangeflag_p () # Advanced Functions MPFR_DECL_INIT (name, prec) void mpfr_inits (mpfr_t x, ...) void mpfr_inits2 (mp_prec_t prec, mpfr_t x, ...) void mpfr_clears (mpfr_t x, ...) # Compatibility With MPF void mpfr_set_prec_raw (mpfr_t x, mp_prec_t prec) int mpfr_eq (mpfr_t op1, mpfr_t op2, unsigned long int op3) void mpfr_reldiff (mpfr_t rop, mpfr_t op1, mpfr_t op2, mp_rnd_t rnd) int mpfr_mul_2exp (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) int mpfr_div_2exp (mpfr_t rop, mpfr_t op1, unsigned long int op2, mp_rnd_t rnd) # Custom Interface size_t mpfr_custom_get_size (mp_prec_t prec) void mpfr_custom_init (void *significand, mp_prec_t prec) void mpfr_custom_init_set (mpfr_t x, int kind, mp_exp_t exp, mp_prec_t prec, void *significand) int mpfr_custom_get_kind (mpfr_t x) void * mpfr_custom_get_mantissa (mpfr_t x) mp_exp_t mpfr_custom_get_exp (mpfr_t x) void mpfr_custom_move (mpfr_t x, void *new_position) # Internals int mpfr_can_round (mpfr_t b, mp_exp_t err, mp_rnd_t rnd1, mp_rnd_t rnd2, mp_prec_t prec) double mpfr_get_d1 (mpfr_t op) fpylll-0.6.1/src/fpylll/numpy.pyx000066400000000000000000000124211455321202600170230ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from fpylll.fplll.gso cimport MatGSO from fpylll.fplll.decl cimport mat_gso_mpz_d, mat_gso_mpz_ld, mat_gso_mpz_dpe, mat_gso_mpz_mpfr from fpylll.fplll.decl cimport mat_gso_long_d, mat_gso_long_ld, mat_gso_long_dpe, mat_gso_long_mpfr IF HAVE_QD: from fpylll.fplll.decl cimport mat_gso_mpz_dd, mat_gso_mpz_qd from fpylll.fplll.decl cimport mat_gso_long_dd, mat_gso_long_qd IF not HAVE_NUMPY: raise ImportError("NumPy is not installed, but this module relies on it.") import numpy from numpy.__init__ cimport ndarray # TODO: that __init__ shouldn't be needed from numpy.__init__ cimport integer as np_integer def _dump_mu(ndarray[double, ndim=2, mode="c"] mu not None, MatGSO M, int kappa, int block_size): u""" Dump a block of the GSO matrix μ into a a numpy array. 
:param mu: numpy array of size (block_size*block_size) and type float64 :param M: GSO object :param kappa: index of the beginning of the block :param block_size: size of the considered block :returns: Nothing """ if M._type == mat_gso_mpz_d: return M._core.mpz_d.dump_mu_d(&mu[0,0], kappa, block_size) IF HAVE_LONG_DOUBLE: if M._type == mat_gso_mpz_ld: return M._core.mpz_ld.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_mpz_dpe: return M._core.mpz_dpe.dump_mu_d(&mu[0,0], kappa, block_size) IF HAVE_QD: if M._type == mat_gso_mpz_dd: return M._core.mpz_dd.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_mpz_qd: return M._core.mpz_qd.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_mpz_mpfr: return M._core.mpz_mpfr.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_long_d: return M._core.long_d.dump_mu_d(&mu[0,0], kappa, block_size) IF HAVE_LONG_DOUBLE: if M._type == mat_gso_long_ld: return M._core.long_ld.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_long_dpe: return M._core.long_dpe.dump_mu_d(&mu[0,0], kappa, block_size) IF HAVE_QD: if M._type == mat_gso_long_dd: return M._core.long_dd.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_long_qd: return M._core.long_qd.dump_mu_d(&mu[0,0], kappa, block_size) if M._type == mat_gso_long_mpfr: return M._core.long_mpfr.dump_mu_d(&mu[0,0], kappa, block_size) raise RuntimeError("MatGSO object '%s' has no core."%M) def dump_mu(MatGSO m, int kappa, int block_size): u""" Dump a block of the GSO matrix μ into a a numpy array. :param M: GSO object :param kappa: index of the beginning of the block :param block_size: size of the considered block :returns: Nothing """ mu = ndarray(dtype='float64', shape=(block_size, block_size)) _dump_mu(mu, m, kappa, block_size) return mu def _dump_r(ndarray[double, ndim=1, mode="c"] r not None, MatGSO M, int kappa, int block_size): u""" Dump a block of the GSO vector r into a a numpy array. 
:param mu: numpy array of size (block_size) and type float64 :param M: GSO object :param kappa: index of the beginning of the block :param block_size: size of the considered block :returns: Nothing """ if M._type == mat_gso_mpz_d: return M._core.mpz_d.dump_r_d(&r[0], kappa, block_size) IF HAVE_LONG_DOUBLE: if M._type == mat_gso_mpz_ld: return M._core.mpz_ld.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_mpz_dpe: return M._core.mpz_dpe.dump_r_d(&r[0], kappa, block_size) IF HAVE_QD: if M._type == mat_gso_mpz_dd: return M._core.mpz_dd.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_mpz_qd: return M._core.mpz_qd.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_mpz_mpfr: return M._core.mpz_mpfr.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_long_d: return M._core.long_d.dump_r_d(&r[0], kappa, block_size) IF HAVE_LONG_DOUBLE: if M._type == mat_gso_long_ld: return M._core.long_ld.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_long_dpe: return M._core.long_dpe.dump_r_d(&r[0], kappa, block_size) IF HAVE_QD: if M._type == mat_gso_long_dd: return M._core.long_dd.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_long_qd: return M._core.long_qd.dump_r_d(&r[0], kappa, block_size) if M._type == mat_gso_long_mpfr: return M._core.long_mpfr.dump_r_d(&r[0], kappa, block_size) raise RuntimeError("MatGSO object '%s' has no core."%M) def dump_r(MatGSO M, int kappa, int block_size): u""" Dump a block of the GSO vector r into a a numpy array. :param M: GSO object :param kappa: index of the beginning of the block :param block_size: size of the considered block :returns: Nothing """ r = ndarray(dtype='float64', shape=block_size) _dump_r(r, M, kappa, block_size) return r def is_numpy_integer(value): """ Return true if value is a numpy integer, false otherwise. :param value: the value to be checked. :returns: True if value is a numpy integer, false otherwise. 
""" return isinstance(value, np_integer) fpylll-0.6.1/src/fpylll/qd/000077500000000000000000000000001455321202600155155ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/qd/__init__.py000066400000000000000000000000001455321202600176140ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/qd/qd.pxd000066400000000000000000000004041455321202600166340ustar00rootroot00000000000000# -*- coding: utf-8 -*- cdef extern from "qd/dd_real.h": cdef cppclass dd_real: dd_real(double hi, double lo) dd_real() cdef extern from "qd/qd_real.h": cdef cppclass qd_real: qd_real(double hi, double lo) qd_real() fpylll-0.6.1/src/fpylll/tools/000077500000000000000000000000001455321202600162515ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/tools/__init__.py000066400000000000000000000000001455321202600203500ustar00rootroot00000000000000fpylll-0.6.1/src/fpylll/tools/benchmark.py000066400000000000000000000016161455321202600205610ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll.fplll.gso import MatGSO from fpylll.fplll.integer_matrix import IntegerMatrix from fpylll.fplll.lll import LLLReduction from fpylll.fplll.enumeration import Enumeration from fpylll import Pruning from time import time def bench_enumeration(n): """Return number of nodes visited and wall time for enumeration in dimension `n`. :param n: dimension :returns: nodes, wall time >>> import fpylll.tools.benchmark >>> _ = fpylll.tools.benchmark.bench_enumeration(30) """ A = IntegerMatrix.random(n, "qary", bits=30, k=n//2) M = MatGSO(A) L = LLLReduction(M) L(0, 0, n) radius = M.get_r(0, 0) * .999 pruning = Pruning.run(radius, 2.0**50, M.r(), 0.2) enum = Enumeration(M) t = time() enum.enumerate(0, n, radius, 0, pruning=pruning.coefficients) t = time() - t cost = enum.get_nodes() return cost, t fpylll-0.6.1/src/fpylll/tools/bkz_plot.py000066400000000000000000000110241455321202600204450ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Plot `\\log_2` of square Gram-Schmidt norms during a BKZ run. 
EXAMPLE:: >>> from fpylll import IntegerMatrix, BKZ, FPLLL >>> from fpylll.algorithms.bkz2 import BKZReduction as BKZ2 >>> from fpylll.tools.bkz_plot import KeepGSOBKZFactory >>> FPLLL.set_random_seed(1337) >>> _ = FPLLL.set_threads(1) # to make it deterministic >>> A = IntegerMatrix.random(80, "qary", k=40, bits=20) >>> bkz = KeepGSOBKZFactory(BKZ2)(A) >>> bkz(BKZ.EasyParam(20)) >>> bkz._KeepGSOBKZ__gso_norms[0][0] 23104295.0 >>> bkz._KeepGSOBKZ__gso_norms[-1][0] 6591824.0 .. modulauthor: Martin Albrecht """ def KeepGSOBKZFactory(cls): """ Return a wrapper class around ``cls`` which collects Gram-Schmidt norms in the attribute ``__KeepGSOBKZ_gso_norms``. In particular, the list will be constructed as follows: - index 0: input GSO norms - index i,j: kappa and GSO norms in tour i-1 for after j-th SVP call - index -1: output GSO norms :param cls: A BKZ-like algorithm with methods ``__call__``, ``svp_reduction`` and ``tour``. .. warning:: This will slow down the algorithm especially for small block sizes. """ class KeepGSOBKZ(cls): def __call__(self, *args, **kwds): self.M.update_gso() self.__gso_norms = [self.M.r()] self.__at_toplevel = True cls.__call__(self, *args, **kwds) self.M.update_gso() self.__gso_norms.append(self.M.r()) def svp_reduction(self, kappa, *args, **kwds): at_toplevel = self.__at_toplevel self.__at_toplevel = False r = cls.svp_reduction(self, kappa, *args, **kwds) self.__at_toplevel = at_toplevel if at_toplevel: self.M.update_gso() self.__gso_norms[-1].append((kappa, self.M.r())) return r def tour(self, *args, **kwds): if self.__at_toplevel: self.__gso_norms.append([]) return cls.tour(self, *args, **kwds) return KeepGSOBKZ def plot_gso_norms(gso_norms, block_size, basename="bkz-gso-norms", extension="png", dpi=300): """Plot ``gso_norms``. :param gso_norms: list of GSO norms. It is assumed these follow the form output by ``KeepGSOBKZ``. 
:param block_size: BKZ block size :param basename: graphics filename basenname (may contain full path) :param extension: graphics filename extension/type :param dpi: resolution :returns: Tuple of filenames written. .. note:: To convert to movie, call e.g. ``ffmpeg -framerate 8 -pattern_type glob -i "*.png" bkz.mkv`` .. warning:: This function is quite slow. """ from math import log, pi, e import matplotlib.pyplot as plt import matplotlib.patches as patches filenames = [] def maplog2(l): return [log(l[i], 2) for i in range(len(l))] def plot_finalize(ax, name): ax.set_ylabel("$2\\,\\log_2(\\cdot)$") ax.set_xlabel("$i$") ax.legend(loc="upper right") ax.set_ylim(*ylim) fullname = "%s.%s"%(name, extension) fig.savefig(fullname, dpi=dpi) filenames.append(fullname) plt.close() d = len(gso_norms[0]) x = range(d) beta = float(block_size) delta_0 = (beta/(2.*pi*e) * (pi*beta)**(1./beta))**(1./(2.*(beta-1))) alpha = delta_0**(-2.*d/(d-1.)) logvol = sum(maplog2(gso_norms[0])) # already squared gsa = [log(alpha, 2)*(2*i) + log(delta_0, 2)*(2*d) + logvol*(1./d) for i in range(d)] fig, ax = plt.subplots() ax.plot(x, maplog2(gso_norms[0]), label="$\\|\\mathbf{b}_i^*\\|$") ylim = ax.get_ylim() ax.set_title("Input") plot_finalize(ax, "%s-aaaa-input"%basename) for i, tour in enumerate(gso_norms[1:-1]): for j, (kappa, norms) in enumerate(tour): fig, ax = plt.subplots() rect = patches.Rectangle((kappa, ylim[0]), min(block_size, d-kappa-1), ylim[1]-ylim[0], fill=True, color="lightgray") ax.add_patch(rect) ax.plot(x, maplog2(norms), label="$\\|\\mathbf{b}_i^*\\|$") ax.plot(x, gsa, color="black", label="GSA") ax.set_title("BKZ-%d tour: %2d, $\\kappa$: %3d"%(block_size, i, kappa)) plot_finalize(ax, "%s-t%03d-%04d"%(basename, i, j)) fig, ax = plt.subplots() ax.plot(x, maplog2(gso_norms[-1])) ax.set_title("Output") plot_finalize(ax, "%s-zzzz-output"%basename) return tuple(filenames) 
fpylll-0.6.1/src/fpylll/tools/bkz_simulator.py000066400000000000000000000252441455321202600215170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ This file implements two BKZ simulation algorithms as proposed in - Chen, Y., & Nguyen, P. Q. (2011). BKZ 2.0: better lattice security estimates. In D. H. Lee, & X. Wang, ASIACRYPT~2011 (pp. 1–20). : Springer, Heidelberg. - Bai, S., & Stehlé, D. & Wen, W. (2018). Measuring, simulating and exploiting the head concavity phenomenon in BKZ. In T. Peyrin, & S. Galbraith, ASIACRYPT~2018 : Springer, Heidelberg. .. moduleauthor:: Michael Walter (2014) .. moduleauthor:: Martin R. Albrecht (2018) .. moduleauthor:: Shi Bai (2020) .. moduleauthor:: Fernando Virdia (2020) """ from copy import copy from math import log, sqrt, lgamma, pi, exp from collections import OrderedDict import random from fpylll.tools.quality import basis_quality from fpylll.tools.bkz_stats import pretty_dict from fpylll.fplll.bkz import BKZ from fpylll.fplll.integer_matrix import IntegerMatrix from fpylll.fplll.gso import MatGSO, GSO from fpylll import FPLLL rk = ( 0.789527997160000, 0.780003183804613, 0.750872218594458, 0.706520454592593, 0.696345241018901, 0.660533841808400, 0.626274718790505, 0.581480717333169, 0.553171463433503, 0.520811087419712, 0.487994338534253, 0.459541470573431, 0.414638319529319, 0.392811729940846, 0.339090376264829, 0.306561491936042, 0.276041187709516, 0.236698863270441, 0.196186341673080, 0.161214212092249, 0.110895134828114, 0.0678261623920553, 0.0272807162335610, -0.0234609979600137, -0.0320527224746912, -0.0940331032784437, -0.129109087817554, -0.176965384290173, -0.209405754915959, -0.265867993276493, -0.299031324494802, -0.349338597048432, -0.380428160303508, -0.427399405474537, -0.474944677694975, -0.530140672818150, -0.561625221138784, -0.612008793872032, -0.669011014635905, -0.713766731570930, -0.754041787011810, -0.808609696192079, -0.859933249032210, -0.884479963601658, -0.886666930030433, ) def 
_extract_log_norms(r): if isinstance(r, IntegerMatrix): r = GSO.Mat(r) elif isinstance(r, MatGSO): r.update_gso() r = r.r() else: for ri in r: if (ri <= 0): raise ValueError("squared norms in r should be positive") # code uses log2 of norms, FPLLL uses squared norms r = list(map(lambda x: log(x, 2) / 2.0, r)) return r def simulate(r, param): """ BKZ simulation algorithm as proposed by Chen and Nguyen in "BKZ 2.0: Better Lattice Security Estimates". Returns the reduced squared norms of the GSO vectors of the basis and the number of BKZ tours simulated. This version terminates when no substantial progress is made anymore or at most ``max_loops`` tours were simulated. If no ``max_loops`` is given, at most ``d`` tours are performed, where ``d`` is the dimension of the lattice. :param r: squared norms of the GSO vectors of the basis. :param param: BKZ parameters EXAMPLE: >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ >>> FPLLL.set_random_seed(1337) >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50)) >>> M = GSO.Mat(A) >>> from fpylll.tools.bkz_simulator import simulate >>> _ = simulate(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE)) {"i": 0, "r_0": 2^33.3, "r_0/gh": 6.110565, "rhf": 1.018340, "/": -0.07013, "hv/hv": 2.424131} {"i": 1, "r_0": 2^32.7, "r_0/gh": 4.018330, "rhf": 1.016208, "/": -0.06161, "hv/hv": 2.156298} {"i": 2, "r_0": 2^32.3, "r_0/gh": 2.973172, "rhf": 1.014679, "/": -0.05745, "hv/hv": 2.047014} {"i": 3, "r_0": 2^32.1, "r_0/gh": 2.583479, "rhf": 1.013966, "/": -0.05560, "hv/hv": 2.000296} """ r = _extract_log_norms(r) d = len(r) r1 = copy(r) r2 = copy(r) c = [rk[-i] - sum(rk[-i:]) / i for i in range(1, 46)] c += [ (lgamma(beta / 2.0 + 1) * (1.0 / beta) - log(sqrt(pi))) / log(2.0) for beta in range(46, param.block_size + 1) ] if param.max_loops: N = param.max_loops else: N = d for i in range(N): phi = True for k in range(d - min(45, param.block_size)): beta = min(param.block_size, d - k) f = k + beta logV = 
sum(r1[:f]) - sum(r2[:k]) lma = logV / beta + c[beta - 1] if phi: if lma < r1[k]: r2[k] = lma phi = False else: r2[k] = lma # early termination if phi or r1 == r2: break else: beta = min(45, param.block_size) logV = sum(r1) - sum(r2[:-beta]) if param.block_size < 45: tmp = sum(rk[-param.block_size :]) / param.block_size rk1 = [r_ - tmp for r_ in rk[-param.block_size :]] else: rk1 = rk for k, r in zip(range(d - beta, d), rk1): r2[k] = logV / beta + r r1 = copy(r2) if param.flags & BKZ.VERBOSE: r = OrderedDict() r["i"] = i for k, v in basis_quality(list(map(lambda x: 2.0 ** (2 * x), r1))).items(): r[k] = v print(pretty_dict(r)) r1 = list(map(lambda x: 2.0 ** (2 * x), r1)) return r1, i + 1 def simulate_prob(r, param, prng_seed=0xdeadbeef): """ BKZ simulation algorithm as proposed by Bai and Stehlé and Wen in "Measuring, simulating and exploiting the head concavity phenomenon in BKZ". Returns the reduced squared norms of the GSO vectors of the basis and the number of BKZ tours simulated. This version terminates when no substantial progress is made anymore or at most ``max_loops`` tours were simulated. If no ``max_loops`` is given, at most ``d`` tours are performed, where ``d`` is the dimension of the lattice. :param r: squared norms of the GSO vectors of the basis. 
:param param: BKZ parameters EXAMPLE: >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ >>> FPLLL.set_random_seed(1337) >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50)) >>> M = GSO.Mat(A) >>> from fpylll.tools.bkz_simulator import simulate_prob >>> _ = simulate_prob(M, BKZ.Param(block_size=40, max_loops=4, flags=BKZ.VERBOSE)) {"i": 0, "r_0": 2^33.1, "r_0/gh": 5.193166, "rhf": 1.017512, "/": -0.07022, "hv/hv": 2.428125} {"i": 1, "r_0": 2^32.7, "r_0/gh": 3.997766, "rhf": 1.016182, "/": -0.06214, "hv/hv": 2.168460} {"i": 2, "r_0": 2^32.3, "r_0/gh": 3.020156, "rhf": 1.014759, "/": -0.05808, "hv/hv": 2.059562} {"i": 3, "r_0": 2^32.2, "r_0/gh": 2.783102, "rhf": 1.014344, "/": -0.05603, "hv/hv": 2.013191} """ if param.block_size <= 2: raise ValueError("The BSW18 simulator requires block size >= 3.") # fix PRNG seed random.seed(prng_seed if prng_seed else FPLLL.randint(0, 2**32-1)) r = _extract_log_norms(r) d = len(r) r1 = copy(r) r2 = copy(r) c = [rk[-j] - sum(rk[-j:]) / j for j in range(1, 46)] c += [ (lgamma(beta / 2.0 + 1) * (1.0 / beta) - log(sqrt(pi))) / log(2.0) for beta in range(46, param.block_size + 1) ] if param.max_loops: N = param.max_loops else: N = d t0 = [True for _ in range(d)] for i in range(N): t1 = [False for _ in range(d)] for k in range(d - min(45, param.block_size)): beta = min(param.block_size, d - k) f = k + beta phi = False for kp in range(k, f): phi |= t0[kp] logV = sum(r1[:f]) - sum(r2[:k]) if phi: X = random.expovariate(.5) lma = (log(X, 2) + logV) / beta + c[beta - 1] if lma < r1[k]: r2[k] = lma r2[k+1] = r1[k] + log(sqrt(1-1./beta), 2) dec = (r1[k]-lma) + (r1[k+1] - r2[k+1]) for j in range(k+2, f): r2[j] = r1[j] + dec/(beta-2.) 
t1[j] = True phi = False for j in range(k, f): r1[j] = r2[j] # early termination if True not in t1: break # last block beta = min(45, param.block_size) logV = sum(r1) - sum(r2[:-beta]) if param.block_size < 45: rk1 = normalize_GSO_unitary(rk[-beta:]) else: rk1 = rk K = range(d-beta, d) for k, r in zip(K, rk1): r2[k] = logV / beta + r t1[k] = True # early termination if (r1 == r2): break r1 = copy(r2) t0 = copy(t1) if param.flags & BKZ.VERBOSE: r = OrderedDict() r["i"] = i for k, v in basis_quality(list(map(lambda x: 2.0 ** (2 * x), r1))).items(): r[k] = v print(pretty_dict(r)) r1 = list(map(lambda x: 2.0 ** (2 * x), r1)) return r1, i + 1 def normalize_GSO_unitary(l): log_det = sum(l) n = len(l) nor_log_det = [0.0] * n for i in range(n): nor_log_det[i] = l[i] - log_det/n return nor_log_det def averaged_simulate_prob(L, param, tries=10): """ This wrapper calls the [BSW18] probabilistic BKZ simulator with different PRNG seeds, and returns the average output. :param r: squared norms of the GSO vectors of the basis. :param param: BKZ parameters :tries: number of iterations to average over. Default: 10 EXAMPLE: >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL, BKZ >>> FPLLL.set_random_seed(1337) >>> A = LLL.reduction(IntegerMatrix.random(100, "qary", bits=30, k=50)) >>> M = GSO.Mat(A) >>> from fpylll.tools.bkz_simulator import averaged_simulate_prob >>> _ = averaged_simulate_prob(M, BKZ.Param(block_size=40, max_loops=4)) >>> print(_[0][:3]) [4663149828.487..., 4267813469.1884..., 4273411937.5775...] 
""" if tries < 1: raise ValueError("Need to average over positive number of tries.") for _ in range(tries): x, y = simulate_prob(L, param, prng_seed=_+1) x = list(map(log, x)) if _ == 0: i = [l for l in x] j = y else: inew = [sum(l) for l in zip(i, x)] i = inew j += y i = [l/tries for l in i] j = j/tries return list(map(exp, i)), j fpylll-0.6.1/src/fpylll/tools/bkz_stats.py000066400000000000000000000656201455321202600206400ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Collecting traces from BKZ-like computations .. moduleauthor:: Martin R. Albrecht """ from __future__ import print_function from __future__ import absolute_import import time try: from time import process_time # Python 3 except ImportError: from time import clock as process_time # Python 2 import copy from collections import OrderedDict from math import log from fpylll.tools.quality import basis_quality def pretty_dict(d, keyword_width=None, round_bound=9999, suppress_length=128): """Return 'pretty' string representation of the dictionary ``d``. 
:param d: a dictionary :param keyword_width: width allocated for keywords :param round_bound: values beyond this bound are shown as `2^x` :param suppress_length: don't print arbitrary data with ``len(str(data)) > suppress_length`` >>> from collections import OrderedDict >>> str(pretty_dict(OrderedDict([('d', 2), ('f', 0.1), ('large', 4097)]))) '{"d": 2, "f": 0.100000, "large": 4097}' """ s = [] for k in d: v = d[k] if keyword_width: fmt = u'"%%%ds"' % keyword_width k = fmt % k else: k = '"%s"' % k if isinstance(v, int): if abs(v) > round_bound: s.append(u"%s: %8s" % (k, u"%s2^%.1f" % ("" if v > 0 else "-", log(abs(v), 2)))) else: s.append(u"%s: %8d" % (k, v)) continue elif not isinstance(v, float): try: v = float(v) except TypeError: if len(str(v)) <= suppress_length: s.append(u"%s: %s" % (k, v)) else: s.append(u"%s: '...'" % (k,)) continue if 0 <= v < 10.0: s.append(u"%s: %8.6f" % (k, v)) elif -10 < v < 0: s.append(u"%s: %8.5f" % (k, v)) elif abs(v) < round_bound: s.append(u"%s: %8.3f" % (k, v)) else: s.append(u"%s: %8s" % (k, u"%s2^%.1f" % ("" if v > 0 else "-", log(abs(v), 2)))) return u"{" + u", ".join(s) + u"}" class Accumulator(object): """ An ``Accumulator`` collects observed facts about some random variable (e.g. running time). In particular, - minimum, - maximum, - mean and - variance are recorded:: >>> v = Accumulator(1.0); v 1.0 >>> v += 2.0; v 3.0 >>> v = Accumulator(-5.4, repr="avg"); v -5.4 >>> v += 0.2 >>> v += 5.2; v 0.0 >>> v.min, v.max (-5.4, 5.2) """ def __init__(self, value, repr="sum", count=True, bessel_correction=False): """ Create a new instance. :param value: some initial value :param repr: how to represent the data: "min", "max", "avg", "sum" and "variance" are valid choices :param count: if ``True`` the provided value is considered as an observed datum, i.e. the counter is increased by one. :param bessel_correction: apply Bessel's correction to the variance. 
""" self._min = value self._max = value self._sum = value self._sqr = value * value self._ctr = 1 if count else 0 self._repr = repr self._bessel_correction = bessel_correction def add(self, value): """ Add value to the accumulator. >>> v = Accumulator(10.0) >>> v.add(5.0) 15.0 :param value: some value :returns: itself """ self._min = min(self._min, value) self._max = max(self._max, value) self._sum += value self._sqr += value * value self._ctr += 1 return self @property def min(self): """ >>> v = Accumulator(2.0) >>> v += 5.0 >>> v.min 2.0 """ return self._min @property def max(self): """ >>> v = Accumulator(2.0) >>> v += 5.0 >>> v.max 5.0 """ return self._max @property def avg(self): """ >>> v = Accumulator(2.0) >>> v += 5.0 >>> v.avg 3.5 """ return self._sum / self._ctr mean = avg @property def sum(self): """ >>> v = Accumulator(2.0) >>> v += 5.0 >>> v.sum 7.0 """ return self._sum @property def variance(self): """ >>> v = Accumulator(2.0) >>> v += 5.0 >>> v.variance 2.25 """ s = self._sqr / self._ctr - self.avg ** 2 if self._bessel_correction: return self._ctr * (s / (self._ctr - 1)) else: return s def __add__(self, other): """ Addition semantics are: - ``stat + None`` returns ``stat`` - ``stat + stat`` returns the sum of their underlying values - ``stat + value`` inserts ``value`` into ``stat`` >>> v = Accumulator(2.0) >>> v + None 2.0 >>> v + v 4.0 >>> v + 3.0 5.0 """ if other is None: return copy.copy(self) elif not isinstance(other, Accumulator): ret = copy.copy(self) return ret.add(other) else: if self._repr != other._repr: raise ValueError("%s != %s" % (self._repr, other._repr)) ret = Accumulator(0) ret._min = min(self.min, other.min) ret._max = max(self.max, other.max) ret._sum = self._sum + other._sum ret._sqr = self._sqr + other._sqr ret._ctr = self._ctr + other._ctr ret._repr = self._repr return ret def __radd__(self, other): """ Revert to normal addition. 
""" return self + other def __sub__(self, other): """ Return the difference of the two nodes reduced to floats. """ return float(self) - float(other) def __float__(self): """ Reduce this stats object down a float depending on strategy chosen in constructor. >>> v = Accumulator(2.0, "min"); v += 3.0; float(v) 2.0 >>> v = Accumulator(2.0, "max"); v += 3.0; float(v) 3.0 >>> v = Accumulator(2.0, "avg"); v += 3.0; float(v) 2.5 >>> v = Accumulator(2.0, "sum"); v += 3.0; float(v) 5.0 >>> v = Accumulator(2.0, "variance"); v += 3.0; float(v) 0.25 """ return float(self.__getattribute__(self._repr)) def __str__(self): """ Reduce this stats object down a float depending on strategy chosen in constructor. >>> v = Accumulator(2.0, "min"); v += 3.0; str(v) '2.0' >>> v = Accumulator(2.0, "max"); v += 3.0; str(v) '3.0' >>> v = Accumulator(2.0, "avg"); v += 3.0; str(v) '2.5' >>> v = Accumulator(2.0, "sum"); v += 3.0; str(v) '5.0' >>> v = Accumulator(2.0, "variance"); v += 3.0; str(v) '0.25' """ return str(self.__getattribute__(self._repr)) __repr__ = __str__ class TraceContext(object): """ A trace context collects data about an underlying process on entry/exit of particular parts of the code. """ def __init__(self, tracer, *args, **kwds): """Create a new context for gathering statistics. :param tracer: a tracer object :param args: all args form a label for the trace context :param kwds: all kwds are considered auxiliary data """ self.tracer = tracer self.what = args if len(args) > 1 else args[0] self.kwds = kwds def __enter__(self): """ Call ``enter`` on trace object """ self.tracer.enter(self.what, **self.kwds) def __exit__(self, exception_type, exception_value, exception_traceback): """ Call ``exit`` on trace object """ self.tracer.exit(**self.kwds) if exception_type is not None: return False class Tracer(object): """ A trace object is used to collect information about processes. This base class does nothing. 
""" def __init__(self, instance, verbosity=False, max_depth=16): """ Create a new tracer instance. :param instance: BKZ-like object instance :param verbosity: print information, integers ≥ 0 are also accepted :param max_depth: record up to this depth. """ self.instance = instance self.verbosity = int(verbosity) self.max_depth = max_depth def context(self, *args, **kwds): """ Return a new ``TraceCotext``. """ return TraceContext(self, *args, **kwds) def enter(self, label, **kwds): """ An implementation would implement this function which controls what happens when the context given by ``label`` is entered. """ pass def exit(self, **kwds): """ An implementation would implement this function which controls what happens when the context given by ``label`` is left. """ pass def _pop(self): # NOTE: we handle ``max_depth`` by removing children when we exit. child = self.current self.current = self.current.parent if child.level > self.max_depth: self.current.del_child(child) # use a dummy_trace whenever no tracing is required dummy_tracer = Tracer(None) class Node(object): """ A simple tree implementation with labels and associated data. """ def __init__(self, label, parent=None, data=None): """Create a new node. :param label: some label such as a string or a tuple :param parent: nodes know their parents :param data: nodes can have associated data which is a key-value store where typically the values are statistics """ self.label = label if data is None: data = OrderedDict() self.data = OrderedDict(data) self.parent = parent self.children = [] def add_child(self, child): """Add a child. 
:param child: a node :returns: the child """ child.parent = self self.children.append(child) return child def del_child(self, child): """ >>> root = Node("root") >>> c1 = root.child("child1") >>> c2 = root.child("child2") >>> root.children [{"child1": {}}, {"child2": {}}] >>> root.del_child(c1) >>> root.children [{"child2": {}}] """ self.children.remove(child) def child(self, label): """ If node has a child labelled ``label`` return it, otherwise add a new child. :param label: a label :returns: a node >>> root = Node("root") >>> c1 = root.child("child"); c1 {"child": {}} >>> c2 = root.child("child"); c2 {"child": {}} >>> c1 is c2 True """ for child in self.children: if child.label == label: return child return self.add_child(Node(label)) def __str__(self): """ >>> from collections import OrderedDict >>> str(Node("root", data=OrderedDict([('a',1), ('b', 2)]))) '{"root": {"a": 1, "b": 2}}' """ return u'{"%s": %s}' % (self.label, pretty_dict(self.data)) __repr__ = __str__ def report(self, indentation=0, depth=None): """ Return detailed string representation of this tree. :param indentation: add spaces to the left of the string representation :param depth: stop at this depth >>> root = Node("root") >>> c1 = root.child(("child",1)) >>> c2 = root.child(("child",2)) >>> c3 = c1.child(("child", 3)) >>> c1.data["a"] = 100.0 >>> c3.data["a"] = 4097 >>> print(root.report()) {"root": {}} {"('child', 1)": {"a": 100.000}} {"('child', 3)": {"a": 4097}} {"('child', 2)": {}} >>> print(root.report(indentation=2, depth=1)) {"root": {}} {"('child', 1)": {"a": 100.000}} {"('child', 2)": {}} """ s = [" " * indentation + str(self)] if depth is None or depth > 0: for child in self.children: depth = None if depth is None else depth - 1 s.append(child.report(indentation + 2, depth=depth)) return "\n".join(s) def sum(self, tag, include_self=True, raise_keyerror=False, label=None): """ Return sum over all items tagged ``tag`` in associated data within this tree. 
:param tag: a string :param include_self: include data in this node :param raise_keyerror: if a node does not have an item tagged with ``tag`` raise a ``KeyError`` :param label: filter by ``label`` >>> root = Node("root") >>> c1 = root.child(("child",1)) >>> c2 = root.child(("child",2)) >>> c3 = c1.child(("child", 3)) >>> c1.data["a"] = 100.0 >>> c3.data["a"] = 4097 >>> root.sum("a") 4197.0 >>> root.sum("a", label=("child",3)) 4097 >>> root.sum("a", label=("child",2)) 0 >>> root.sum("a", label=("child",2), raise_keyerror=True) Traceback (most recent call last): ... KeyError: 'a' """ if include_self and (label is None or self.label == label): if raise_keyerror: r = self.data[tag] else: r = self.data.get(tag, 0) else: r = 0 for child in self.children: r = r + child.sum(tag, include_self=True, raise_keyerror=raise_keyerror, label=label) return r def find(self, label, raise_keyerror=False): """ Find the first child node matching label in a breadth-first search. :param label: a label :param raise_keyerror: raise a ``KeyError`` if ``label`` was not found. 
""" for child in self.children: if child.label == label: return child for child in self.children: try: return child.find(label, raise_keyerror) except KeyError: pass if raise_keyerror: raise KeyError("Label '%s' not present in '%s" % (label, self)) else: return None def find_all(self, label): """ Find all nodes labelled ``label`` :param label: a label """ r = [] if self.label == label: r.append(self) if isinstance(self.label, tuple) and self.label[0] == label: r.append(self) for child in self.children: r.extend(child.find_all(label)) return tuple(r) def __iter__(self): """ Depth-first iterate over all subnodes (including self) :: >>> root = Node("root") >>> c1 = root.child(("child",1)) >>> c2 = root.child(("child",2)) >>> c3 = c1.child(("child", 3)) >>> c1.data["a"] = 100.0 >>> c3.data["a"] = 4097 >>> list(root) [{"root": {}}, {"('child', 1)": {"a": 100.000}}, {"('child', 3)": {"a": 4097}}, {"('child', 2)": {}}] """ yield self for child in self.children: for c in iter(child): yield c def merge(self, node): """ Merge tree ``node`` into self. .. note :: The label of ``node`` is ignored. 
""" for k, v in node.data.iteritems(): if k in self.data: self.data[k] += v else: self.data[k] = v for child in node.children: self.child(child.label).merge(child) def get(self, label): """Return first child node with label ``label`` :param label: label >>> root = Node("root") >>> _ = root.child("bar") >>> c1 = root.child(("foo",0)) >>> c2 = root.child(("foo",3)) >>> c3 = c1.child(("foo", 3)) >>> c1.data["a"] = 100.0 >>> c3.data["a"] = 4097 >>> root.get("bar") {"bar": {}} >>> root.get("foo") ({"('foo', 0)": {"a": 100.000}}, {"('foo', 3)": {}}) >>> root.get("foo")[0] {"('foo', 0)": {"a": 100.000}} >>> root.get("foo")[1] {"('foo', 3)": {}} """ r = [] for child in self.children: if child.label == label: return child if isinstance(child.label, tuple) and child.label[0] == label: r.append(child) if r: return tuple(r) else: raise AttributeError("'Node' object has no attribute '%s'" % (label)) def __getitem__(self, tag): """Return associated data tagged ``tag``` :param tag: Some tag >>> root = Node("root", data={"foo": 1}) >>> c1 = root.child("foo") >>> root["foo"] 1 """ return self.data[tag] @property def level(self): """ Return level of this node, i.e. how many steps it takes to reach a node with parent ``None``. >>> root = Node("root") >>> _ = root.child("bar") >>> c1 = root.child(("foo",0)) >>> c2 = root.child(("foo",3)) >>> c3 = c1.child(("foo", 3)) >>> root.level 0 >>> c1.level 1 >>> c3.level 2 """ node, level = self, 0 while node.parent is not None: level += 1 node = node.parent return level def __sub__(self, rhs): """ Return tree that contains the difference of this node and the other. The semantics are as follows: - For all data in this node the matching data item in ``rhs`` is subtracted. - If the data is missing in ``rhs`` it is assumed to be zero. - For all children of this node this function is called recursively. - If ``rhs`` does not have an immediate child node with a matching label, those children are skipped. 
""" if not isinstance(rhs, Node): raise ValueError("Expected node but got '%s'" % type(rhs)) diff = Node(self.label) for k in self.data: diff.data[k] = self.data[k] - rhs.data.get(k, 0) for lchild in self.children: for rchild in rhs.children: if lchild.label == rchild.label: diff.children.append(lchild - rchild) break else: print("Skipping missing node '%s'" % lchild.label) return diff def copy(self, deepcopy=True): """ Return a (deep)copy of this node. :param deepcopy: If ``False`` child nodes and data dictionaries are not copied, this is usually not what the user wants. """ if deepcopy: return copy.deepcopy(self) else: return copy.copy(self) def accumulate(self, key, filter=lambda node: True, repr="avg"): """ Return accumulator value for all occurrences of ``key``:: >>> root = Node("root") >>> c1 = root.child(("child",1)) >>> c2 = root.child(("child",2)) >>> c3 = c1.child(("child", 3)) >>> c1.data["a"] = 100.0 >>> c3.data["a"] = 4097 >>> root.accumulate("a").sum 4197.0 >>> root.accumulate("a", filter=lambda node: node.label == ("child", 3)).sum 4097 :param key: Dictionary key to some ``data`` attribute :param filter: Callable that should return ``True`` for nodes that ought to be considered. :param repr: Representation of accumulator """ acc = Accumulator(0, repr=repr, count=False) for node in iter(self): if filter(node) and key in node.data: acc += node.data[key] return acc class TimeTreeTracer(Tracer): """ Collect CPU and wall time for every context visited, creating a tree structure along the way. """ def __init__( self, instance, verbosity=False, root_label="root", start_clocks=False, max_depth=1024, ): """ Create a new tracer instance. :param instance: BKZ-like object instance :param verbosity: print information, integers ≥ 0 are also accepted :param root_label: label to give to root node :param start_clocks: start tracking time for the root node immediately :param max_depth: record up to this depth. 
""" Tracer.__init__(self, instance, verbosity, max_depth) self.trace = Node(root_label) self.current = self.trace if start_clocks: self.reenter() def enter(self, label, **kwds): """Enter new context with label :param label: label """ self.current = self.current.child(label) self.reenter() def reenter(self, **kwds): """Reenter current context, i.e. restart clocks""" if self.current is None: # we exited the root node self.current = self.trace node = self.current node.data["cputime"] = node.data.get("cputime", 0) + Accumulator( -process_time(), repr="sum", count=False ) node.data["walltime"] = node.data.get("walltime", 0) + Accumulator( -time.time(), repr="sum", count=False ) def exit(self, **kwds): """ Leave context, record time spent. :param label: ignored .. note :: If verbosity ≥ to the current level, also print the current node. """ node = self.current node.data["cputime"] += process_time() node.data["walltime"] += time.time() if self.verbosity and self.verbosity >= self.current.level: print(self.current) self._pop() class BKZTreeTracer(Tracer): """ Default tracer for BKZ-like algorithms. """ def __init__( self, instance, verbosity=False, root_label="bkz", start_clocks=False, max_depth=16 ): """ Create a new tracer instance. :param instance: BKZ-like object instance :param verbosity: print information, integers ≥ 0 are also accepted :param root_label: label to give to root node :param start_clocks: start tracking time for the root node immediately :param max_depth: record up to this depth. 
TESTS:: >>> from fpylll.tools.bkz_stats import BKZTreeTracer >>> tracer = BKZTreeTracer(None) >>> for i in range(3): tracer.enter("level-%d"%i) >>> for i in range(3): tracer.exit() >>> "level-2" in tracer.trace.report() True >>> tracer = BKZTreeTracer(None, max_depth=2) >>> for i in range(3): tracer.enter("level-%d"%i) >>> for i in range(3): tracer.exit() >>> "level-2" in tracer.trace.report() False """ Tracer.__init__(self, instance, verbosity, max_depth) self.trace = Node(root_label) self.current = self.trace if start_clocks: self.reenter() def enter(self, label, **kwds): """Enter new context with label :param label: label """ self.current = self.current.child(label) self.reenter() def reenter(self, **kwds): """Reenter current context, i.e. restart clocks""" node = self.current node.data["cputime"] = node.data.get("cputime", 0) + Accumulator( -process_time(), repr="sum", count=False ) node.data["walltime"] = node.data.get("walltime", 0) + Accumulator( -time.time(), repr="sum", count=False ) def exit(self, **kwds): # noqa, shut up linter about this function being too complex """ By default CPU and wall time are recorded. More information is recorded for "enumeration" and "tour" labels. When the label is a tour then the status is printed if verbosity > 0. 
""" node = self.current label = node.label node.data["cputime"] += process_time() node.data["walltime"] += time.time() if kwds.get("dump_gso", False): node.data["r"] = node.data.get("r", []) + [self.instance.M.r()] if label == "enumeration": full = kwds.get("full", True) if full: try: node.data["#enum"] = Accumulator( kwds["enum_obj"].get_nodes(), repr="sum" ) + node.data.get( "#enum", None ) # noqa except KeyError: pass try: node.data["%"] = Accumulator(kwds["probability"], repr="avg") + node.data.get( "%", None ) except KeyError: pass if label[0] == "tour": data = basis_quality(self.instance.M) for k, v in data.items(): if k == "/": node.data[k] = Accumulator(v, repr="max") else: node.data[k] = Accumulator(v, repr="min") if self.verbosity and label[0] == "tour": report = OrderedDict() report["i"] = label[1] report["cputime"] = node["cputime"] report["walltime"] = node["walltime"] try: report["preproc"] = node.find("preprocessing", True)["cputime"] except KeyError: pass try: report["svp"] = node.find("enumeration", True)["cputime"] except KeyError: pass report["#enum"] = node.sum("#enum") report["lll"] = node.sum("cputime", label="lll") try: report["pruner"] = node.find("pruner", True)["cputime"] except KeyError: pass report["r_0"] = node["r_0"] report["/"] = node["/"] print(pretty_dict(report)) self._pop() def normalize_tracer(tracer): """ Normalize tracer inputs for convenience. :param tracer: ``True`` for ``BKZTreeTracer``, ``False`` for ``dummy_tracer`` or any other value for custom tracer. 
EXAMPLE:: >>> from fpylll.tools.bkz_stats import normalize_tracer, BKZTreeTracer, dummy_tracer >>> normalize_tracer(True) == BKZTreeTracer True >>> normalize_tracer(False) == dummy_tracer True >>> normalize_tracer(BKZTreeTracer) == BKZTreeTracer True """ if tracer is True: return BKZTreeTracer elif tracer is False: return dummy_tracer else: return tracer fpylll-0.6.1/src/fpylll/tools/compare.py000066400000000000000000000374311455321202600202610ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Compare the performance of BKZ variants. .. moduleauthor:: Martin R. Albrecht .. note :: This module does not work with standard BKZ classes. Instead, it expects that the BKZ classes it consumes accept a block size as ``params`` in the ``tour`` member function. This way, the construction of param objects can be rolled into the class description. See example classes below. """ # Imports from __future__ import absolute_import from collections import OrderedDict from fpylll import IntegerMatrix, BKZ from fpylll import FPLLL from fpylll.tools.bkz_stats import BKZTreeTracer, dummy_tracer, pretty_dict from fpylll.tools.quality import basis_quality from fpylll.util import ReductionError from multiprocessing import Pool import logging import copy import time import pickle import fpylll.algorithms.bkz import fpylll.algorithms.bkz2 # Utility Functions def play(BKZ, A, block_size, tours, progressive_step_size=None): """Call ``BKZ`` on ``A`` with ``block_size`` for the given number of ``tours``. The given number of tours is used for all block sizes from 2 up to ``block_size`` in increments of ``progressive_step_size``. Providing ``None`` for this parameter disables the progressive strategy. 
:param BKZ: a BKZ class whose ``__call__`` accepts a single block size as parameter :param A: an integer matrix :param block_size: a block size >= 2 :param tours: number of tours >= 1 :param progressive_step_size: step size for progressive strategy :returns: a trace of the execution using ``BKZTreeTracer`` .. note :: This function essentially reimplements ``BKZ.__call__`` but supports the progressive strategy. """ bkz = BKZ(copy.copy(A)) tracer = BKZTreeTracer(bkz, start_clocks=True) # this essentially initialises the GSO object, LLL was already run by the constructor, so this # is quick. with tracer.context("lll"): bkz.lll_obj() if progressive_step_size is None: block_sizes = (block_size,) elif int(progressive_step_size) > 0: block_sizes = range(2, block_size+1, progressive_step_size) if block_sizes[-1] != block_size: block_sizes.append(block_size) else: raise ValueError("Progressive step size of %s not understood."%progressive_step_size) for block_size in block_sizes: for i in range(tours): with tracer.context("tour", (block_size, i)): bkz.tour(block_size, tracer=tracer) tracer.exit() trace = tracer.trace quality = basis_quality(bkz.M) for k, v in quality.items(): trace.data[k] = v return trace class Conductor(object): """ A conductor is our main class for launching block-wise lattice reductions and collecting the outputs. """ def __init__(self, threads=1, pickle_jar=None, logger="."): """Create a new conductor object. 
:param threads: number of threads :param pickle_jar: dump traces to this file continuously """ self.pool = Pool(processes=threads) self.threads = threads self.pickle_jar = pickle_jar self.logger = logging.getLogger(logger) self.outputs = OrderedDict() self._major_strlen = 0 self._minor_strlen = 0 def _majorminor_format_str(self): "Used to align log files" return "%%%ds(%%%ds) :: %%s"%(self._major_strlen, self._minor_strlen) def _update_strlens(self, major, minor): "Update string lengths of major/minor tags" self._major_strlen = max(len(str(major)), self._major_strlen) self._minor_strlen = max(len(str(minor)), self._minor_strlen) @staticmethod def dump(data, filename): "Pickle ``data`` to ``filename``" pickle.dump(data, open(filename, "wb")) def wait_on(self, outputs, todo, sleep=1): """Wait for jobs in ``todo`` to return and store results in ``outputs``. :param outputs: store results here :param todo: these are running jobs :param sleep: seconds to sleep before checking if new results are availabl. """ fmtstr = self._majorminor_format_str() while todo: collect = [(tag, res) for (tag, res) in todo if res.ready()] for tag, res in collect: major, minor = tag try: res = res.get() if major not in outputs: outputs[major] = [] outputs[major].append((minor, res)) self.logger.debug(fmtstr%(major, minor, pretty_dict(res.data))) if self.pickle_jar is not None: Conductor.dump(self.outputs, self.pickle_jar) except ReductionError: self.logger.debug("ReductionError for %s(%s)."%(major, minor)) todo = todo.difference(collect) time.sleep(sleep) return outputs def log_averages(self, tags, outputs): """ Log average values for all entries tagged as ``tags`` in ``outputs``. 
""" fmtstr = self._majorminor_format_str() avg = OrderedDict() for major, minor in tags: if major in avg: continue avg[major] = OrderedDict() n = len(outputs[major]) for minor, output in outputs[major]: for k, v in output.data.items(): avg[major][k] = avg[major].get(k, 0.0) + float(v)/n self.logger.info(fmtstr%(major, "avg", pretty_dict(avg[major]))) def __call__(self, jobs, current=None): """ Call ``jobs`` in parallel. The parameter jobs is a list with the following format. Each entry is one of the following: - a tuple ``((major, minor), (BKZ, A, block_size, tours, progressive_step_size))``, where ``major`` and ``minor`` are arbitrary hashable tags and the rest are valid inputs to ``play``. - A list with elements of the same format as above. Entries at the same level are considered to be a group. All jobs in the same group go into the same execution pool. At the end of the execution of a group the average across all ``minor`` tags of a ``major`` tag are shown. .. note :: Recursive jobs, i.e. those in a sub-list are run first, this is an implementation artefact. Typically, we don't expect jobs and lists of jobs to be mixed at the same level, though, this is supported. 
""" inputs = OrderedDict() if current is None: current = self.outputs # filter out sub-jobs that should be grouped and call recursively for tag, job in jobs: if isinstance(job[0], (list, tuple)): self.logger.info("") self.logger.info("# %s (size: %d) #"%(tag, len(job))) current[tag] = OrderedDict() self(job, current=current[tag]) else: major, minor = tag self._update_strlens(major, minor) if major not in current: current[major] = list() inputs[tag] = job self.logger.debug("") # base case if self.threads > 1: todo = set() for tag in inputs: todo.add((tag, self.pool.apply_async(play, inputs[tag]))) current = self.wait_on(current, todo) else: fmtstr = self._majorminor_format_str() for tag in inputs: major, minor = tag try: res = play(*inputs[tag]) current[major].append((minor, res)) self.logger.debug(fmtstr%(major, minor, pretty_dict(res.data))) if self.pickle_jar is not None: Conductor.dump(self.outputs, self.pickle_jar) except ReductionError: self.logger.debug("ReductionError for %s(%s)."%(major, minor)) self.logger.debug("") # print averages per major tag self.log_averages(inputs.keys(), current) if self.pickle_jar is not None: Conductor.dump(self.outputs, self.pickle_jar) return self.outputs def compare_bkz(classes, matrixf, dimensions, block_sizes, progressive_step_size, seed, threads=2, samples=2, tours=1, pickle_jar=None, logger="compare"): """ Compare BKZ-style lattice reduction. :param classes: a list of BKZ classes to test. See caveat above. 
:param matrixf: A function to create matrices for a given dimension and block size :param dimensions: a list of dimensions to test :param block_sizes: a list of block sizes to test :param progressive_step_size: step size for the progressive strategy; ``None`` to disable it :param seed: A random seed, each matrix will be created with seed increased by one :param threads: number of threads to use :param samples: number of reductions to perform :param tours: number of BKZ tours to run :param log_filename: log to this file if not ``None`` """ jobs = [] for dimension in dimensions: jobs.append((dimension, [])) for block_size in block_sizes: if dimension < block_size: continue seed_ = seed jobs_ = [] matrixf_ = matrixf(dimension=dimension, block_size=block_size) for i in range(samples): FPLLL.set_random_seed(seed_) A = IntegerMatrix.random(dimension, **matrixf_) for BKZ_ in classes: args = (BKZ_, A, block_size, tours, progressive_step_size) jobs_.append(((BKZ_.__name__, seed_), args)) seed_ += 1 jobs[-1][1].append((block_size, jobs_)) conductor = Conductor(threads=threads, pickle_jar=pickle_jar, logger=logger) return conductor(jobs) # Example class BKZGlue(object): "Base class for producing new BKZ classes with some parameters fixed." def tour(self, params, min_row=0, max_row=-1, tracer=dummy_tracer): if isinstance(params, int): params = BKZ.Param(block_size=params, **self.kwds) return self.base.tour(self, params, min_row=min_row, max_row=max_row, tracer=tracer) def BKZFactory(name, BKZBase, **kwds): """ Return a new BKZ class, derived from ``BKZBase`` with given ``name``. 
The resulting class accepts a single ``block_size`` parameter for ``tour`` and substitutes it with a ``BKZ.Param``` object where the keyword parameters provided to this function are fixed the values provided to this function :: >>> from fpylll.algorithms.bkz2 import BKZReduction as BKZ2 >>> from fpylll import BKZ >>> from fpylll.tools.compare import BKZFactory >>> BKZ2_LOW = BKZFactory('BKZ2_LOW', BKZ2, strategies=BKZ.DEFAULT_STRATEGY, min_success_probability=0.1) :param name: name for output class :param BKZBase: base class to base this class on """ NEW_BKZ = type(name, (BKZGlue, BKZBase), {"kwds": kwds, "base": BKZBase}) globals()[name] = NEW_BKZ # this is a HACK to enable pickling return NEW_BKZ BKZ1 = BKZFactory("BKZ1", fpylll.algorithms.bkz.BKZReduction) BKZ2 = BKZFactory("BKZ2", fpylll.algorithms.bkz2.BKZReduction, strategies=BKZ.DEFAULT_STRATEGY) # Main def qary30(dimension, block_size): return {"algorithm": "qary", "k": dimension//2, "bits": 30, "int_type": "long"} def setup_logging(name, verbose=False): import subprocess import datetime hostname = str(subprocess.check_output("hostname").rstrip()) now = datetime.datetime.today().strftime("%Y-%m-%d-%H:%M") log_name = "{name}-{hostname}-{now}".format(name=name, hostname=hostname, now=now) logging.basicConfig(level=logging.DEBUG, format='%(levelname)5s:%(name)s:%(asctime)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S %Z', filename=log_name + ".log") # define a Handler which writes INFO messages or higher to the sys.stderr console = logging.StreamHandler() console.setLevel(logging.INFO if not verbose else logging.DEBUG) console.setFormatter(logging.Formatter('%(name)s: %(message)s',)) logging.getLogger(name).addHandler(console) return log_name def names_to_classes(class_names, filenames): """ Try to find a class for each name in ``class_names``. Classes implemented in one of the ``filenames`` are also considered. 
The following mapping logic is used: - if a class with ``name`` exists, it is used - if ``name`` is ``BKZ_FOO`` and a class called ``BKZReduction`` is implemented in a file ``bkz_foo`` it is used. :param class_names: :param filenames: """ import imp import os import re classes = class_names classes = [globals().get(clas, clas) for clas in classes] for i, fn in enumerate(filenames): tmp = imp.load_source("compare_module%03d"%i, fn) # find the class by name in the module classes = [tmp.__dict__.get(clas, clas) for clas in classes] # check if there's some BKZReduction implemented in bkz_foo.py, we match this with BKZ_FOO if fn.startswith(os.path.basename(fn)) and "BKZReduction" in tmp.__dict__: candidate = re.sub("bkz(.*)\\.py", "BKZ\\1", os.path.basename(fn)).upper() if candidate in classes: tmp.BKZReduction.__name__ = candidate classes[classes.index(candidate)] = tmp.BKZReduction for clas in classes: if isinstance(clas, str): raise ValueError("Cannot find '%s'"%clas) return classes if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Measure Pressure') parser.add_argument('-c', '--classes', help='BKZ classes', type=str, nargs='+', default=["BKZ2"]) parser.add_argument('-f', '--files', help='additional files to load for BKZ classes', type=str, nargs='+', default=list()) parser.add_argument('-t', '--threads', help='number of threads to use', type=int, default=1) parser.add_argument('-r', '--tours', help='number of BKZ tours', type=int, default=1) parser.add_argument('-s', '--samples', help='number of samples to try', type=int, default=4) parser.add_argument('-z', '--seed', help="random seed", type=int, default=0x1337) parser.add_argument('-b', '--block-sizes', help='block sizes', type=int, nargs='+', default=(10, 20, 30, 40)) parser.add_argument('-p', '--progressive-step-size', help='step size for progressive strategy, None for disabled', default=None, type=int) parser.add_argument('-d', '--dimensions', help='lattice dimensions', 
type=int, nargs='+', default=(60, 80, 100, 120)) parser.add_argument('-v', '--verbose', help='print more details by default', action='store_true') args = parser.parse_args() name = "compare" classes = names_to_classes(args.classes, args.files) log_filename = setup_logging(name, args.verbose) for k, v in sorted(vars(args).items()): logging.getLogger(name).debug("%s: %s"%(k, v)) results = compare_bkz(classes=classes, matrixf=qary30, block_sizes=args.block_sizes, progressive_step_size=args.progressive_step_size, dimensions=args.dimensions, logger=name, pickle_jar=log_filename + ".sobj", seed=args.seed, threads=args.threads, samples=args.samples, tours=args.tours) fpylll-0.6.1/src/fpylll/tools/quality.py000066400000000000000000000063131455321202600203160ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ """ from math import log, exp from collections import OrderedDict from fpylll.util import gaussian_heuristic def get_current_slope(r, start_row=0, stop_row=-1): """ A Python re-implementation of ``MatGSO.get_current_slope``. >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(100, "qary", bits=30, k=50) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A); _ = M.update_gso() >>> from fpylll.tools.quality import get_current_slope >>> M.get_current_slope(0, 100) # doctest: +ELLIPSIS -0.085500625... >>> get_current_slope(M.r(), 0, 100) # doctest: +ELLIPSIS -0.085500625... """ x = [log(r[i]) for i in range(start_row, stop_row)] n = stop_row - start_row i_mean = (n - 1) * 0.5 + start_row x_mean = sum(x)/n v1, v2 = 0.0, 0.0 for i in range(start_row, stop_row): v1 += (i - i_mean) * (x[i] - x_mean) v2 += (i - i_mean) * (i - i_mean) return v1 / v2 def basis_quality(M): r""" Return a dictionary with various expressions of quality of the basis corresponding to ``M``. Let `|b_i^*|` be the norm of the `i`-th Gram-Schmidt vector. Let `Λ` be the lattice spanned by the basis of dimension `d`. 
- ``r_0`` - `|b_0|^2` - ``\`` - the slope of `\log(|b_i^*|)` - ``rhf`` - the root-Hermite factor `|b_0|/\Vol(Λ)^{1/d}` also written as `\delta_0` - ``hv/hv`` - the dth-root of the fraction of the first and second half-volumes, i.e. the dth-root of `∏_{i=0}^{d/2-1} |b_i|/∏_{i=d/2}^{d-1} |b_i|`. If `d` is odd, the length `|b_{d//2}|` is ignored. - ``r_0/gh`` - `|b_0|/GH` where `GH = Γ(d/2+1)^{1/d}/π^{1/2} ⋅ \Vol(Λ)^{1/d}` is the Gaussian Heuristic for the shortest vector. :param M: A MatGSO object or a vector of squared Gram-Schmidt norms. Example: >>> from fpylll import IntegerMatrix, GSO, LLL, FPLLL >>> FPLLL.set_random_seed(1337) >>> A = IntegerMatrix.random(100, "qary", bits=30, k=50) >>> _ = LLL.reduction(A) >>> M = GSO.Mat(A); _ = M.update_gso() >>> from fpylll.tools.quality import basis_quality >>> from fpylll.tools.bkz_stats import pretty_dict >>> str(pretty_dict(basis_quality(M))) '{"r_0": 2^34.0, "r_0/gh": 9.389811, "rhf": 1.020530, "/": -0.08550, "hv/hv": 2.940943}' >>> str(pretty_dict(basis_quality(M.r()))) '{"r_0": 2^34.0, "r_0/gh": 9.389811, "rhf": 1.020530, "/": -0.08550, "hv/hv": 2.940943}' """ try: d = M.d r = [M.get_r(i, i) for i in range(d)] except AttributeError: d = len(M) r = M ret = OrderedDict() log_volume = sum(log(r_)/2 for r_ in r) lhs = sum(log(r_)/2 for r_ in r[:d//2]) rhs = sum(log(r_)/2 for r_ in r[d//2 + (d%2):]) ret["r_0"] = r[0] ret["r_0/gh"] = r[0]/gaussian_heuristic(r) ret["rhf"] = exp((log(r[0])/2.0 - log_volume/d)/d) try: ret['/'] = M.get_current_slope(0, d) except AttributeError: ret["/"] = get_current_slope(M, 0, d) ret["hv/hv"] = exp((lhs - rhs)/d) return ret fpylll-0.6.1/src/fpylll/util.pxd000066400000000000000000000010211455321202600165750ustar00rootroot00000000000000from .gmp.mpz cimport mpz_t from .fplll.fplll cimport FloatType, Z_NR, PrunerMetric, IntType from .fplll.fplll cimport BKZParam as BKZParam_c from .fplll.fplll cimport PrunerMetric cdef object check_float_type(object float_type) cdef object check_int_type(object 
int_type) cdef int preprocess_indices(int &i, int &j, int m, int n) except -1 cdef int check_precision(int precision) except -1 cdef int check_eta(float eta) except -1 cdef int check_delta(float delta) except -1 cdef PrunerMetric check_pruner_metric(object metric) fpylll-0.6.1/src/fpylll/util.pyx000066400000000000000000000273441455321202600166420ustar00rootroot00000000000000# -*- coding: utf-8 -*- include "fpylll/config.pxi" from contextlib import contextmanager from fpylll.fplll.fplll cimport FP_NR, RandGen, dpe_t from fpylll.fplll.fplll cimport FT_DEFAULT, FT_DOUBLE, FT_LONG_DOUBLE, FT_DPE, FT_MPFR from fpylll.fplll.fplll cimport IntType, ZT_LONG, ZT_MPZ from fpylll.fplll.fplll cimport adjust_radius_to_gh_bound as adjust_radius_to_gh_bound_c from fpylll.fplll.fplll cimport set_external_enumerator as set_external_enumerator_c from fpylll.fplll.fplll cimport get_external_enumerator as get_external_enumerator_c from fpylll.fplll.fplll cimport extenum_fc_enumerate from fpylll.fplll.fplll cimport get_root_det as get_root_det_c from fpylll.fplll.fplll cimport PRUNER_METRIC_PROBABILITY_OF_SHORTEST, PRUNER_METRIC_EXPECTED_SOLUTIONS, PrunerMetric from fpylll.fplll.fplll cimport get_threads as get_threads_c, set_threads as set_threads_c from fpylll.fplll.gso cimport MatGSO from fpylll.gmp.random cimport gmp_randstate_t, gmp_randseed_ui, gmp_urandomm_ui from fpylll.mpfr.mpfr cimport mpfr_t from math import log, exp, lgamma, pi from math import sqrt as sqrtf from libcpp.functional cimport function IF HAVE_QD: from fpylll.qd.qd cimport dd_real, qd_real from fpylll.fplll.fplll cimport FT_DD, FT_QD cdef extern from "util_helper.h": function[extenum_fc_enumerate] void_ptr_to_function(void *ptr) float_aliases = {'d': 'double', 'ld': 'long double'} # We return `object` to permit exceptions cdef object check_float_type(object float_type): float_type = float_aliases.get(float_type, float_type) if float_type == "default" or float_type is None: return FT_DEFAULT if float_type == 
"double": return FT_DOUBLE if float_type == "long double": return FT_LONG_DOUBLE if float_type == "dpe": return FT_DPE IF HAVE_QD: if float_type == "dd": return FT_DD if float_type == "qd": return FT_QD if float_type == "mpfr": return FT_MPFR raise ValueError("Float type '%s' unknown." % float_type) cdef object check_int_type(object int_type): if int_type == "default" or int_type is None: return ZT_MPZ if int_type == "mpz": return ZT_MPZ if int_type == "long": return ZT_LONG raise ValueError("Integer type '%s' unknown." % int_type) cdef PrunerMetric check_pruner_metric(object metric): if metric == "probability" or metric == PRUNER_METRIC_PROBABILITY_OF_SHORTEST: return PRUNER_METRIC_PROBABILITY_OF_SHORTEST elif metric == "solutions" or metric == PRUNER_METRIC_EXPECTED_SOLUTIONS: return PRUNER_METRIC_EXPECTED_SOLUTIONS else: raise ValueError("Pruner metric '%s' not supported."%metric) cdef int preprocess_indices(int &i, int &j, int m, int n) except -1: if i < 0: (&i)[0] %= m if j < 0: (&j)[0] %= n if i >= m: raise IndexError("First index must be < %d but got %d."%(n, i)) if j >= n: raise IndexError("Second index must be < %d but got %d."%(n, j)) return 0 cdef int check_precision(int precision) except -1: """ Check whether the provided precision is within valid bounds. If not raise a ``TypeError``. :param precision: an integer """ if precision < 53 and precision != 0: raise TypeError("precision must be >= 53 or equal to 0") cdef int check_eta(float eta) except -1: """ Check whether the provided parameter ``eta`` is within valid bounds. If not raise a ``TypeError``. :param eta: a floating point number """ if eta < 0.5: raise TypeError("eta must be >= 0.5") cdef int check_delta(float delta) except -1: """ Check whether the provided parameter ``delta`` is within valid bounds. If not raise a ``TypeError``. 
:param delta: a floating point number """ if delta <= 0.25: raise TypeError("delta must be > 0.25") elif delta > 1.0: raise TypeError("delta must be <= 1.0") def set_random_seed(unsigned long seed): """Set random seed. :param seed: a new seed. """ if not RandGen.get_initialized(): RandGen.init() cdef gmp_randstate_t state = RandGen.get_gmp_state() gmp_randseed_ui(state, seed) def randint(a, b): """ Return random integer in range [a, b], including both end points. """ if not RandGen.get_initialized(): RandGen.init() cdef gmp_randstate_t state = RandGen.get_gmp_state() return (gmp_urandomm_ui(state, b+1-a)) + a def get_precision(float_type="mpfr"): """Get currently set precision :param float_type: one of 'double', 'long double', 'dpe', 'dd', 'qd' or 'mpfr' :returns: precision in bits This function returns the precision per type:: >>> import fpylll >>> from fpylll import FPLLL >>> FPLLL.get_precision('double') 53 >>> if fpylll.config.have_long_double: ... FPLLL.get_precision('long double') > 53 ... else: ... 
True True >>> FPLLL.get_precision('dpe') 53 For the MPFR type different precisions are supported:: >>> _ = FPLLL.set_precision(212) >>> FPLLL.get_precision('mpfr') 212 >>> FPLLL.get_precision() 212 >>> _ = FPLLL.set_precision(53) """ cdef FloatType float_type_ = check_float_type(float_type) if float_type_ == FT_DOUBLE: return FP_NR[double].get_prec() IF HAVE_LONG_DOUBLE: if float_type_ == FT_LONG_DOUBLE: return FP_NR[longdouble].get_prec() if float_type_ == FT_DPE: return FP_NR[dpe_t].get_prec() IF HAVE_QD: if float_type_ == FT_DD: return FP_NR[dd_real].get_prec() if float_type_ == FT_QD: return FP_NR[qd_real].get_prec() if float_type_ == FT_MPFR: return FP_NR[mpfr_t].get_prec() raise ValueError("Floating point type '%s' unknown."%float_type) def set_precision(unsigned int prec): """Set precision globally for MPFR :param prec: an integer >= 53 :returns: current precision """ if prec == 0: prec = 53 if prec < 53: raise ValueError("Precision (%d) too small."%prec) return FP_NR[mpfr_t].set_prec(prec) @contextmanager def precision(prec): """Run with precision ``prec`` temporarily. :param prec: temporary precision :returns: temporary precision being used >>> from fpylll import FPLLL >>> with FPLLL.precision(212) as prec: print(prec) 212 >>> FPLLL.get_precision() 53 >>> with FPLLL.precision(212): FPLLL.get_precision() 212 """ old_prec = set_precision(prec) try: yield get_precision() finally: set_precision(old_prec) def adjust_radius_to_gh_bound(double dist, int dist_expo, int block_size, double root_det, double gh_factor): """ Use Gaussian Heuristic to reduce bound on the length of the shortest vector. 
:param double dist: norm of shortest vector :param int dist_expo: exponent of norm (for dpe representation) :param int block_size: block size :param double root_det: root determinant :param double gh_factor: factor to multiply with :returns: (dist, expo) """ cdef FP_NR[double] gh_dist = dist cdef FP_NR[double] root_det_ = root_det adjust_radius_to_gh_bound_c[FP_NR[double]](gh_dist, dist_expo, block_size, root_det_, gh_factor) return gh_dist.get_d(), dist_expo class ReductionError(RuntimeError): pass def ball_log_vol(n): """ Return volume of `n`-dimensional unit ball :param n: dimension """ return (n/2.) * log(pi) - lgamma(n/2. + 1) def gaussian_heuristic(r): """ Return squared norm of shortest vector as predicted by the Gaussian heuristic. :param r: vector of squared Gram-Schmidt norms """ n = len(list(r)) log_vol = sum([log(x) for x in r]) log_gh = 1./n * (log_vol - 2 * ball_log_vol(n)) return exp(log_gh) def vector_norm(x, y=None, sqrt=False): """ Return the squared Euclidean norm of `x` :param x: a vector-like object :param y: if not ``None`` compute norm of `x-y` :param sqrt: if ``False`` compute squared norm :returns: (squared) Euclidean norm of `x-y` .. note :: We consider the minimum dimension of `x` and `y`. """ d = 0 if y is None: y = (0,)*len(x) for i in range(min(len(x), len(y))): d += (x[i]-y[i])**2 if sqrt: d = sqrtf(d) return d cpdef set_external_enumerator(enumerator): """ Set an external enumeration library. For example, assume you compiled a `fplll-extenum `_ First, we load the required Python modules: fpylll and `ctypes `_ >>> from fpylll import * # doctest: +SKIP >>> import ctypes # doctest: +SKIP Then, using ``ctypes`` we dlopen ``enumlib.so`` >>> enumlib = ctypes.cdll.LoadLibrary("enumlib.so") # doctest: +SKIP For demonstration purposes we increase the loglevel. Note that functions names are result of C++ compiler name mangling and may differ depending on platform/compiler/linker. 
>>> enumlib._Z20enumlib_set_logleveli(1) # doctest: +SKIP We grab the external enumeration function >>> fn = enumlib._Z17enumlib_enumerateidSt8functionIFvPdmbS0_S0_EES_IFddS0_EES_IFvdS0_iEEbb # doctest: +SKIP and pass it to FPLLL >>> FPLLL.set_external_enumerator(fn) # doctest: +SKIP To disable the external enumeration library, call >>> FPLLL.set_external_enumerator(None) # doctest: +SKIP :param enumerator: CTypes handle """ import ctypes cdef unsigned long p if not enumerator: set_external_enumerator_c(NULL) elif isinstance(enumerator, ctypes._CFuncPtr): p = ctypes.cast(enumerator, ctypes.c_void_p).value set_external_enumerator_c(void_ptr_to_function(p)) @contextmanager def external_enumerator(enumerator): """ Temporarily use ``enumerator``. :param enumerator: CTypes handle """ cdef function[extenum_fc_enumerate] fn = get_external_enumerator_c() set_external_enumerator(enumerator) try: yield finally: set_external_enumerator_c(fn) def set_threads(int th=1): """ Set the number of threads. :param th: number of threads This is currently only used for enumeration. .. note: If you use ``multiprocessing`` etc you must call this function after forking to have an effect. This prevents the threadpool from being shared. """ return set_threads_c(th) def get_threads(): """ Get the number of threads. .. note: Currently only used for enumeration. 
""" return get_threads_c() @contextmanager def threads(int th=1): """ Run with ``th`` threads temporarily :param th: number of threads ≥ 1 :returns: number of threads used >>> from fpylll import FPLLL >>> import multiprocessing >>> max_th = multiprocessing.cpu_count() >>> with FPLLL.threads(4) as th: th == min(max_th, 4) True >>> FPLLL.get_threads() 1 >>> with FPLLL.threads(4) as th: FPLLL.get_threads() == min(max_th, 4) True """ old_th = get_threads() set_threads(th) try: yield get_threads() finally: set_threads(old_th) class FPLLL: set_precision = staticmethod(set_precision) get_precision = staticmethod(get_precision) precision = staticmethod(precision) set_threads = staticmethod(set_threads) get_threads = staticmethod(get_threads) threads = staticmethod(threads) set_random_seed = staticmethod(set_random_seed) randint = staticmethod(randint) set_external_enumerator = staticmethod(set_external_enumerator) external_enumerator = staticmethod(external_enumerator) fpylll-0.6.1/src/fpylll/util_helper.h000066400000000000000000000002731455321202600176000ustar00rootroot00000000000000#pragma once #include inline std::function void_ptr_to_function(void *ptr) { return reinterpret_cast(ptr); } fpylll-0.6.1/suggestions.txt000066400000000000000000000000451455321202600161320ustar00rootroot00000000000000ipython numpy Sphinx>=1.6 matplotlib fpylll-0.6.1/tests/000077500000000000000000000000001455321202600141625ustar00rootroot00000000000000fpylll-0.6.1/tests/test_bkz.py000066400000000000000000000044561455321202600163720ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import GSO, IntegerMatrix, BKZ, LLL from fpylll.config import float_types from copy import copy import tools dimensions = ((0, 0), (1, 1), (2, 2), (3, 3), (10, 10), (50, 50), (60, 60),) def make_integer_matrix(m, n, int_type="mpz"): A = IntegerMatrix(m, n, int_type=int_type) A.randomize("uniform", bits=20) return A def test_bkz_init(): for m, n in dimensions: A = make_integer_matrix(m, n) for float_type 
in float_types: M = GSO.Mat(copy(A), float_type=float_type) lll_obj = LLL.Reduction(M) param = BKZ.Param(block_size=3, strategies=BKZ.DEFAULT_STRATEGY) bkz = BKZ.Reduction(M, lll_obj, param) del bkz def test_bkz_bkz(): for m, n in dimensions: if m < 2 or n < 2: continue A = make_integer_matrix(m, n) b00 = [] for float_type in float_types: B = copy(A) M = GSO.Mat(B, float_type=float_type) lll_obj = LLL.Reduction(M) param = BKZ.Param(block_size=min(m, 40), strategies=BKZ.DEFAULT_STRATEGY) bkz = BKZ.Reduction(M, lll_obj, param) bkz() b00.append(B[0, 0]) for i in range(1, len(b00)): assert b00[0] == b00[i] def test_bkz_gram_bkz_coherence(): """ Test if BKZ is coherent if it is given a matrix A or its associated Gram matrix A*A^T We should have Gram(BKZ_basis(A)) = BKZ_Gram(Gram(A)). """ for m, n in dimensions: if m < 2 or n < 2: continue for float_type in float_types: A = make_integer_matrix(m, n) G = tools.compute_gram(A) GSO_A = GSO.Mat(A, float_type=float_type) GSO_G = GSO.Mat(G, float_type=float_type, gram=True) lll_obj_a = LLL.Reduction(GSO_A) lll_obj_g = LLL.Reduction(GSO_G) param = BKZ.Param(block_size=min(m, 40), strategies=BKZ.DEFAULT_STRATEGY) bkz_a = BKZ.Reduction(GSO_A, lll_obj_a, param) bkz_g = BKZ.Reduction(GSO_G, lll_obj_g, param) bkz_a() bkz_g() G_updated = tools.compute_gram(A) for i in range(m): for j in range(i + 1): assert G_updated[i, j] == G[i, j] fpylll-0.6.1/tests/test_bkz_python.py000066400000000000000000000036671455321202600177760ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Test if Python BKZ classes can be instantiated and run. 
""" from copy import copy from fpylll import IntegerMatrix, LLL from fpylll.algorithms.simple_bkz import BKZReduction as SimpleBKZ from fpylll.algorithms.simple_dbkz import DBKZReduction as SimpleDualBKZ from fpylll.algorithms.bkz import BKZReduction as BKZ from fpylll.algorithms.bkz2 import BKZReduction as BKZ2 from fpylll.tools.bkz_stats import BKZTreeTracer from fpylll import BKZ as fplll_bkz from fpylll import FPLLL dimensions = (31, 37) def make_integer_matrix(n): A = IntegerMatrix.random(n, "ntrulike", bits=30) return A def test_bkz_init(): for cls in (SimpleBKZ, SimpleDualBKZ, BKZ, BKZ2): for n in dimensions: FPLLL.set_random_seed(2**10 + n) A = make_integer_matrix(n) B = cls(copy(A)) del B def test_simple_bkz_call(block_size=10): for cls in (SimpleBKZ, SimpleDualBKZ): for n in dimensions: FPLLL.set_random_seed(n) A = make_integer_matrix(n) cls(A)(block_size=block_size) def test_bkz_call(block_size=10): params = fplll_bkz.Param(block_size=block_size, flags=fplll_bkz.VERBOSE|fplll_bkz.GH_BND) for cls in (BKZ, BKZ2): for n in dimensions: FPLLL.set_random_seed(n) A = make_integer_matrix(n) B = copy(A) cls(B)(params=params) def test_bkz_postprocessing(): A = IntegerMatrix.random(20, "qary", bits=20, k=10, int_type="long") LLL.reduction(A) bkz = BKZ(A) bkz.M.update_gso() tracer = BKZTreeTracer(bkz) solution = (2, 2, 0, 3, 4, 5, 7) v = A.multiply_left(solution, 3) bkz.svp_postprocessing(3, len(solution), solution, tracer) w = tuple(A[3]) assert v == w solution = (2, 1, 0, 3, 4, 5, 7) v = A.multiply_left(solution, 3) bkz.svp_postprocessing(3, len(solution), solution, tracer) w = tuple(A[3]) assert v == w fpylll-0.6.1/tests/test_callback_enum.py000066400000000000000000000014731455321202600203600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import FPLLL, IntegerMatrix, LLL, GSO, Enumeration def test_callback_enum(d=40): FPLLL.set_random_seed(0x1337) A = LLL.reduction(IntegerMatrix.random(100, "qary", k=50, q=7681)) M = GSO.Mat(A) M.update_gso() # 
we are not imposing a constraint enum_obj = Enumeration(M) solutions = enum_obj.enumerate(0, d, 0.99 * M.get_r(0, 0), 0) max_dist, sol = solutions[0] assert A.multiply_left(sol)[0] != 2 # now we do def callback(new_sol_coord): if A.multiply_left(new_sol_coord)[0] == 2: return True else: return False enum_obj = Enumeration(M, callbackf=callback) solutions = enum_obj.enumerate(0, d, 0.99 * M.get_r(0, 0), 0) max_dist, sol = solutions[0] assert A.multiply_left(sol)[0] == 2 fpylll-0.6.1/tests/test_cvp.py000066400000000000000000000021041455321202600163600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import GSO, IntegerMatrix, LLL, CVP, Enumeration import pytest dimensions = ((3, 3), (10, 10), (20, 20), (40, 40),) def make_integer_matrix(m, n): A = IntegerMatrix(m, n) A.randomize("uniform", bits=10) return A def test_cvp_cvp(): for m, n in dimensions: A = make_integer_matrix(m, n) A = LLL.reduction(A) M = GSO.Mat(A) M.update_gso() t = list(make_integer_matrix(n, n)[0]) v0 = CVP.closest_vector(A, t) E = Enumeration(M) _, v1 = E.enumerate(0, A.nrows, 2, 40, M.from_canonical(t))[0] v1 = IntegerMatrix.from_iterable(1, A.nrows, map(lambda x: int(round(x)), v1)) v1 = tuple((v1*A)[0]) assert v0 == v1 def test_cvp_too_large(): from fpylll.config import max_enum_dim m = max_enum_dim + 1 n = max_enum_dim + 1 A = make_integer_matrix(m, n) A = LLL.reduction(A) M = GSO.Mat(A) M.update_gso() t = list(make_integer_matrix(n, n)[0]) with pytest.raises(NotImplementedError): CVP.closest_vector(A, t) fpylll-0.6.1/tests/test_enum.py000066400000000000000000000041651455321202600165450ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import IntegerMatrix, GSO, LLL, Enumeration from fpylll.config import float_types, int_types from copy import copy import tools def make_integer_matrix(m, n, int_type="mpz"): A = IntegerMatrix(m, n, int_type=int_type) A.randomize("qary", k=m // 2, bits=m) return A def test_enum_init(): for int_type in int_types: A = 
make_integer_matrix(20, 20, int_type=int_type) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) enum_obj = Enumeration(M) del enum_obj def test_enum_enum(): for int_type in int_types: A = make_integer_matrix(20, 20, int_type=int_type) LLL.reduction(A) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) M.update_gso() enum_obj = Enumeration(M) enum_obj.enumerate(0, M.d, M.get_r(0, 0), 0) def test_enum_gram_coherence(): """ Test if the enumeration algorithm is consistent with the Gram matrices The vectors returned by the enumeration should be the same wether a lattice is given by its basis or by its Gram matrix """ dimensions = ((3, 3), (10, 10), (20, 20), (25, 25)) for m, n in dimensions: for int_type in int_types: A = make_integer_matrix(m, n, int_type=int_type) LLL.reduction(A) G = tools.compute_gram(A) for float_type in float_types: M_A = GSO.Mat(copy(A), float_type=float_type, gram=False) M_G = GSO.Mat(copy(G), float_type=float_type, gram=True) M_A.update_gso() M_G.update_gso() enum_obj_a = Enumeration(M_A, nr_solutions=min(m, 5)) shortest_vectors_a = enum_obj_a.enumerate(0, M_A.d, M_A.get_r(0, 0), 0) enum_obj_g = Enumeration(M_G, nr_solutions=min(m, 5)) shortest_vectors_g = enum_obj_g.enumerate(0, M_G.d, M_G.get_r(0, 0), 0) for i in range(len(shortest_vectors_a)): assert shortest_vectors_a[i] == shortest_vectors_g[i] fpylll-0.6.1/tests/test_gso.py000066400000000000000000000140331455321202600163640ustar00rootroot00000000000000# -*- coding: utf-8 -*- import sys import pytest from fpylll import GSO, IntegerMatrix, LLL from fpylll.config import float_types, int_types from copy import copy import tools if sys.maxsize >= 2**62: dimensions = ((0, 0), (2, 2), (3, 3), (10, 10), (30, 30), (50, 50), (60, 60)) else: dimensions = ((0, 0), (2, 2), (3, 3), (10, 10), (30, 30)) def make_integer_matrix(m, n, int_type="mpz"): A = IntegerMatrix(m, n, int_type=int_type) A.randomize("qary", k=m//2, bits=m) return A def 
test_gso_init(): for int_type in int_types: for m, n in dimensions: A = make_integer_matrix(m, n, int_type=int_type) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) del M U = IntegerMatrix(m, m, int_type=int_type) M = GSO.Mat(copy(A), U=U, float_type=float_type) del M UinvT = IntegerMatrix(m, m, int_type=int_type) M = GSO.Mat(copy(A), U=U, UinvT=UinvT, float_type=float_type) del M def test_gso_d(): for int_type in int_types: for m, n in dimensions: A = make_integer_matrix(m, n, int_type=int_type) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) assert M.d == m def test_gso_int_gram_enabled(): for int_type in int_types: for m, n in dimensions: A = make_integer_matrix(m, n, int_type=int_type) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) assert M.int_gram_enabled is False assert M.transform_enabled is False M = GSO.Mat(copy(A), float_type=float_type, flags=GSO.INT_GRAM) assert M.int_gram_enabled is True assert M.transform_enabled is False if m and n: U = IntegerMatrix(m, m, int_type=int_type) M = GSO.Mat(copy(A), U=U, float_type=float_type) assert M.transform_enabled is True assert M.inverse_transform_enabled is False UinvT = IntegerMatrix(m, m, int_type=int_type) M = GSO.Mat(copy(A), U=U, UinvT=UinvT, float_type=float_type) assert M.transform_enabled is True assert M.inverse_transform_enabled is True def test_gso_update_gso(): EPSILON = 0.0001 for int_type in int_types: for m, n in dimensions: A = make_integer_matrix(m, n, int_type=int_type) LLL.reduction(A) r00 = [] re00 = [] g00 = [] for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) M.update_gso() if (m, n) == (0, 0): continue r00.append(M.get_r(0, 0)) re00.append(M.get_r_exp(0, 0)[0]) g00.append(M.get_gram(0, 0)) for i in range(1, len(r00)): assert r00[0] == pytest.approx(r00[i], rel=EPSILON) assert re00[0] == pytest.approx(re00[i], rel=EPSILON) assert g00[0] == pytest.approx(g00[i], rel=EPSILON) def 
test_gso_babai(): for int_type in int_types: for m, n in ((0, 0), (2, 2), (3, 3), (10, 10), (30, 30)): if m <= 2 or n <= 2: continue A = make_integer_matrix(m, n, int_type=int_type) v = list(A[0]) LLL.reduction(A) for float_type in float_types: M = GSO.Mat(copy(A), update=True, float_type=float_type) try: w = M.babai(v) v_ = IntegerMatrix.from_iterable(1, m, w) * A v_ = list(v_[0]) assert v == v_ except NotImplementedError: pass def test_gso_conversion(): for int_type in int_types: for m, n in ((0, 0), (2, 2), (3, 3), (10, 10), (30, 30)): if m <= 2 or n <= 2: continue A = make_integer_matrix(m, n, int_type=int_type) v = list(A[0]) LLL.reduction(A) for float_type in float_types: M = GSO.Mat(copy(A), update=True, float_type=float_type) try: w = M.from_canonical(v) v_ = [int(round(v__)) for v__ in M.to_canonical(w)] assert v == v_ except NotImplementedError: pass def test_gso_coherence_gram_matrix(): """ Test if the GSO is coherent if it is given a matrix A or its associated Gram matrix A*A^T """ EPSILON = 0.0001 for m, n in dimensions: for int_type in int_types: # long is not tested for high dimensions because of integer overflow if m > 20 and int_type == "long": continue A = make_integer_matrix(m, n, int_type=int_type).transpose() G = tools.compute_gram(A) for float_type in float_types: M_A = GSO.Mat(copy(A), float_type=float_type, gram=False, flags=GSO.INT_GRAM) M_A.update_gso() M_G = GSO.Mat(copy(G), float_type=float_type, gram=True, flags=GSO.INT_GRAM) M_G.update_gso() # Check that the gram matrix coincide for i in range(m): for j in range(i): assert M_A.get_int_gram(i, j) == G[i, j] # Check if computations coincide for i in range(m): M_A.get_r(i, i) == pytest.approx(M_G.get_r(i, j), rel=EPSILON) for j in range(i): assert ( M_A.get_r(i, j) == pytest.approx(M_G.get_r(i, j), rel=EPSILON) ) assert ( M_A.get_mu(i, j) == pytest.approx(M_G.get_mu(i, j), rel=EPSILON) ) 
fpylll-0.6.1/tests/test_lll.py000066400000000000000000000045301455321202600163600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import GSO, IntegerMatrix, LLL from fpylll.config import float_types, int_types from copy import copy import sys import tools if sys.maxsize > 2 ** 32: dimensions = ((0, 0), (1, 1), (2, 2), (3, 3), (10, 10), (50, 50), (60, 60)) else: # https://github.com/fplll/fpylll/issues/112 dimensions = ((0, 0), (1, 1), (2, 2), (3, 3), (10, 10), (20, 20), (30, 30)) def make_integer_matrix(m, n, int_type="mpz"): A = IntegerMatrix(m, n, int_type=int_type) A.randomize("qary", bits=20, k=min(m, n) // 2) return A def test_lll_init(): for m, n in dimensions: A = make_integer_matrix(m, n) for float_type in float_types: M = GSO.Mat(copy(A), float_type=float_type) lll = LLL.Reduction(M) del lll def test_lll_lll(): for m, n in dimensions: A = make_integer_matrix(m, n) for int_type in int_types: AA = IntegerMatrix.from_matrix(A, int_type=int_type) b00 = [] for float_type in float_types: B = copy(AA) M = GSO.Mat(B, float_type=float_type) lll = LLL.Reduction(M) lll() if (m, n) == (0, 0): continue b00.append(B[0, 0]) for i in range(1, len(b00)): assert b00[0] == b00[i] def test_lll_gram_lll_coherence(): """ Test if LLL is coherent if it is given a matrix A or its associated Gram matrix A*A^T We should have Gram(LLL_basis(A)) = LLL_Gram(Gram(A)). 
""" for m, n in dimensions: for int_type in int_types: A = make_integer_matrix(m, n) G = tools.compute_gram(A) for float_type in float_types: M_A = GSO.Mat(A, float_type=float_type, gram=False) lll_A = LLL.Reduction(M_A) M_G = GSO.Mat(G, float_type=float_type, gram=True) lll_G = LLL.Reduction(M_G) # A modified in place lll_A() # G modified in place lll_G() G_updated = tools.compute_gram(A) if (m, n) == (0, 0): continue for i in range(m): for j in range(i + 1): assert G_updated[i, j] == G[i, j] fpylll-0.6.1/tests/test_multisol_enum.py000066400000000000000000000104761455321202600204770ustar00rootroot00000000000000from fpylll import IntegerMatrix, LLL, GSO from fpylll import Enumeration # # A lattice with exactly 126 shortest vectors (non zero) of square length 48 # (note, maybe one should find only half of them because of negation # symmetry elimination in enum) # # There are also 5286 non zero vectors of norm <= 80 (including the one of length 48) AA = [(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 0, 0, 0, 0, 2077664, 58758639, -60836308), (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 736607, 128854488, -129591099), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 1, 0, 1, 6270701, 303352731, -309623439), (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 2, 2956191, 103100471, -106056667), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 3, 1094338, 319823711, -320918056), (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 0, 2, 1572079, 338875225, -340447311), (0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5155388, 360557118, -365712508), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 8162869, 193281732, -201444604), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 1, 1, 0, 1, 3613941, 258239968, -261853919), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 3, 
0, 0, 1, 1, 0, 0, 2, 6891515, 336608680, -343500203), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 4813850, 386563464, -391377318), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 1, 0, 3, 480602, 175051670, -175532280), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0, 2, 4968059, 351667445, -356635511), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9086805, 322136220, -331223027), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 3565908, 41178307, -44744220), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 357731, 257708976, -258066710), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 261619, 193281732, -193543354), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 7350140, 268766620, -276116764), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 3, 6887608, 193281732, -200169345), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 835472, 64427244, -65262719), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 4668024, 327081548, -331749576), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 3, 1536276, 386563464, -388099746), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 3675412, 193281732, -196957148), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 6760718, 64427244, -71187963), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4668024, 322136220, -326804248), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9203892, 128854488, -138058380), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 450990708, -450990708) ] n, m = 27, 28 def make_integer_matrix(): A = IntegerMatrix(n, m) for i in range(n): for j in range(m): A[i, 
j] = AA[i][j] return A def test_multisol(): A = make_integer_matrix() m = GSO.Mat(A) lll_obj = LLL.Reduction(m) lll_obj() solutions = [] solutions = Enumeration(m, nr_solutions=200).enumerate(0, 27, 48.5, 0) assert len(solutions)== 126 / 2 for _, sol in solutions: sol = IntegerMatrix.from_iterable(1, A.nrows, map(lambda x: int(round(x)), sol)) sol = tuple((sol*A)[0]) dist = sum([x**2 for x in sol]) assert dist==48 solutions = [] solutions = Enumeration(m, nr_solutions=126 / 2).enumerate(0, 27, 100., 0) assert len(solutions)== 126 / 2 for _, sol in solutions: sol = IntegerMatrix.from_iterable(1, A.nrows, map(lambda x: int(round(x)), sol)) sol = tuple((sol*A)[0]) dist = sum([x**2 for x in sol]) assert dist==48 fpylll-0.6.1/tests/test_numpy.py000066400000000000000000000020211455321202600167360ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import IntegerMatrix, GSO try: from fpylll.numpy import dump_mu, dump_r have_numpy = True except ImportError: have_numpy = False def test_dump_mu(nrows=10): A = IntegerMatrix(nrows, nrows) A.randomize("ntrulike", bits=10, q=1023) M = GSO.Mat(A) if not have_numpy: return M.update_gso() mu = dump_mu(M, 0, nrows) for i in range(nrows): for j in range(i): assert abs(M.get_mu(i, j) - mu[i, j]) < 0.001 def test_dump_r(nrows=10): A = IntegerMatrix(nrows, nrows) A.randomize("ntrulike", bits=10, q=1023) M = GSO.Mat(A) if not have_numpy: return M.update_gso() r = dump_r(M, 0, nrows) for i in range(nrows): assert abs(M.get_r(i, i) - r[i]) < 0.001 def test_is_numpy_integer(nrows=10): if not have_numpy: return import numpy as np B = np.eye(nrows, dtype=np.int32) Bfpy = IntegerMatrix.from_matrix(B) for i in range(nrows): assert Bfpy[i][i] == 1 fpylll-0.6.1/tests/test_precision.py000066400000000000000000000003421455321202600175650ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import FPLLL def test_precision(): FPLLL.set_precision(53) assert FPLLL.get_precision() == 53 assert FPLLL.set_precision(100) == 53 assert 
FPLLL.set_precision(100) == 100 fpylll-0.6.1/tests/test_pruner.py000066400000000000000000000047741455321202600171220ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import Enumeration, GSO, IntegerMatrix, LLL, Pruning from fpylll.util import gaussian_heuristic try: from time import process_time # Python 3 except ImportError: from time import clock as process_time # Python 2 dim_oh = ((40, 2**22), (41, 2**22), (50, 2**24), (51, 2**24)) def prepare(n): A = IntegerMatrix.random(n, "qary", bits=n/2, k=n/2) M = GSO.Mat(A) L = LLL.Reduction(M) L() return M def test_pruner(): # A dummyPruningParams.run to load tabulated values Pruning.run(5, 50, 10*[1.], .5) for (n, overhead) in dim_oh: print(" \n ~~~~ Dim %d \n" % n) M = prepare(n) r = [M.get_r(i, i) for i in range(n)] print(" \n GREEDY") radius = gaussian_heuristic(r) * 1.6 print("pre-greedy radius %.4e" % radius) tt = process_time() pruning =Pruning.run(radius, overhead, r, 200, flags=Pruning.ZEALOUS, metric="solutions") print("Time %.4e"%(process_time() - tt)) print("post-greedy radius %.4e" % radius) print(pruning) print("cost %.4e" % sum(pruning.detailed_cost)) solutions = Enumeration(M, nr_solutions=10000).enumerate(0, n, radius, 0, pruning=pruning.coefficients) print(len(solutions)) assert len(solutions)/pruning.expectation < 2 assert len(solutions)/pruning.expectation > .2 print(" \n GRADIENT \n") print("radius %.4e" % radius) tt = process_time() pruning = Pruning.run(radius, overhead, r, 200, flags=Pruning.GRADIENT, metric="solutions") print("Time %.4e"%(process_time() - tt)) print(pruning) print("cost %.4e" % sum(pruning.detailed_cost)) solutions = Enumeration(M, nr_solutions=10000).enumerate(0, n, radius, 0, pruning=pruning.coefficients) print(len(solutions)) assert len(solutions)/pruning.expectation < 2 assert len(solutions)/pruning.expectation > .2 print(" \n HYBRID \n") print("radius %.4e" % radius) tt = process_time() pruning = Pruning.run(radius, overhead, r, 200, flags=Pruning.ZEALOUS, 
metric="solutions") print("Time %.4e"%(process_time() - tt)) print(pruning) print("cost %.4e" % sum(pruning.detailed_cost)) solutions = Enumeration(M, nr_solutions=10000).enumerate(0, n, radius, 0, pruning=pruning.coefficients) print(len(solutions)) assert len(solutions)/pruning.expectation < 2 assert len(solutions)/pruning.expectation > .2 fpylll-0.6.1/tests/test_random.py000066400000000000000000000007241455321202600170560ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import IntegerMatrix, FPLLL def make_integer_matrix(m, n, int_type="mpz"): A = IntegerMatrix(m, n, int_type=int_type) A.randomize("qary", k=m//2, bits=m) return A def test_randomize(): FPLLL.set_random_seed(1337) A0 = make_integer_matrix(20, 20) FPLLL.set_random_seed(1337) A1 = make_integer_matrix(20, 20) for i in range(20): for j in range(20): assert A0[i, j] == A1[i, j] fpylll-0.6.1/tests/test_simple_bkz.py000066400000000000000000000015661455321202600177420ustar00rootroot00000000000000# -*- coding: utf-8 -*- from copy import copy from fpylll import BKZ, IntegerMatrix, LLL from fpylll.algorithms.simple_bkz import BKZReduction as SimpleBKZ from fpylll import FPLLL dimensions = (61, 67) def make_integer_matrix(n): A = IntegerMatrix.random(n, "uniform", bits=30) return A def test_simple_bkz_init(): for n in dimensions: FPLLL.set_random_seed(2**10 + n) A = make_integer_matrix(n) B = SimpleBKZ(copy(A)) del B def test_simple_bkz_reduction(block_size=10): for n in dimensions: FPLLL.set_random_seed(n) A = make_integer_matrix(n) LLL.reduction(A) B = copy(A) BKZ.reduction(B, BKZ.Param(block_size=block_size)) C = copy(A) SimpleBKZ(C)(block_size=block_size) assert abs(C[0].norm() - B[0].norm()) < 0.1 assert abs(C[0].norm() < A[0].norm()) fpylll-0.6.1/tests/test_strategies.py000066400000000000000000000007561455321202600177550ustar00rootroot00000000000000from fpylll import IntegerMatrix, BKZ, Pruning from fpylll.fplll.bkz_param import Strategy def test_linear_pruning(): A = 
IntegerMatrix.random(25, "qary", k=15, q=127) block_size = 10 preprocessing = 3 strategies = [Strategy(i) for i in range(5)] for b in range(5, block_size+1): strategies.append(Strategy(b, [preprocessing], [Pruning.LinearPruningParams(b, 2)])) param = BKZ.Param(block_size=block_size, strategies=strategies) BKZ.reduction(A, param) fpylll-0.6.1/tests/test_svp.py000066400000000000000000000024651455321202600164120ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import GSO, IntegerMatrix, LLL, SVP, Enumeration import pytest dimensions = ((3, 3), (10, 10), (20, 20), (30, 30), (40, 40)) def make_integer_matrix(m, n): A = IntegerMatrix(m, n) A.randomize("uniform", bits=10) return A def test_svp(): for m, n in dimensions: A = make_integer_matrix(m, n) A = LLL.reduction(A) M = GSO.Mat(A) M.update_gso() E = Enumeration(M) _, v1 = E.enumerate(0, M.d, M.get_r(0, 0), 0)[0] v1 = A.multiply_left(v1) nv1 = sum([v_**2 for v_ in v1]) v0 = SVP.shortest_vector(A) nv0 = sum([v_**2 for v_ in v0]) assert nv0 == nv1 def test_svp_params(): params = [{"pruning": False, "preprocess": 2}, {"pruning": True, "preprocess": 30}, {"method": "proved"}, {"max_aux_solutions": 20}] for kwds in params: for m, n in dimensions: A = make_integer_matrix(m, n) SVP.shortest_vector(A, **kwds) def test_svp_too_large(): from fpylll.config import max_enum_dim m = max_enum_dim + 1 n = max_enum_dim + 1 A = make_integer_matrix(m, n) A = LLL.reduction(A) M = GSO.Mat(A) M.update_gso() with pytest.raises(NotImplementedError): SVP.shortest_vector(A) fpylll-0.6.1/tests/test_util.py000066400000000000000000000017441455321202600165560ustar00rootroot00000000000000# -*- coding: utf-8 -*- from fpylll import IntegerMatrix, GSO from fpylll.util import adjust_radius_to_gh_bound, set_random_seed, gaussian_heuristic dimensions = [20, 21, 40, 41, 60, 61, 80, 81, 100, 101, 200, 201, 300, 301, 400, 401] def make_integer_matrix(n): A = IntegerMatrix.random(n, "uniform", bits=30) return A def test_gh(): try: from 
fpylll.numpy import dump_r except ImportError: return for n in dimensions: set_random_seed(n) A = make_integer_matrix(n) try: M = GSO.Mat(A, float_type="ld") except ValueError: M = GSO.Mat(A, float_type="d") M.discover_all_rows() M.update_gso() radius = M.get_r(0, 0) root_det = M.get_root_det(0, n) gh_radius, ge = adjust_radius_to_gh_bound(2000*radius, 0, n, root_det, 1.0) gh1 = gh_radius * 2**ge r = dump_r(M, 0, n) gh2 = gaussian_heuristic(r) assert abs(gh1/gh2 -1) < 0.01 fpylll-0.6.1/tests/tools.py000066400000000000000000000003431455321202600156740ustar00rootroot00000000000000# -*- coding: utf-8 -*- from copy import copy def compute_gram(B): """ Compute the Gram matrix of the row-lattice with basis B """ B.transpose() Bt = copy(B) B.transpose() return copy(B * Bt)